libavformat/utils.c (libav.git): use LIBAV_LICENSE and LIBAV_VERSION instead of FFMPEG_*
1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "avio_internal.h"
23 #include "internal.h"
24 #include "libavcodec/internal.h"
25 #include "libavutil/opt.h"
26 #include "metadata.h"
27 #include "id3v2.h"
28 #include "libavutil/avstring.h"
29 #include "riff.h"
30 #include "audiointerleave.h"
31 #include <sys/time.h>
32 #include <time.h>
33 #include <strings.h>
34 #include <stdarg.h>
35 #if CONFIG_NETWORK
36 #include "network.h"
37 #endif
38
39 #undef NDEBUG
40 #include <assert.h>
41
42 /**
43 * @file
44 * various utility functions for use within FFmpeg
45 */
46
47 unsigned avformat_version(void)
48 {
49 return LIBAVFORMAT_VERSION_INT;
50 }
51
52 const char *avformat_configuration(void)
53 {
54 return LIBAV_CONFIGURATION;
55 }
56
57 const char *avformat_license(void)
58 {
59 #define LICENSE_PREFIX "libavformat license: "
60 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
61 }
62
63 /* fraction handling */
64
65 /**
66 * f = val + (num / den) + 0.5.
67 *
68  * 'num' is normalized so that 0 <= num < den.
69 *
70 * @param f fractional number
71 * @param val integer value
72 * @param num must be >= 0
73 * @param den must be >= 1
74 */
75 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
76 {
77 num += (den >> 1);
78 if (num >= den) {
79 val += num / den;
80 num = num % den;
81 }
82 f->val = val;
83 f->num = num;
84 f->den = den;
85 }
86
87 /**
88 * Fractional addition to f: f = f + (incr / f->den).
89 *
90 * @param f fractional number
91 * @param incr increment, can be positive or negative
92 */
93 static void av_frac_add(AVFrac *f, int64_t incr)
94 {
95 int64_t num, den;
96
97 num = f->num + incr;
98 den = f->den;
99 if (num < 0) {
100 f->val += num / den;
101 num = num % den;
102 if (num < 0) {
103 num += den;
104 f->val--;
105 }
106 } else if (num >= den) {
107 f->val += num / den;
108 num = num % den;
109 }
110 f->num = num;
111 }
112
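/* A minimal, illustrative sketch (kept out of the build) of what the AVFrac
 * helpers above are for: accumulating a timestamp whose per-frame increment
 * is not an integer number of ticks. The numbers and the "example_" name are
 * hypothetical, not part of the library. */
#if 0
static void example_frac_usage(void)
{
    AVFrac pts;
    int i;

    /* Track a timestamp in 1/90000 ticks for 1024-sample frames at 44100 Hz:
     * each frame is worth 90000*1024/44100 ~= 2089.8 ticks, so the fractional
     * remainder must be carried exactly rather than rounded away. */
    av_frac_init(&pts, 0, 0, 44100);
    for (i = 0; i < 100; i++)
        av_frac_add(&pts, 90000LL * 1024);
    /* pts.val now holds the integer tick count, pts.num/pts.den the carry */
}
#endif
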
113 /** head of registered input format linked list */
114 #if !FF_API_FIRST_FORMAT
115 static
116 #endif
117 AVInputFormat *first_iformat = NULL;
118 /** head of registered output format linked list */
119 #if !FF_API_FIRST_FORMAT
120 static
121 #endif
122 AVOutputFormat *first_oformat = NULL;
123
124 AVInputFormat *av_iformat_next(AVInputFormat *f)
125 {
126 if(f) return f->next;
127 else return first_iformat;
128 }
129
130 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
131 {
132 if(f) return f->next;
133 else return first_oformat;
134 }
135
136 void av_register_input_format(AVInputFormat *format)
137 {
138 AVInputFormat **p;
139 p = &first_iformat;
140 while (*p != NULL) p = &(*p)->next;
141 *p = format;
142 format->next = NULL;
143 }
144
145 void av_register_output_format(AVOutputFormat *format)
146 {
147 AVOutputFormat **p;
148 p = &first_oformat;
149 while (*p != NULL) p = &(*p)->next;
150 *p = format;
151 format->next = NULL;
152 }
153
154 int av_match_ext(const char *filename, const char *extensions)
155 {
156 const char *ext, *p;
157 char ext1[32], *q;
158
159 if(!filename)
160 return 0;
161
162 ext = strrchr(filename, '.');
163 if (ext) {
164 ext++;
165 p = extensions;
166 for(;;) {
167 q = ext1;
168 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
169 *q++ = *p++;
170 *q = '\0';
171 if (!strcasecmp(ext1, ext))
172 return 1;
173 if (*p == '\0')
174 break;
175 p++;
176 }
177 }
178 return 0;
179 }
180
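/* Illustrative sketch (not built): av_match_ext() compares the text after the
 * last '.' in the filename, case-insensitively, against a comma-separated
 * extension list such as the one an AVOutputFormat declares. The file names
 * below are made up. */
#if 0
static void example_match_ext(void)
{
    int a = av_match_ext("movie.MKV", "mkv,webm");  /* 1: case-insensitive hit */
    int b = av_match_ext("clip.webm", "mkv,webm");  /* 1: second list entry    */
    int c = av_match_ext("clip.mp4",  "mkv,webm");  /* 0: not in the list      */
}
#endif
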
181 static int match_format(const char *name, const char *names)
182 {
183 const char *p;
184 int len, namelen;
185
186 if (!name || !names)
187 return 0;
188
189 namelen = strlen(name);
190 while ((p = strchr(names, ','))) {
191 len = FFMAX(p - names, namelen);
192 if (!strncasecmp(name, names, len))
193 return 1;
194 names = p+1;
195 }
196 return !strcasecmp(name, names);
197 }
198
199 #if FF_API_GUESS_FORMAT
200 AVOutputFormat *guess_format(const char *short_name, const char *filename,
201 const char *mime_type)
202 {
203 return av_guess_format(short_name, filename, mime_type);
204 }
205 #endif
206
207 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
208 const char *mime_type)
209 {
210 AVOutputFormat *fmt = NULL, *fmt_found;
211 int score_max, score;
212
213 /* specific test for image sequences */
214 #if CONFIG_IMAGE2_MUXER
215 if (!short_name && filename &&
216 av_filename_number_test(filename) &&
217 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
218 return av_guess_format("image2", NULL, NULL);
219 }
220 #endif
221 /* Find the proper file type. */
222 fmt_found = NULL;
223 score_max = 0;
224 while ((fmt = av_oformat_next(fmt))) {
225 score = 0;
226 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
227 score += 100;
228 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
229 score += 10;
230 if (filename && fmt->extensions &&
231 av_match_ext(filename, fmt->extensions)) {
232 score += 5;
233 }
234 if (score > score_max) {
235 score_max = score;
236 fmt_found = fmt;
237 }
238 }
239 return fmt_found;
240 }
241
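/* Usage sketch (illustrative only): picking a muxer either from a target file
 * name or by short name. The scoring above prefers an exact short-name match
 * over a MIME-type match over an extension match. Assumes av_register_all()
 * has been called and that the Matroska muxer is compiled in; "out.mkv" is a
 * made-up path. */
#if 0
static AVOutputFormat *example_pick_muxer(void)
{
    /* by extension of the target file name */
    AVOutputFormat *ofmt = av_guess_format(NULL, "out.mkv", NULL);
    if (!ofmt) /* fall back to an explicit short name */
        ofmt = av_guess_format("matroska", NULL, NULL);
    return ofmt; /* NULL if no registered muxer matched */
}
#endif
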
242 #if FF_API_GUESS_FORMAT
243 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
244 const char *mime_type)
245 {
246 AVOutputFormat *fmt = av_guess_format(short_name, filename, mime_type);
247
248 if (fmt) {
249 AVOutputFormat *stream_fmt;
250 char stream_format_name[64];
251
252 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
253 stream_fmt = av_guess_format(stream_format_name, NULL, NULL);
254
255 if (stream_fmt)
256 fmt = stream_fmt;
257 }
258
259 return fmt;
260 }
261 #endif
262
263 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
264 const char *filename, const char *mime_type, enum AVMediaType type){
265 if(type == AVMEDIA_TYPE_VIDEO){
266 enum CodecID codec_id= CODEC_ID_NONE;
267
268 #if CONFIG_IMAGE2_MUXER
269 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
270 codec_id= av_guess_image2_codec(filename);
271 }
272 #endif
273 if(codec_id == CODEC_ID_NONE)
274 codec_id= fmt->video_codec;
275 return codec_id;
276 }else if(type == AVMEDIA_TYPE_AUDIO)
277 return fmt->audio_codec;
278 else if (type == AVMEDIA_TYPE_SUBTITLE)
279 return fmt->subtitle_codec;
280 else
281 return CODEC_ID_NONE;
282 }
283
284 AVInputFormat *av_find_input_format(const char *short_name)
285 {
286 AVInputFormat *fmt = NULL;
287 while ((fmt = av_iformat_next(fmt))) {
288 if (match_format(short_name, fmt->name))
289 return fmt;
290 }
291 return NULL;
292 }
293
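/* Sketch (illustrative): forcing a specific demuxer by short name instead of
 * relying on probing, e.g. for headerless raw streams. Assumes
 * av_register_all() was called; "mpegts" is just an example name. */
#if 0
static AVInputFormat *example_force_demuxer(void)
{
    AVInputFormat *ifmt = av_find_input_format("mpegts");
    /* pass the result as the 'fmt' argument of av_open_input_file() */
    return ifmt; /* NULL if no such demuxer is registered */
}
#endif
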
294 #if FF_API_SYMVER && CONFIG_SHARED && HAVE_SYMVER
295 FF_SYMVER(void, av_destruct_packet_nofree, (AVPacket *pkt), "LIBAVFORMAT_52")
296 {
297 av_destruct_packet_nofree(pkt);
298 }
299
300 FF_SYMVER(void, av_destruct_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
301 {
302 av_destruct_packet(pkt);
303 }
304
305 FF_SYMVER(int, av_new_packet, (AVPacket *pkt, int size), "LIBAVFORMAT_52")
306 {
307 return av_new_packet(pkt, size);
308 }
309
310 FF_SYMVER(int, av_dup_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
311 {
312 return av_dup_packet(pkt);
313 }
314
315 FF_SYMVER(void, av_free_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
316 {
317 av_free_packet(pkt);
318 }
319
320 FF_SYMVER(void, av_init_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
321 {
322 av_log(NULL, AV_LOG_WARNING, "Diverting av_*_packet function calls to libavcodec. Recompile to improve performance\n");
323 av_init_packet(pkt);
324 }
325 #endif
326
327 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
328 {
329 int ret= av_new_packet(pkt, size);
330
331 if(ret<0)
332 return ret;
333
334 pkt->pos= avio_tell(s);
335
336 ret= avio_read(s, pkt->data, size);
337 if(ret<=0)
338 av_free_packet(pkt);
339 else
340 av_shrink_packet(pkt, ret);
341
342 return ret;
343 }
344
345 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
346 {
347 int ret;
348 int old_size;
349 if (!pkt->size)
350 return av_get_packet(s, pkt, size);
351 old_size = pkt->size;
352 ret = av_grow_packet(pkt, size);
353 if (ret < 0)
354 return ret;
355 ret = avio_read(s, pkt->data + old_size, size);
356 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
357 return ret;
358 }
359
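/* Sketch (illustrative): how a demuxer's read_packet callback might use
 * av_get_packet() to return fixed-size chunks of raw data. The 1024-byte
 * chunk size and the single-stream assumption are arbitrary for the example. */
#if 0
static int example_raw_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret = av_get_packet(s->pb, pkt, 1024);
    if (ret <= 0)
        return ret < 0 ? ret : AVERROR_EOF; /* propagate I/O errors, map EOF */
    pkt->stream_index = 0;                  /* single-stream raw demuxer */
    return 0;
}
#endif
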
360
361 int av_filename_number_test(const char *filename)
362 {
363 char buf[1024];
364 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
365 }
366
367 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
368 {
369 AVProbeData lpd = *pd;
370 AVInputFormat *fmt1 = NULL, *fmt;
371 int score;
372
373 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
374 int id3len = ff_id3v2_tag_len(lpd.buf);
375 if (lpd.buf_size > id3len + 16) {
376 lpd.buf += id3len;
377 lpd.buf_size -= id3len;
378 }
379 }
380
381 fmt = NULL;
382 while ((fmt1 = av_iformat_next(fmt1))) {
383 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
384 continue;
385 score = 0;
386 if (fmt1->read_probe) {
387 score = fmt1->read_probe(&lpd);
388 } else if (fmt1->extensions) {
389 if (av_match_ext(lpd.filename, fmt1->extensions)) {
390 score = 50;
391 }
392 }
393 if (score > *score_max) {
394 *score_max = score;
395 fmt = fmt1;
396 }else if (score == *score_max)
397 fmt = NULL;
398 }
399 return fmt;
400 }
401
402 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
403 int score=0;
404 return av_probe_input_format2(pd, is_opened, &score);
405 }
406
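/* Sketch (illustrative): probing a format from a buffer the caller already
 * holds in memory. The buffer is assumed to be followed by
 * AVPROBE_PADDING_SIZE zeroed bytes, as the probe functions require. */
#if 0
static AVInputFormat *example_probe_buffer(const char *filename,
                                           unsigned char *buf, int buf_size)
{
    AVProbeData pd;

    pd.filename = filename ? filename : "";
    pd.buf      = buf;
    pd.buf_size = buf_size;
    return av_probe_input_format(&pd, 1); /* 1: the "file" is already open */
}
#endif
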
407 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
408 {
409 static const struct {
410 const char *name; enum CodecID id; enum AVMediaType type;
411 } fmt_id_type[] = {
412 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
413 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
414 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
415 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
416 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
417 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
418 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
419 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
420 { 0 }
421 };
422 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
423
424 if (fmt) {
425 int i;
426 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
427 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
428 for (i = 0; fmt_id_type[i].name; i++) {
429 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
430 st->codec->codec_id = fmt_id_type[i].id;
431 st->codec->codec_type = fmt_id_type[i].type;
432 break;
433 }
434 }
435 }
436 return !!fmt;
437 }
438
439 /************************************************************/
440 /* input media file */
441
442 /**
443 * Open a media file from an IO stream. 'fmt' must be specified.
444 */
445 int av_open_input_stream(AVFormatContext **ic_ptr,
446 AVIOContext *pb, const char *filename,
447 AVInputFormat *fmt, AVFormatParameters *ap)
448 {
449 int err;
450 AVFormatContext *ic;
451 AVFormatParameters default_ap;
452
453 if(!ap){
454 ap=&default_ap;
455 memset(ap, 0, sizeof(default_ap));
456 }
457
458 if(!ap->prealloced_context)
459 ic = avformat_alloc_context();
460 else
461 ic = *ic_ptr;
462 if (!ic) {
463 err = AVERROR(ENOMEM);
464 goto fail;
465 }
466 ic->iformat = fmt;
467 ic->pb = pb;
468 ic->duration = AV_NOPTS_VALUE;
469 ic->start_time = AV_NOPTS_VALUE;
470 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
471
472 /* allocate private data */
473 if (fmt->priv_data_size > 0) {
474 ic->priv_data = av_mallocz(fmt->priv_data_size);
475 if (!ic->priv_data) {
476 err = AVERROR(ENOMEM);
477 goto fail;
478 }
479 } else {
480 ic->priv_data = NULL;
481 }
482
483 // e.g. AVFMT_NOFILE formats will not have a AVIOContext
484 if (ic->pb)
485 ff_id3v2_read(ic, ID3v2_DEFAULT_MAGIC);
486
487 if (ic->iformat->read_header) {
488 err = ic->iformat->read_header(ic, ap);
489 if (err < 0)
490 goto fail;
491 }
492
493 if (pb && !ic->data_offset)
494 ic->data_offset = avio_tell(ic->pb);
495
496 #if FF_API_OLD_METADATA
497 ff_metadata_demux_compat(ic);
498 #endif
499
500 ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
501
502 *ic_ptr = ic;
503 return 0;
504 fail:
505 if (ic) {
506 int i;
507 av_freep(&ic->priv_data);
508 for(i=0;i<ic->nb_streams;i++) {
509 AVStream *st = ic->streams[i];
510 if (st) {
511 av_free(st->priv_data);
512 av_free(st->codec->extradata);
513 av_free(st->codec);
514 av_free(st->info);
515 }
516 av_free(st);
517 }
518 }
519 av_free(ic);
520 *ic_ptr = NULL;
521 return err;
522 }
523
524 /** size of probe buffer, for guessing file type from file contents */
525 #define PROBE_BUF_MIN 2048
526 #define PROBE_BUF_MAX (1<<20)
527
528 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
529 const char *filename, void *logctx,
530 unsigned int offset, unsigned int max_probe_size)
531 {
532 AVProbeData pd = { filename ? filename : "", NULL, -offset };
533 unsigned char *buf = NULL;
534 int ret = 0, probe_size;
535
536 if (!max_probe_size) {
537 max_probe_size = PROBE_BUF_MAX;
538 } else if (max_probe_size > PROBE_BUF_MAX) {
539 max_probe_size = PROBE_BUF_MAX;
540 } else if (max_probe_size < PROBE_BUF_MIN) {
541 return AVERROR(EINVAL);
542 }
543
544 if (offset >= max_probe_size) {
545 return AVERROR(EINVAL);
546 }
547
548 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0;
549 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
550 int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
551 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
552
553 if (probe_size < offset) {
554 continue;
555 }
556
557 /* read probe data */
558 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
559 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
560 /* fail if the error was not end of file; otherwise, lower the score */
561 if (ret != AVERROR_EOF) {
562 av_free(buf);
563 return ret;
564 }
565 score = 0;
566 ret = 0; /* error was end of file, nothing read */
567 }
568 pd.buf_size += ret;
569 pd.buf = &buf[offset];
570
571 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
572
573 /* guess file format */
574 *fmt = av_probe_input_format2(&pd, 1, &score);
575 if(*fmt){
576 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
577 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
578 }else
579 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
580 }
581 }
582
583 if (!*fmt) {
584 av_free(buf);
585 return AVERROR_INVALIDDATA;
586 }
587
588 /* rewind. reuse probe buffer to avoid seeking */
589 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
590 av_free(buf);
591
592 return ret;
593 }
594
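/* Sketch (illustrative): letting av_probe_input_buffer() read from an already
 * opened AVIOContext and pick a demuxer, exactly as av_open_input_file()
 * below does. The log context and probe-size arguments use the defaults. */
#if 0
static int example_probe_io(AVIOContext *pb, AVInputFormat **fmt, const char *filename)
{
    /* offset 0, max_probe_size 0: start at the beginning, use PROBE_BUF_MAX */
    return av_probe_input_buffer(pb, fmt, filename, NULL, 0, 0);
}
#endif
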
595 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
596 AVInputFormat *fmt,
597 int buf_size,
598 AVFormatParameters *ap)
599 {
600 int err;
601 AVProbeData probe_data, *pd = &probe_data;
602 AVIOContext *pb = NULL;
603 void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
604
605 pd->filename = "";
606 if (filename)
607 pd->filename = filename;
608 pd->buf = NULL;
609 pd->buf_size = 0;
610
611 if (!fmt) {
612 /* guess format if no file can be opened */
613 fmt = av_probe_input_format(pd, 0);
614 }
615
616 /* Do not open file if the format does not need it. XXX: specific
617 hack needed to handle RTSP/TCP */
618 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
619 /* if no file needed do not try to open one */
620 if ((err=avio_open(&pb, filename, URL_RDONLY)) < 0) {
621 goto fail;
622 }
623 if (buf_size > 0) {
624 url_setbufsize(pb, buf_size);
625 }
626 if (!fmt && (err = av_probe_input_buffer(pb, &fmt, filename, logctx, 0, logctx ? (*ic_ptr)->probesize : 0)) < 0) {
627 goto fail;
628 }
629 }
630
631 /* if still no format found, error */
632 if (!fmt) {
633 err = AVERROR_INVALIDDATA;
634 goto fail;
635 }
636
637 /* check filename in case an image number is expected */
638 if (fmt->flags & AVFMT_NEEDNUMBER) {
639 if (!av_filename_number_test(filename)) {
640 err = AVERROR_NUMEXPECTED;
641 goto fail;
642 }
643 }
644 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
645 if (err)
646 goto fail;
647 return 0;
648 fail:
649 av_freep(&pd->buf);
650 if (pb)
651 avio_close(pb);
652 if (ap && ap->prealloced_context)
653 av_free(*ic_ptr);
654 *ic_ptr = NULL;
655 return err;
656
657 }
658
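/* Usage sketch (illustrative, error handling trimmed): the typical open path
 * as seen from an application. Assumes av_register_all() was called once at
 * startup; the helper name is hypothetical. */
#if 0
static AVFormatContext *example_open(const char *filename)
{
    AVFormatContext *ic = NULL;

    /* NULL format and 0 buffer size: probe the format, use default buffering */
    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return NULL;
    if (av_find_stream_info(ic) < 0) { /* fill in codec parameters */
        av_close_input_file(ic);
        return NULL;
    }
    return ic;
}
#endif
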
659 /*******************************************************/
660
661 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
662 AVPacketList **plast_pktl){
663 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
664 if (!pktl)
665 return NULL;
666
667 if (*packet_buffer)
668 (*plast_pktl)->next = pktl;
669 else
670 *packet_buffer = pktl;
671
672 /* add the packet to the buffered packet list */
673 *plast_pktl = pktl;
674 pktl->pkt= *pkt;
675 return &pktl->pkt;
676 }
677
678 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
679 {
680 int ret, i;
681 AVStream *st;
682
683 for(;;){
684 AVPacketList *pktl = s->raw_packet_buffer;
685
686 if (pktl) {
687 *pkt = pktl->pkt;
688 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
689 !s->streams[pkt->stream_index]->probe_packets ||
690 s->raw_packet_buffer_remaining_size < pkt->size){
691 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
692 av_freep(&pd->buf);
693 pd->buf_size = 0;
694 s->raw_packet_buffer = pktl->next;
695 s->raw_packet_buffer_remaining_size += pkt->size;
696 av_free(pktl);
697 return 0;
698 }
699 }
700
701 av_init_packet(pkt);
702 ret= s->iformat->read_packet(s, pkt);
703 if (ret < 0) {
704 if (!pktl || ret == AVERROR(EAGAIN))
705 return ret;
706 for (i = 0; i < s->nb_streams; i++)
707 s->streams[i]->probe_packets = 0;
708 continue;
709 }
710 st= s->streams[pkt->stream_index];
711
712 switch(st->codec->codec_type){
713 case AVMEDIA_TYPE_VIDEO:
714 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
715 break;
716 case AVMEDIA_TYPE_AUDIO:
717 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
718 break;
719 case AVMEDIA_TYPE_SUBTITLE:
720 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
721 break;
722 }
723
724 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
725 !st->probe_packets))
726 return ret;
727
728 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
729 s->raw_packet_buffer_remaining_size -= pkt->size;
730
731 if(st->codec->codec_id == CODEC_ID_PROBE){
732 AVProbeData *pd = &st->probe_data;
733 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
734 --st->probe_packets;
735
736 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
737 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
738 pd->buf_size += pkt->size;
739 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
740
741 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
742 //FIXME we don't reduce the score to 0 when we run out of buffer space in bytes
743 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
744 if(st->codec->codec_id != CODEC_ID_PROBE){
745 pd->buf_size=0;
746 av_freep(&pd->buf);
747 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
748 }
749 }
750 }
751 }
752 }
753
754 /**********************************************************/
755
756 /**
757 * Get the number of samples of an audio frame. Return -1 on error.
758 */
759 static int get_audio_frame_size(AVCodecContext *enc, int size)
760 {
761 int frame_size;
762
763 if(enc->codec_id == CODEC_ID_VORBIS)
764 return -1;
765
766 if (enc->frame_size <= 1) {
767 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
768
769 if (bits_per_sample) {
770 if (enc->channels == 0)
771 return -1;
772 frame_size = (size << 3) / (bits_per_sample * enc->channels);
773 } else {
774 /* used for example by ADPCM codecs */
775 if (enc->bit_rate == 0)
776 return -1;
777 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
778 }
779 } else {
780 frame_size = enc->frame_size;
781 }
782 return frame_size;
783 }
784
785
786 /**
787  * Compute the frame duration in seconds as *pnum / *pden. Set both to 0 if it is not available.
788 */
789 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
790 AVCodecParserContext *pc, AVPacket *pkt)
791 {
792 int frame_size;
793
794 *pnum = 0;
795 *pden = 0;
796 switch(st->codec->codec_type) {
797 case AVMEDIA_TYPE_VIDEO:
798 if(st->time_base.num*1000LL > st->time_base.den){
799 *pnum = st->time_base.num;
800 *pden = st->time_base.den;
801 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
802 *pnum = st->codec->time_base.num;
803 *pden = st->codec->time_base.den;
804 if (pc && pc->repeat_pict) {
805 *pnum = (*pnum) * (1 + pc->repeat_pict);
806 }
807 //If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet.
808 //Thus, if we have no parser in that case, leave the duration undefined.
809 if(st->codec->ticks_per_frame>1 && !pc){
810 *pnum = *pden = 0;
811 }
812 }
813 break;
814 case AVMEDIA_TYPE_AUDIO:
815 frame_size = get_audio_frame_size(st->codec, pkt->size);
816 if (frame_size <= 0 || st->codec->sample_rate <= 0)
817 break;
818 *pnum = frame_size;
819 *pden = st->codec->sample_rate;
820 break;
821 default:
822 break;
823 }
824 }
825
826 static int is_intra_only(AVCodecContext *enc){
827 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
828 return 1;
829 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
830 switch(enc->codec_id){
831 case CODEC_ID_MJPEG:
832 case CODEC_ID_MJPEGB:
833 case CODEC_ID_LJPEG:
834 case CODEC_ID_RAWVIDEO:
835 case CODEC_ID_DVVIDEO:
836 case CODEC_ID_HUFFYUV:
837 case CODEC_ID_FFVHUFF:
838 case CODEC_ID_ASV1:
839 case CODEC_ID_ASV2:
840 case CODEC_ID_VCR1:
841 case CODEC_ID_DNXHD:
842 case CODEC_ID_JPEG2000:
843 return 1;
844 default: break;
845 }
846 }
847 return 0;
848 }
849
850 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
851 int64_t dts, int64_t pts)
852 {
853 AVStream *st= s->streams[stream_index];
854 AVPacketList *pktl= s->packet_buffer;
855
856 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
857 return;
858
859 st->first_dts= dts - st->cur_dts;
860 st->cur_dts= dts;
861
862 for(; pktl; pktl= pktl->next){
863 if(pktl->pkt.stream_index != stream_index)
864 continue;
865 //FIXME think more about this check
866 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
867 pktl->pkt.pts += st->first_dts;
868
869 if(pktl->pkt.dts != AV_NOPTS_VALUE)
870 pktl->pkt.dts += st->first_dts;
871
872 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
873 st->start_time= pktl->pkt.pts;
874 }
875 if (st->start_time == AV_NOPTS_VALUE)
876 st->start_time = pts;
877 }
878
879 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
880 {
881 AVPacketList *pktl= s->packet_buffer;
882 int64_t cur_dts= 0;
883
884 if(st->first_dts != AV_NOPTS_VALUE){
885 cur_dts= st->first_dts;
886 for(; pktl; pktl= pktl->next){
887 if(pktl->pkt.stream_index == pkt->stream_index){
888 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
889 break;
890 cur_dts -= pkt->duration;
891 }
892 }
893 pktl= s->packet_buffer;
894 st->first_dts = cur_dts;
895 }else if(st->cur_dts)
896 return;
897
898 for(; pktl; pktl= pktl->next){
899 if(pktl->pkt.stream_index != pkt->stream_index)
900 continue;
901 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
902 && !pktl->pkt.duration){
903 pktl->pkt.dts= cur_dts;
904 if(!st->codec->has_b_frames)
905 pktl->pkt.pts= cur_dts;
906 cur_dts += pkt->duration;
907 pktl->pkt.duration= pkt->duration;
908 }else
909 break;
910 }
911 if(st->first_dts == AV_NOPTS_VALUE)
912 st->cur_dts= cur_dts;
913 }
914
915 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
916 AVCodecParserContext *pc, AVPacket *pkt)
917 {
918 int num, den, presentation_delayed, delay, i;
919 int64_t offset;
920
921 if (s->flags & AVFMT_FLAG_NOFILLIN)
922 return;
923
924 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
925 pkt->dts= AV_NOPTS_VALUE;
926
927 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
928 //FIXME Set low_delay = 0 when has_b_frames = 1
929 st->codec->has_b_frames = 1;
930
931 /* do we have a video B-frame? */
932 delay= st->codec->has_b_frames;
933 presentation_delayed = 0;
934
935 // ignore delay caused by frame threading so that the mpeg2-without-dts
936 // warning will not trigger
937 if (delay && st->codec->active_thread_type&FF_THREAD_FRAME)
938 delay -= st->codec->thread_count-1;
939
940 /* XXX: need has_b_frame, but cannot get it if the codec is
941 not initialized */
942 if (delay &&
943 pc && pc->pict_type != FF_B_TYPE)
944 presentation_delayed = 1;
945
946 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
947 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
948 pkt->dts -= 1LL<<st->pts_wrap_bits;
949 }
950
951 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
952 // we take the conservative approach and discard both
953 // Note: if this is misbehaving for an H.264 file then possibly presentation_delayed is not set correctly.
954 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
955 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
956 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
957 }
958
959 if (pkt->duration == 0) {
960 compute_frame_duration(&num, &den, st, pc, pkt);
961 if (den && num) {
962 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
963
964 if(pkt->duration != 0 && s->packet_buffer)
965 update_initial_durations(s, st, pkt);
966 }
967 }
968
969 /* correct timestamps with byte offset if demuxers only have timestamps
970 on packet boundaries */
971 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
972 /* this will estimate bitrate based on this frame's duration and size */
973 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
974 if(pkt->pts != AV_NOPTS_VALUE)
975 pkt->pts += offset;
976 if(pkt->dts != AV_NOPTS_VALUE)
977 pkt->dts += offset;
978 }
979
980 if (pc && pc->dts_sync_point >= 0) {
981 // we have synchronization info from the parser
982 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
983 if (den > 0) {
984 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
985 if (pkt->dts != AV_NOPTS_VALUE) {
986 // got DTS from the stream, update reference timestamp
987 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
988 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
989 } else if (st->reference_dts != AV_NOPTS_VALUE) {
990 // compute DTS based on reference timestamp
991 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
992 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
993 }
994 if (pc->dts_sync_point > 0)
995 st->reference_dts = pkt->dts; // new reference
996 }
997 }
998
999 /* This may be redundant, but it should not hurt. */
1000 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1001 presentation_delayed = 1;
1002
1003 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
1004 /* interpolate PTS and DTS if they are not present */
1005 //We skip H264 currently because delay and has_b_frames are not reliably set
1006 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1007 if (presentation_delayed) {
1008 /* DTS = decompression timestamp */
1009 /* PTS = presentation timestamp */
1010 if (pkt->dts == AV_NOPTS_VALUE)
1011 pkt->dts = st->last_IP_pts;
1012 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1013 if (pkt->dts == AV_NOPTS_VALUE)
1014 pkt->dts = st->cur_dts;
1015
1016 /* this is tricky: the dts must be incremented by the duration
1017 of the frame we are displaying, i.e. the last I- or P-frame */
1018 if (st->last_IP_duration == 0)
1019 st->last_IP_duration = pkt->duration;
1020 if(pkt->dts != AV_NOPTS_VALUE)
1021 st->cur_dts = pkt->dts + st->last_IP_duration;
1022 st->last_IP_duration = pkt->duration;
1023 st->last_IP_pts= pkt->pts;
1024 /* cannot compute PTS if not present (we can compute it only
1025 by knowing the future) */
1026 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
1027 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
1028 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
1029 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1030 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
1031 pkt->pts += pkt->duration;
1032 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
1033 }
1034 }
1035
1036 /* presentation is not delayed: PTS and DTS are the same */
1037 if(pkt->pts == AV_NOPTS_VALUE)
1038 pkt->pts = pkt->dts;
1039 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1040 if(pkt->pts == AV_NOPTS_VALUE)
1041 pkt->pts = st->cur_dts;
1042 pkt->dts = pkt->pts;
1043 if(pkt->pts != AV_NOPTS_VALUE)
1044 st->cur_dts = pkt->pts + pkt->duration;
1045 }
1046 }
1047
1048 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1049 st->pts_buffer[0]= pkt->pts;
1050 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1051 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1052 if(pkt->dts == AV_NOPTS_VALUE)
1053 pkt->dts= st->pts_buffer[0];
1054 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
1055 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1056 }
1057 if(pkt->dts > st->cur_dts)
1058 st->cur_dts = pkt->dts;
1059 }
1060
1061 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1062
1063 /* update flags */
1064 if(is_intra_only(st->codec))
1065 pkt->flags |= AV_PKT_FLAG_KEY;
1066 else if (pc) {
1067 pkt->flags = 0;
1068 /* keyframe computation */
1069 if (pc->key_frame == 1)
1070 pkt->flags |= AV_PKT_FLAG_KEY;
1071 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
1072 pkt->flags |= AV_PKT_FLAG_KEY;
1073 }
1074 if (pc)
1075 pkt->convergence_duration = pc->convergence_duration;
1076 }
1077
1078
1079 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1080 {
1081 AVStream *st;
1082 int len, ret, i;
1083
1084 av_init_packet(pkt);
1085
1086 for(;;) {
1087 /* select current input stream component */
1088 st = s->cur_st;
1089 if (st) {
1090 if (!st->need_parsing || !st->parser) {
1091 /* no parsing needed: we just output the packet as is */
1092 /* raw data support */
1093 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
1094 compute_pkt_fields(s, st, NULL, pkt);
1095 s->cur_st = NULL;
1096 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1097 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1098 ff_reduce_index(s, st->index);
1099 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1100 }
1101 break;
1102 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1103 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1104 st->cur_ptr, st->cur_len,
1105 st->cur_pkt.pts, st->cur_pkt.dts,
1106 st->cur_pkt.pos);
1107 st->cur_pkt.pts = AV_NOPTS_VALUE;
1108 st->cur_pkt.dts = AV_NOPTS_VALUE;
1109 /* increment read pointer */
1110 st->cur_ptr += len;
1111 st->cur_len -= len;
1112
1113 /* return packet if any */
1114 if (pkt->size) {
1115 got_packet:
1116 pkt->duration = 0;
1117 pkt->stream_index = st->index;
1118 pkt->pts = st->parser->pts;
1119 pkt->dts = st->parser->dts;
1120 pkt->pos = st->parser->pos;
1121 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
1122 s->cur_st = NULL;
1123 pkt->destruct= st->cur_pkt.destruct;
1124 st->cur_pkt.destruct= NULL;
1125 st->cur_pkt.data = NULL;
1126 assert(st->cur_len == 0);
1127 }else{
1128 pkt->destruct = NULL;
1129 }
1130 compute_pkt_fields(s, st, st->parser, pkt);
1131
1132 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1133 ff_reduce_index(s, st->index);
1134 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1135 0, 0, AVINDEX_KEYFRAME);
1136 }
1137
1138 break;
1139 }
1140 } else {
1141 /* free packet */
1142 av_free_packet(&st->cur_pkt);
1143 s->cur_st = NULL;
1144 }
1145 } else {
1146 AVPacket cur_pkt;
1147 /* read next packet */
1148 ret = av_read_packet(s, &cur_pkt);
1149 if (ret < 0) {
1150 if (ret == AVERROR(EAGAIN))
1151 return ret;
1152 /* return the last frames, if any */
1153 for(i = 0; i < s->nb_streams; i++) {
1154 st = s->streams[i];
1155 if (st->parser && st->need_parsing) {
1156 av_parser_parse2(st->parser, st->codec,
1157 &pkt->data, &pkt->size,
1158 NULL, 0,
1159 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1160 AV_NOPTS_VALUE);
1161 if (pkt->size)
1162 goto got_packet;
1163 }
1164 }
1165 /* no more packets: really terminate parsing */
1166 return ret;
1167 }
1168 st = s->streams[cur_pkt.stream_index];
1169 st->cur_pkt= cur_pkt;
1170
1171 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1172 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1173 st->cur_pkt.pts < st->cur_pkt.dts){
1174 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1175 st->cur_pkt.stream_index,
1176 st->cur_pkt.pts,
1177 st->cur_pkt.dts,
1178 st->cur_pkt.size);
1179 // av_free_packet(&st->cur_pkt);
1180 // return -1;
1181 }
1182
1183 if(s->debug & FF_FDEBUG_TS)
1184 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1185 st->cur_pkt.stream_index,
1186 st->cur_pkt.pts,
1187 st->cur_pkt.dts,
1188 st->cur_pkt.size,
1189 st->cur_pkt.duration,
1190 st->cur_pkt.flags);
1191
1192 s->cur_st = st;
1193 st->cur_ptr = st->cur_pkt.data;
1194 st->cur_len = st->cur_pkt.size;
1195 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1196 st->parser = av_parser_init(st->codec->codec_id);
1197 if (!st->parser) {
1198 /* no parser available: just output the raw packets */
1199 st->need_parsing = AVSTREAM_PARSE_NONE;
1200 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1201 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1202 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1203 st->parser->flags |= PARSER_FLAG_ONCE;
1204 }
1205 }
1206 }
1207 }
1208 if(s->debug & FF_FDEBUG_TS)
1209 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1210 pkt->stream_index,
1211 pkt->pts,
1212 pkt->dts,
1213 pkt->size,
1214 pkt->duration,
1215 pkt->flags);
1216
1217 return 0;
1218 }
1219
1220 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1221 {
1222 AVPacketList *pktl;
1223 int eof=0;
1224 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1225
1226 for(;;){
1227 pktl = s->packet_buffer;
1228 if (pktl) {
1229 AVPacket *next_pkt= &pktl->pkt;
1230
1231 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1232 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1233 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1234 if( pktl->pkt.stream_index == next_pkt->stream_index
1235 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1236 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not a B-frame
1237 next_pkt->pts= pktl->pkt.dts;
1238 }
1239 pktl= pktl->next;
1240 }
1241 pktl = s->packet_buffer;
1242 }
1243
1244 if( next_pkt->pts != AV_NOPTS_VALUE
1245 || next_pkt->dts == AV_NOPTS_VALUE
1246 || !genpts || eof){
1247 /* read packet from packet buffer, if there is data */
1248 *pkt = *next_pkt;
1249 s->packet_buffer = pktl->next;
1250 av_free(pktl);
1251 return 0;
1252 }
1253 }
1254 if(genpts){
1255 int ret= av_read_frame_internal(s, pkt);
1256 if(ret<0){
1257 if(pktl && ret != AVERROR(EAGAIN)){
1258 eof=1;
1259 continue;
1260 }else
1261 return ret;
1262 }
1263
1264 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1265 &s->packet_buffer_end)) < 0)
1266 return AVERROR(ENOMEM);
1267 }else{
1268 assert(!s->packet_buffer);
1269 return av_read_frame_internal(s, pkt);
1270 }
1271 }
1272 }
1273
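/* Sketch (illustrative): the canonical demuxing loop on top of av_read_frame().
 * Every successfully returned packet must be released with av_free_packet();
 * pts/dts are expressed in the time_base of ic->streams[pkt.stream_index]. */
#if 0
static void example_demux_loop(AVFormatContext *ic)
{
    AVPacket pkt;

    while (av_read_frame(ic, &pkt) >= 0) {
        /* consume pkt.data / pkt.size here */
        av_free_packet(&pkt);
    }
    /* a negative return value means EOF or an I/O error */
}
#endif
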
1274 /* XXX: suppress the packet queue */
1275 static void flush_packet_queue(AVFormatContext *s)
1276 {
1277 AVPacketList *pktl;
1278
1279 for(;;) {
1280 pktl = s->packet_buffer;
1281 if (!pktl)
1282 break;
1283 s->packet_buffer = pktl->next;
1284 av_free_packet(&pktl->pkt);
1285 av_free(pktl);
1286 }
1287 while(s->raw_packet_buffer){
1288 pktl = s->raw_packet_buffer;
1289 s->raw_packet_buffer = pktl->next;
1290 av_free_packet(&pktl->pkt);
1291 av_free(pktl);
1292 }
1293 s->packet_buffer_end=
1294 s->raw_packet_buffer_end= NULL;
1295 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1296 }
1297
1298 /*******************************************************/
1299 /* seek support */
1300
1301 int av_find_default_stream_index(AVFormatContext *s)
1302 {
1303 int first_audio_index = -1;
1304 int i;
1305 AVStream *st;
1306
1307 if (s->nb_streams <= 0)
1308 return -1;
1309 for(i = 0; i < s->nb_streams; i++) {
1310 st = s->streams[i];
1311 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1312 return i;
1313 }
1314 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1315 first_audio_index = i;
1316 }
1317 return first_audio_index >= 0 ? first_audio_index : 0;
1318 }
1319
1320 /**
1321 * Flush the frame reader.
1322 */
1323 void ff_read_frame_flush(AVFormatContext *s)
1324 {
1325 AVStream *st;
1326 int i, j;
1327
1328 flush_packet_queue(s);
1329
1330 s->cur_st = NULL;
1331
1332 /* for each stream, reset read state */
1333 for(i = 0; i < s->nb_streams; i++) {
1334 st = s->streams[i];
1335
1336 if (st->parser) {
1337 av_parser_close(st->parser);
1338 st->parser = NULL;
1339 av_free_packet(&st->cur_pkt);
1340 }
1341 st->last_IP_pts = AV_NOPTS_VALUE;
1342 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1343 st->reference_dts = AV_NOPTS_VALUE;
1344 /* fail safe */
1345 st->cur_ptr = NULL;
1346 st->cur_len = 0;
1347
1348 st->probe_packets = MAX_PROBE_PACKETS;
1349
1350 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1351 st->pts_buffer[j]= AV_NOPTS_VALUE;
1352 }
1353 }
1354
1355 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1356 int i;
1357
1358 for(i = 0; i < s->nb_streams; i++) {
1359 AVStream *st = s->streams[i];
1360
1361 st->cur_dts = av_rescale(timestamp,
1362 st->time_base.den * (int64_t)ref_st->time_base.num,
1363 st->time_base.num * (int64_t)ref_st->time_base.den);
1364 }
1365 }
1366
1367 void ff_reduce_index(AVFormatContext *s, int stream_index)
1368 {
1369 AVStream *st= s->streams[stream_index];
1370 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1371
1372 if((unsigned)st->nb_index_entries >= max_entries){
1373 int i;
1374 for(i=0; 2*i<st->nb_index_entries; i++)
1375 st->index_entries[i]= st->index_entries[2*i];
1376 st->nb_index_entries= i;
1377 }
1378 }
1379
1380 int ff_add_index_entry(AVIndexEntry **index_entries,
1381 int *nb_index_entries,
1382 unsigned int *index_entries_allocated_size,
1383 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1384 {
1385 AVIndexEntry *entries, *ie;
1386 int index;
1387
1388 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1389 return -1;
1390
1391 entries = av_fast_realloc(*index_entries,
1392 index_entries_allocated_size,
1393 (*nb_index_entries + 1) *
1394 sizeof(AVIndexEntry));
1395 if(!entries)
1396 return -1;
1397
1398 *index_entries= entries;
1399
1400 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1401
1402 if(index<0){
1403 index= (*nb_index_entries)++;
1404 ie= &entries[index];
1405 assert(index==0 || ie[-1].timestamp < timestamp);
1406 }else{
1407 ie= &entries[index];
1408 if(ie->timestamp != timestamp){
1409 if(ie->timestamp <= timestamp)
1410 return -1;
1411 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1412 (*nb_index_entries)++;
1413 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1414 distance= ie->min_distance;
1415 }
1416
1417 ie->pos = pos;
1418 ie->timestamp = timestamp;
1419 ie->min_distance= distance;
1420 ie->size= size;
1421 ie->flags = flags;
1422
1423 return index;
1424 }
1425
1426 int av_add_index_entry(AVStream *st,
1427 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1428 {
1429 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1430 &st->index_entries_allocated_size, pos,
1431 timestamp, size, distance, flags);
1432 }
1433
1434 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1435 int64_t wanted_timestamp, int flags)
1436 {
1437 int a, b, m;
1438 int64_t timestamp;
1439
1440 a = - 1;
1441 b = nb_entries;
1442
1443 //optimize appending index entries at the end
1444 if(b && entries[b-1].timestamp < wanted_timestamp)
1445 a= b-1;
1446
1447 while (b - a > 1) {
1448 m = (a + b) >> 1;
1449 timestamp = entries[m].timestamp;
1450 if(timestamp >= wanted_timestamp)
1451 b = m;
1452 if(timestamp <= wanted_timestamp)
1453 a = m;
1454 }
1455 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1456
1457 if(!(flags & AVSEEK_FLAG_ANY)){
1458 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1459 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1460 }
1461 }
1462
1463 if(m == nb_entries)
1464 return -1;
1465 return m;
1466 }
1467
1468 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1469 int flags)
1470 {
1471 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1472 wanted_timestamp, flags);
1473 }
1474
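/* Sketch (illustrative): how a demuxer typically maintains and queries the
 * seek index built with av_add_index_entry(). The byte offset and timestamps
 * below are invented for the example. */
#if 0
static void example_index_usage(AVStream *st)
{
    int idx;

    /* record a keyframe at byte offset 4096, timestamp 9000 (in st->time_base) */
    av_add_index_entry(st, 4096, 9000, 0, 0, AVINDEX_KEYFRAME);

    /* find the keyframe at or before timestamp 10000 */
    idx = av_index_search_timestamp(st, 10000, AVSEEK_FLAG_BACKWARD);
    if (idx >= 0) {
        AVIndexEntry *ie = &st->index_entries[idx];
        /* seek the AVIOContext to ie->pos and resume demuxing from there */
    }
}
#endif
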
1475 #define DEBUG_SEEK
1476
1477 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1478 AVInputFormat *avif= s->iformat;
1479 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1480 int64_t ts_min, ts_max, ts;
1481 int index;
1482 int64_t ret;
1483 AVStream *st;
1484
1485 if (stream_index < 0)
1486 return -1;
1487
1488 #ifdef DEBUG_SEEK
1489 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1490 #endif
1491
1492 ts_max=
1493 ts_min= AV_NOPTS_VALUE;
1494 pos_limit= -1; //gcc falsely says it may be uninitialized
1495
1496 st= s->streams[stream_index];
1497 if(st->index_entries){
1498 AVIndexEntry *e;
1499
1500 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1501 index= FFMAX(index, 0);
1502 e= &st->index_entries[index];
1503
1504 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1505 pos_min= e->pos;
1506 ts_min= e->timestamp;
1507 #ifdef DEBUG_SEEK
1508 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1509 pos_min,ts_min);
1510 #endif
1511 }else{
1512 assert(index==0);
1513 }
1514
1515 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1516 assert(index < st->nb_index_entries);
1517 if(index >= 0){
1518 e= &st->index_entries[index];
1519 assert(e->timestamp >= target_ts);
1520 pos_max= e->pos;
1521 ts_max= e->timestamp;
1522 pos_limit= pos_max - e->min_distance;
1523 #ifdef DEBUG_SEEK
1524 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1525 pos_max,pos_limit, ts_max);
1526 #endif
1527 }
1528 }
1529
1530 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1531 if(pos<0)
1532 return -1;
1533
1534 /* do the seek */
1535 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1536 return ret;
1537
1538 av_update_cur_dts(s, st, ts);
1539
1540 return 0;
1541 }
1542
1543 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1544 int64_t pos, ts;
1545 int64_t start_pos, filesize;
1546 int no_change;
1547
1548 #ifdef DEBUG_SEEK
1549 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1550 #endif
1551
1552 if(ts_min == AV_NOPTS_VALUE){
1553 pos_min = s->data_offset;
1554 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1555 if (ts_min == AV_NOPTS_VALUE)
1556 return -1;
1557 }
1558
1559 if(ts_max == AV_NOPTS_VALUE){
1560 int step= 1024;
1561 filesize = avio_size(s->pb);
1562 pos_max = filesize - 1;
1563 do{
1564 pos_max -= step;
1565 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1566 step += step;
1567 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1568 if (ts_max == AV_NOPTS_VALUE)
1569 return -1;
1570
1571 for(;;){
1572 int64_t tmp_pos= pos_max + 1;
1573 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1574 if(tmp_ts == AV_NOPTS_VALUE)
1575 break;
1576 ts_max= tmp_ts;
1577 pos_max= tmp_pos;
1578 if(tmp_pos >= filesize)
1579 break;
1580 }
1581 pos_limit= pos_max;
1582 }
1583
1584 if(ts_min > ts_max){
1585 return -1;
1586 }else if(ts_min == ts_max){
1587 pos_limit= pos_min;
1588 }
1589
1590 no_change=0;
1591 while (pos_min < pos_limit) {
1592 #ifdef DEBUG_SEEK
1593 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1594 pos_min, pos_max,
1595 ts_min, ts_max);
1596 #endif
1597 assert(pos_limit <= pos_max);
1598
1599 if(no_change==0){
1600 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1601 // interpolate position (better than plain bisection)
1602 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1603 + pos_min - approximate_keyframe_distance;
1604 }else if(no_change==1){
1605 // bisection, if interpolation failed to change min or max pos last time
1606 pos = (pos_min + pos_limit)>>1;
1607 }else{
1608 /* linear search if bisection failed, can only happen if there
1609 are very few or no keyframes between min/max */
1610 pos=pos_min;
1611 }
1612 if(pos <= pos_min)
1613 pos= pos_min + 1;
1614 else if(pos > pos_limit)
1615 pos= pos_limit;
1616 start_pos= pos;
1617
1618 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1619 if(pos == pos_max)
1620 no_change++;
1621 else
1622 no_change=0;
1623 #ifdef DEBUG_SEEK
1624 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1625 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1626 start_pos, no_change);
1627 #endif
1628 if(ts == AV_NOPTS_VALUE){
1629 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1630 return -1;
1631 }
1632 assert(ts != AV_NOPTS_VALUE);
1633 if (target_ts <= ts) {
1634 pos_limit = start_pos - 1;
1635 pos_max = pos;
1636 ts_max = ts;
1637 }
1638 if (target_ts >= ts) {
1639 pos_min = pos;
1640 ts_min = ts;
1641 }
1642 }
1643
1644 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1645 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1646 #ifdef DEBUG_SEEK
1647 pos_min = pos;
1648 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1649 pos_min++;
1650 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1651 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1652 pos, ts_min, target_ts, ts_max);
1653 #endif
1654 *ts_ret= ts;
1655 return pos;
1656 }
1657
1658 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1659 int64_t pos_min, pos_max;
1660 #if 0
1661 AVStream *st;
1662
1663 if (stream_index < 0)
1664 return -1;
1665
1666 st= s->streams[stream_index];
1667 #endif
1668
1669 pos_min = s->data_offset;
1670 pos_max = avio_size(s->pb) - 1;
1671
1672 if (pos < pos_min) pos= pos_min;
1673 else if(pos > pos_max) pos= pos_max;
1674
1675 avio_seek(s->pb, pos, SEEK_SET);
1676
1677 #if 0
1678 av_update_cur_dts(s, st, ts);
1679 #endif
1680 return 0;
1681 }
1682
1683 static int av_seek_frame_generic(AVFormatContext *s,
1684 int stream_index, int64_t timestamp, int flags)
1685 {
1686 int index;
1687 int64_t ret;
1688 AVStream *st;
1689 AVIndexEntry *ie;
1690
1691 st = s->streams[stream_index];
1692
1693 index = av_index_search_timestamp(st, timestamp, flags);
1694
1695 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1696 return -1;
1697
1698 if(index < 0 || index==st->nb_index_entries-1){
1699 int i;
1700 AVPacket pkt;
1701
1702 if(st->nb_index_entries){
1703 assert(st->index_entries);
1704 ie= &st->index_entries[st->nb_index_entries-1];
1705 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1706 return ret;
1707 av_update_cur_dts(s, st, ie->timestamp);
1708 }else{
1709 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1710 return ret;
1711 }
1712 for(i=0;; i++) {
1713 int ret;
1714 do{
1715 ret = av_read_frame(s, &pkt);
1716 }while(ret == AVERROR(EAGAIN));
1717 if(ret<0)
1718 break;
1719 av_free_packet(&pkt);
1720 if(stream_index == pkt.stream_index){
1721 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1722 break;
1723 }
1724 }
1725 index = av_index_search_timestamp(st, timestamp, flags);
1726 }
1727 if (index < 0)
1728 return -1;
1729
1730 ff_read_frame_flush(s);
1731 if (s->iformat->read_seek){
1732 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1733 return 0;
1734 }
1735 ie = &st->index_entries[index];
1736 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1737 return ret;
1738 av_update_cur_dts(s, st, ie->timestamp);
1739
1740 return 0;
1741 }
1742
1743 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1744 {
1745 int ret;
1746 AVStream *st;
1747
1748 ff_read_frame_flush(s);
1749
1750 if(flags & AVSEEK_FLAG_BYTE)
1751 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1752
1753 if(stream_index < 0){
1754 stream_index= av_find_default_stream_index(s);
1755 if(stream_index < 0)
1756 return -1;
1757
1758 st= s->streams[stream_index];
1759 /* timestamp for default must be expressed in AV_TIME_BASE units */
1760 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1761 }
1762
1763 /* first, we try the format specific seek */
1764 if (s->iformat->read_seek)
1765 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1766 else
1767 ret = -1;
1768 if (ret >= 0) {
1769 return 0;
1770 }
1771
1772 if(s->iformat->read_timestamp)
1773 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1774 else
1775 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1776 }
1777
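/* Sketch (illustrative): seeking to a position given in seconds by converting
 * it into the stream's time_base and asking for the keyframe at or before it.
 * 'stream_index' and 'seconds' are assumed to be supplied by the caller. */
#if 0
static int example_seek_seconds(AVFormatContext *ic, int stream_index, double seconds)
{
    AVStream *st = ic->streams[stream_index];
    int64_t ts   = av_rescale_q((int64_t)(seconds * AV_TIME_BASE),
                                AV_TIME_BASE_Q, st->time_base);

    return av_seek_frame(ic, stream_index, ts, AVSEEK_FLAG_BACKWARD);
}
#endif
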
1778 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1779 {
1780 if(min_ts > ts || max_ts < ts)
1781 return -1;
1782
1783 ff_read_frame_flush(s);
1784
1785 if (s->iformat->read_seek2)
1786 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1787
1788 if(s->iformat->read_timestamp){
1789 //try to seek via read_timestamp()
1790 }
1791
1792 //Fall back to the old API if the new one is not implemented but the old one is.
1793 //Note the old one has somewhat different semantics.
1794 if(s->iformat->read_seek || 1)
1795 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1796
1797 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1798 }
1799
1800 /*******************************************************/
1801
1802 /**
1803 * Check whether the file has an accurate duration for at least one stream.
1804 *
1805 * @return TRUE if at least one stream has an accurate duration.
1806 */
1807 static int av_has_duration(AVFormatContext *ic)
1808 {
1809 int i;
1810 AVStream *st;
1811
1812 for(i = 0;i < ic->nb_streams; i++) {
1813 st = ic->streams[i];
1814 if (st->duration != AV_NOPTS_VALUE)
1815 return 1;
1816 }
1817 return 0;
1818 }
1819
1820 /**
1821 * Estimate the global stream timings from those of each component.
1822 *
1823 * Also computes the global bitrate if possible.
1824 */
1825 static void av_update_stream_timings(AVFormatContext *ic)
1826 {
1827 int64_t start_time, start_time1, end_time, end_time1;
1828 int64_t duration, duration1;
1829 int i;
1830 AVStream *st;
1831
1832 start_time = INT64_MAX;
1833 end_time = INT64_MIN;
1834 duration = INT64_MIN;
1835 for(i = 0;i < ic->nb_streams; i++) {
1836 st = ic->streams[i];
1837 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1838 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1839 if (start_time1 < start_time)
1840 start_time = start_time1;
1841 if (st->duration != AV_NOPTS_VALUE) {
1842 end_time1 = start_time1
1843 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1844 if (end_time1 > end_time)
1845 end_time = end_time1;
1846 }
1847 }
1848 if (st->duration != AV_NOPTS_VALUE) {
1849 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1850 if (duration1 > duration)
1851 duration = duration1;
1852 }
1853 }
1854 if (start_time != INT64_MAX) {
1855 ic->start_time = start_time;
1856 if (end_time != INT64_MIN) {
1857 if (end_time - start_time > duration)
1858 duration = end_time - start_time;
1859 }
1860 }
1861 if (duration != INT64_MIN) {
1862 ic->duration = duration;
1863 if (ic->file_size > 0) {
1864 /* compute the bitrate */
1865 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1866 (double)ic->duration;
1867 }
1868 }
1869 }
1870
1871 static void fill_all_stream_timings(AVFormatContext *ic)
1872 {
1873 int i;
1874 AVStream *st;
1875
1876 av_update_stream_timings(ic);
1877 for(i = 0;i < ic->nb_streams; i++) {
1878 st = ic->streams[i];
1879 if (st->start_time == AV_NOPTS_VALUE) {
1880 if(ic->start_time != AV_NOPTS_VALUE)
1881 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1882 if(ic->duration != AV_NOPTS_VALUE)
1883 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1884 }
1885 }
1886 }
1887
1888 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1889 {
1890 int64_t filesize, duration;
1891 int bit_rate, i;
1892 AVStream *st;
1893
1894 /* if bit_rate is already set, we believe it */
1895 if (ic->bit_rate <= 0) {
1896 bit_rate = 0;
1897 for(i=0;i<ic->nb_streams;i++) {
1898 st = ic->streams[i];
1899 if (st->codec->bit_rate > 0)
1900 bit_rate += st->codec->bit_rate;
1901 }
1902 ic->bit_rate = bit_rate;
1903 }
1904
1905 /* if duration is already set, we believe it */
1906 if (ic->duration == AV_NOPTS_VALUE &&
1907 ic->bit_rate != 0 &&
1908 ic->file_size != 0) {
1909 filesize = ic->file_size;
1910 if (filesize > 0) {
1911 for(i = 0; i < ic->nb_streams; i++) {
1912 st = ic->streams[i];
1913 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1914 if (st->duration == AV_NOPTS_VALUE)
1915 st->duration = duration;
1916 }
1917 }
1918 }
1919 }
1920
1921 #define DURATION_MAX_READ_SIZE 250000
1922 #define DURATION_MAX_RETRY 3
1923
1924 /* only usable for MPEG-PS streams */
1925 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1926 {
1927 AVPacket pkt1, *pkt = &pkt1;
1928 AVStream *st;
1929 int read_size, i, ret;
1930 int64_t end_time;
1931 int64_t filesize, offset, duration;
1932 int retry=0;
1933
1934 ic->cur_st = NULL;
1935
1936 /* flush packet queue */
1937 flush_packet_queue(ic);
1938
1939 for (i=0; i<ic->nb_streams; i++) {
1940 st = ic->streams[i];
1941 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1942 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");
1943
1944 if (st->parser) {
1945 av_parser_close(st->parser);
1946 st->parser= NULL;
1947 av_free_packet(&st->cur_pkt);
1948 }
1949 }
1950
1951 /* estimate the end time (duration) */
1952 /* XXX: may need to support wrapping */
1953 filesize = ic->file_size;
1954 end_time = AV_NOPTS_VALUE;
1955 do{
1956 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1957 if (offset < 0)
1958 offset = 0;
1959
1960 avio_seek(ic->pb, offset, SEEK_SET);
1961 read_size = 0;
1962 for(;;) {
1963 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1964 break;
1965
1966 do{
1967 ret = av_read_packet(ic, pkt);
1968 }while(ret == AVERROR(EAGAIN));
1969 if (ret != 0)
1970 break;
1971 read_size += pkt->size;
1972 st = ic->streams[pkt->stream_index];
1973 if (pkt->pts != AV_NOPTS_VALUE &&
1974 (st->start_time != AV_NOPTS_VALUE ||
1975 st->first_dts != AV_NOPTS_VALUE)) {
1976 duration = end_time = pkt->pts;
1977 if (st->start_time != AV_NOPTS_VALUE) duration -= st->start_time;
1978 else duration -= st->first_dts;
1979 if (duration < 0)
1980 duration += 1LL<<st->pts_wrap_bits;
1981 if (duration > 0) {
1982 if (st->duration == AV_NOPTS_VALUE ||
1983 st->duration < duration)
1984 st->duration = duration;
1985 }
1986 }
1987 av_free_packet(pkt);
1988 }
1989 }while( end_time==AV_NOPTS_VALUE
1990 && filesize > (DURATION_MAX_READ_SIZE<<retry)
1991 && ++retry <= DURATION_MAX_RETRY);
1992
1993 fill_all_stream_timings(ic);
1994
1995 avio_seek(ic->pb, old_offset, SEEK_SET);
1996 for (i=0; i<ic->nb_streams; i++) {
1997 st= ic->streams[i];
1998 st->cur_dts= st->first_dts;
1999 st->last_IP_pts = AV_NOPTS_VALUE;
2000 }
2001 }
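/*
 * Note on the wrap correction above: with the MPEG-like default of
 * av_set_pts_info(st, 33, 1, 90000) set in av_new_stream(), pts_wrap_bits
 * is 33, so a negative duration caused by a wrapped 90 kHz counter is
 * corrected by adding 1LL << 33 = 8589934592 ticks (about 26.5 hours).
 */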
2002
2003 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
2004 {
2005 int64_t file_size;
2006
2007 /* get the file size, if possible */
2008 if (ic->iformat->flags & AVFMT_NOFILE) {
2009 file_size = 0;
2010 } else {
2011 file_size = avio_size(ic->pb);
2012 if (file_size < 0)
2013 file_size = 0;
2014 }
2015 ic->file_size = file_size;
2016
2017 if ((!strcmp(ic->iformat->name, "mpeg") ||
2018 !strcmp(ic->iformat->name, "mpegts")) &&
2019 file_size && !url_is_streamed(ic->pb)) {
2020 /* get accurate estimate from the PTSes */
2021 av_estimate_timings_from_pts(ic, old_offset);
2022 } else if (av_has_duration(ic)) {
2023 /* at least one component has timings - we use them for all
2024 the components */
2025 fill_all_stream_timings(ic);
2026 } else {
2027 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2028 /* less precise: use bitrate info */
2029 av_estimate_timings_from_bit_rate(ic);
2030 }
2031 av_update_stream_timings(ic);
2032
2033 #if 0
2034 {
2035 int i;
2036 AVStream *st;
2037 for(i = 0;i < ic->nb_streams; i++) {
2038 st = ic->streams[i];
2039 printf("%d: start_time: %0.3f duration: %0.3f\n",
2040 i, (double)st->start_time / AV_TIME_BASE,
2041 (double)st->duration / AV_TIME_BASE);
2042 }
2043 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2044 (double)ic->start_time / AV_TIME_BASE,
2045 (double)ic->duration / AV_TIME_BASE,
2046 ic->bit_rate / 1000);
2047 }
2048 #endif
2049 }
2050
2051 static int has_codec_parameters(AVCodecContext *enc)
2052 {
2053 int val;
2054 switch(enc->codec_type) {
2055 case AVMEDIA_TYPE_AUDIO:
2056 val = enc->sample_rate && enc->channels && enc->sample_fmt != AV_SAMPLE_FMT_NONE;
2057 if(!enc->frame_size &&
2058 (enc->codec_id == CODEC_ID_VORBIS ||
2059 enc->codec_id == CODEC_ID_AAC ||
2060 enc->codec_id == CODEC_ID_MP1 ||
2061 enc->codec_id == CODEC_ID_MP2 ||
2062 enc->codec_id == CODEC_ID_MP3 ||
2063 enc->codec_id == CODEC_ID_SPEEX))
2064 return 0;
2065 break;
2066 case AVMEDIA_TYPE_VIDEO:
2067 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
2068 break;
2069 default:
2070 val = 1;
2071 break;
2072 }
2073 return enc->codec_id != CODEC_ID_NONE && val != 0;
2074 }
2075
2076 static int has_decode_delay_been_guessed(AVStream *st)
2077 {
2078 return st->codec->codec_id != CODEC_ID_H264 ||
2079 st->codec_info_nb_frames >= 6 + st->codec->has_b_frames;
2080 }
2081
2082 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
2083 {
2084 int16_t *samples;
2085 AVCodec *codec;
2086 int got_picture, data_size, ret=0;
2087 AVFrame picture;
2088
2089 if(!st->codec->codec){
2090 codec = avcodec_find_decoder(st->codec->codec_id);
2091 if (!codec)
2092 return -1;
2093 ret = avcodec_open(st->codec, codec);
2094 if (ret < 0)
2095 return ret;
2096 }
2097
2098 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)){
2099 switch(st->codec->codec_type) {
2100 case AVMEDIA_TYPE_VIDEO:
2101 avcodec_get_frame_defaults(&picture);
2102 ret = avcodec_decode_video2(st->codec, &picture,
2103 &got_picture, avpkt);
2104 break;
2105 case AVMEDIA_TYPE_AUDIO:
2106 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2107 samples = av_malloc(data_size);
2108 if (!samples)
2109 goto fail;
2110 ret = avcodec_decode_audio3(st->codec, samples,
2111 &data_size, avpkt);
2112 av_free(samples);
2113 break;
2114 default:
2115 break;
2116 }
2117 }
2118 fail:
2119 return ret;
2120 }
2121
2122 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2123 {
2124 while (tags->id != CODEC_ID_NONE) {
2125 if (tags->id == id)
2126 return tags->tag;
2127 tags++;
2128 }
2129 return 0;
2130 }
2131
2132 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2133 {
2134 int i;
2135 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2136 if(tag == tags[i].tag)
2137 return tags[i].id;
2138 }
2139 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2140 if (ff_toupper4(tag) == ff_toupper4(tags[i].tag))
2141 return tags[i].id;
2142 }
2143 return CODEC_ID_NONE;
2144 }
2145
2146 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2147 {
2148 int i;
2149 for(i=0; tags && tags[i]; i++){
2150 int tag= ff_codec_get_tag(tags[i], id);
2151 if(tag) return tag;
2152 }
2153 return 0;
2154 }
2155
2156 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2157 {
2158 int i;
2159 for(i=0; tags && tags[i]; i++){
2160 enum CodecID id= ff_codec_get_id(tags[i], tag);
2161 if(id!=CODEC_ID_NONE) return id;
2162 }
2163 return CODEC_ID_NONE;
2164 }
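/*
 * Illustrative usage sketch (not part of the original code): mapping between
 * fourccs and codec IDs with the RIFF video table declared in riff.h; the
 * second loop in ff_codec_get_id() makes the lookup effectively
 * case-insensitive via ff_toupper4().
 *
 *     enum CodecID id  = ff_codec_get_id(ff_codec_bmp_tags, MKTAG('H','2','6','4'));
 *     // id == CODEC_ID_H264
 *     unsigned int tag = ff_codec_get_tag(ff_codec_bmp_tags, CODEC_ID_H264);
 *     // tag is the first fourcc registered for H.264 in that table
 */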
2165
2166 static void compute_chapters_end(AVFormatContext *s)
2167 {
2168 unsigned int i;
2169
2170 for (i=0; i+1<s->nb_chapters; i++)
2171 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2172 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
2173 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
2174 s->chapters[i]->end = s->chapters[i+1]->start;
2175 }
2176
2177 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
2178 assert(s->start_time != AV_NOPTS_VALUE);
2179 assert(s->duration > 0);
2180 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
2181 AV_TIME_BASE_Q,
2182 s->chapters[i]->time_base);
2183 }
2184 }
2185
2186 static int get_std_framerate(int i){
2187 if(i<60*12) return i*1001;
2188 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2189 }
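/*
 * The candidate frame rates tried in av_find_stream_info() are
 * get_std_framerate(i) / (12 * 1001) fps: indices below 60*12 cover 1/12 fps
 * steps up to just under 60 fps, and the last five entries are the
 * NTSC-style rates. For example (illustrative):
 *
 *     get_std_framerate(300) / (12.0 * 1001) == 25.0
 *     get_std_framerate(721) / (12.0 * 1001) ~= 29.97   (30000/1001)
 */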
2190
2191 /*
2192 * Is the time base unreliable.
2193  * This is a heuristic to balance quick acceptance of the values in the
2194  * headers against some extra checks.
2195 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2196 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2197 * And there are "variable" fps files this needs to detect as well.
2198 */
2199 static int tb_unreliable(AVCodecContext *c){
2200 if( c->time_base.den >= 101L*c->time_base.num
2201 || c->time_base.den < 5L*c->time_base.num
2202 /* || c->codec_tag == AV_RL32("DIVX")
2203 || c->codec_tag == AV_RL32("XVID")*/
2204 || c->codec_id == CODEC_ID_MPEG2VIDEO
2205 || c->codec_id == CODEC_ID_H264
2206 )
2207 return 1;
2208 return 0;
2209 }
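/*
 * Example (illustrative): a codec time base of 1/1000, as reported by
 * containers that store timestamps in milliseconds, satisfies
 * den >= 101 * num and is therefore treated as unreliable, while 1/25
 * passes both range checks and is trusted unless the codec is MPEG-2
 * video or H.264.
 */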
2210
2211 int av_find_stream_info(AVFormatContext *ic)
2212 {
2213 int i, count, ret, read_size, j;
2214 AVStream *st;
2215 AVPacket pkt1, *pkt;
2216 int64_t old_offset = avio_tell(ic->pb);
2217
2218 for(i=0;i<ic->nb_streams;i++) {
2219 AVCodec *codec;
2220 st = ic->streams[i];
2221 if (st->codec->codec_id == CODEC_ID_AAC) {
2222 st->codec->sample_rate = 0;
2223 st->codec->frame_size = 0;
2224 st->codec->channels = 0;
2225 }
2226 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2227 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2228 /* if(!st->time_base.num)
2229 st->time_base= */
2230 if(!st->codec->time_base.num)
2231 st->codec->time_base= st->time_base;
2232 }
2233         // needed here only for the extradata split handling below
2234 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2235 st->parser = av_parser_init(st->codec->codec_id);
2236 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2237 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2238 }
2239 }
2240 assert(!st->codec->codec);
2241 codec = avcodec_find_decoder(st->codec->codec_id);
2242
2243 /* Force decoding of at least one frame of codec data
2244 * this makes sure the codec initializes the channel configuration
2245 * and does not trust the values from the container.
2246 */
2247 if (codec && codec->capabilities & CODEC_CAP_CHANNEL_CONF)
2248 st->codec->channels = 0;
2249
2250 /* Ensure that subtitle_header is properly set. */
2251 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2252 && codec && !st->codec->codec)
2253 avcodec_open(st->codec, codec);
2254
2255 //try to just open decoders, in case this is enough to get parameters
2256 if(!has_codec_parameters(st->codec)){
2257 if (codec && !st->codec->codec)
2258 avcodec_open(st->codec, codec);
2259 }
2260 }
2261
2262 for (i=0; i<ic->nb_streams; i++) {
2263 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2264 }
2265
2266 count = 0;
2267 read_size = 0;
2268 for(;;) {
2269 if(url_interrupt_cb()){
2270 ret= AVERROR_EXIT;
2271 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2272 break;
2273 }
2274
2275 /* check if one codec still needs to be handled */
2276 for(i=0;i<ic->nb_streams;i++) {
2277 st = ic->streams[i];
2278 if (!has_codec_parameters(st->codec))
2279 break;
2280 /* variable fps and no guess at the real fps */
2281 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2282 && st->info->duration_count<20 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2283 break;
2284 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2285 break;
2286 if(st->first_dts == AV_NOPTS_VALUE)
2287 break;
2288 }
2289 if (i == ic->nb_streams) {
2290 /* NOTE: if the format has no header, then we need to read
2291 some packets to get most of the streams, so we cannot
2292 stop here */
2293 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2294 /* if we found the info for all the codecs, we can stop */
2295 ret = count;
2296 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2297 break;
2298 }
2299 }
2300 /* we did not get all the codec info, but we read too much data */
2301 if (read_size >= ic->probesize) {
2302 ret = count;
2303 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2304 break;
2305 }
2306
2307         /* NOTE: a new stream can be added here if there is no header in the
2308            file (AVFMTCTX_NOHEADER) */
2309 ret = av_read_frame_internal(ic, &pkt1);
2310 if (ret < 0 && ret != AVERROR(EAGAIN)) {
2311 /* EOF or error */
2312 ret = -1; /* we could not have all the codec parameters before EOF */
2313 for(i=0;i<ic->nb_streams;i++) {
2314 st = ic->streams[i];
2315 if (!has_codec_parameters(st->codec)){
2316 char buf[256];
2317 avcodec_string(buf, sizeof(buf), st->codec, 0);
2318 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2319 } else {
2320 ret = 0;
2321 }
2322 }
2323 break;
2324 }
2325
2326 if (ret == AVERROR(EAGAIN))
2327 continue;
2328
2329 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2330 if ((ret = av_dup_packet(pkt)) < 0)
2331 goto find_stream_info_err;
2332
2333 read_size += pkt->size;
2334
2335 st = ic->streams[pkt->stream_index];
2336 if (st->codec_info_nb_frames>1) {
2337 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2338 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2339 break;
2340 }
2341 st->info->codec_info_duration += pkt->duration;
2342 }
2343 {
2344 int64_t last = st->info->last_dts;
2345 int64_t duration= pkt->dts - last;
2346
2347 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2348 double dur= duration * av_q2d(st->time_base);
2349
2350 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2351 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2352 if (st->info->duration_count < 2)
2353 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
2354 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2355 int framerate= get_std_framerate(i);
2356 int ticks= lrintf(dur*framerate/(1001*12));
2357 double error= dur - ticks*1001*12/(double)framerate;
2358 st->info->duration_error[i] += error*error;
2359 }
2360 st->info->duration_count++;
2361             // ignore the first few durations, they might have some random jitter
2362 if (st->info->duration_count > 3)
2363 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2364 }
2365 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2366 st->info->last_dts = pkt->dts;
2367 }
2368 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2369 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2370 if(i){
2371 st->codec->extradata_size= i;
2372 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2373 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2374 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2375 }
2376 }
2377
2378 /* if still no information, we try to open the codec and to
2379 decompress the frame. We try to avoid that in most cases as
2380 it takes longer and uses more memory. For MPEG-4, we need to
2381 decompress for QuickTime. */
2382 if (!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st))
2383 try_decode_frame(st, pkt);
2384
2385 st->codec_info_nb_frames++;
2386 count++;
2387 }
2388
2389 // close codecs which were opened in try_decode_frame()
2390 for(i=0;i<ic->nb_streams;i++) {
2391 st = ic->streams[i];
2392 if(st->codec->codec)
2393 avcodec_close(st->codec);
2394 }
2395 for(i=0;i<ic->nb_streams;i++) {
2396 st = ic->streams[i];
2397 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2398 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2399 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2400 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2401 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2402 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2403 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2404
2405 // the check for tb_unreliable() is not completely correct, since this is not about handling
2406             // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2407 // ipmovie.c produces.
2408 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2409 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2410 if (st->info->duration_count && !st->r_frame_rate.num
2411 && tb_unreliable(st->codec) /*&&
2412 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2413 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2414 int num = 0;
2415 double best_error= 2*av_q2d(st->time_base);
2416 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2417
2418 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2419 double error = st->info->duration_error[j] * get_std_framerate(j);
2420 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2421 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2422 if(error < best_error){
2423 best_error= error;
2424 num = get_std_framerate(j);
2425 }
2426 }
2427 // do not increase frame rate by more than 1 % in order to match a standard rate.
2428 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2429 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2430 }
2431
2432 if (!st->r_frame_rate.num){
2433 if( st->codec->time_base.den * (int64_t)st->time_base.num
2434 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2435 st->r_frame_rate.num = st->codec->time_base.den;
2436 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2437 }else{
2438 st->r_frame_rate.num = st->time_base.den;
2439 st->r_frame_rate.den = st->time_base.num;
2440 }
2441 }
2442 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2443 if(!st->codec->bits_per_coded_sample)
2444 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2445 }
2446 }
2447
2448 av_estimate_timings(ic, old_offset);
2449
2450 compute_chapters_end(ic);
2451
2452 #if 0
2453 /* correct DTS for B-frame streams with no timestamps */
2454 for(i=0;i<ic->nb_streams;i++) {
2455 st = ic->streams[i];
2456 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2457 if(b-frames){
2458 ppktl = &ic->packet_buffer;
2459 while(ppkt1){
2460 if(ppkt1->stream_index != i)
2461 continue;
2462 if(ppkt1->pkt->dts < 0)
2463 break;
2464 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2465 break;
2466 ppkt1->pkt->dts -= delta;
2467 ppkt1= ppkt1->next;
2468 }
2469 if(ppkt1)
2470 continue;
2471 st->cur_dts -= delta;
2472 }
2473 }
2474 }
2475 #endif
2476
2477 find_stream_info_err:
2478 for (i=0; i < ic->nb_streams; i++)
2479 av_freep(&ic->streams[i]->info);
2480 return ret;
2481 }
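/*
 * Caller-side usage sketch (illustrative; the file name and error handling
 * are placeholders, not part of the original code):
 *
 *     AVFormatContext *ic = NULL;
 *     if (av_open_input_file(&ic, "input.ts", NULL, 0, NULL) < 0)
 *         return -1;
 *     if (av_find_stream_info(ic) < 0) {
 *         av_close_input_file(ic);
 *         return -1;
 *     }
 *     av_dump_format(ic, 0, "input.ts", 0);   // inspect the filled-in streams
 *     av_close_input_file(ic);
 */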
2482
2483 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2484 {
2485 int i, j;
2486
2487 for (i = 0; i < ic->nb_programs; i++)
2488 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2489 if (ic->programs[i]->stream_index[j] == s)
2490 return ic->programs[i];
2491 return NULL;
2492 }
2493
2494 int av_find_best_stream(AVFormatContext *ic,
2495 enum AVMediaType type,
2496 int wanted_stream_nb,
2497 int related_stream,
2498 AVCodec **decoder_ret,
2499 int flags)
2500 {
2501 int i, nb_streams = ic->nb_streams, stream_number = 0;
2502 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2503 unsigned *program = NULL;
2504 AVCodec *decoder = NULL, *best_decoder = NULL;
2505
2506 if (related_stream >= 0 && wanted_stream_nb < 0) {
2507 AVProgram *p = find_program_from_stream(ic, related_stream);
2508 if (p) {
2509 program = p->stream_index;
2510 nb_streams = p->nb_stream_indexes;
2511 }
2512 }
2513 for (i = 0; i < nb_streams; i++) {
2514 AVStream *st = ic->streams[program ? program[i] : i];
2515 AVCodecContext *avctx = st->codec;
2516 if (avctx->codec_type != type)
2517 continue;
2518 if (wanted_stream_nb >= 0 && stream_number++ != wanted_stream_nb)
2519 continue;
2520 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2521 continue;
2522 if (decoder_ret) {
2523             decoder = avcodec_find_decoder(st->codec->codec_id);
2524 if (!decoder) {
2525 if (ret < 0)
2526 ret = AVERROR_DECODER_NOT_FOUND;
2527 continue;
2528 }
2529 }
2530 if (best_count >= st->codec_info_nb_frames)
2531 continue;
2532 best_count = st->codec_info_nb_frames;
2533 ret = program ? program[i] : i;
2534 best_decoder = decoder;
2535 if (program && i == nb_streams - 1 && ret < 0) {
2536 program = NULL;
2537 nb_streams = ic->nb_streams;
2538 i = 0; /* no related stream found, try again with everything */
2539 }
2540 }
2541 if (decoder_ret)
2542 *decoder_ret = best_decoder;
2543 return ret;
2544 }
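/*
 * Usage sketch (illustrative; ic is assumed to be an opened and probed
 * context): pick the "best" video stream together with its decoder.
 *
 *     AVCodec *dec = NULL;
 *     int idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (idx >= 0) {
 *         AVStream *video_st = ic->streams[idx];
 *         avcodec_open(video_st->codec, dec);
 *     }
 */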
2545
2546 /*******************************************************/
2547
2548 int av_read_play(AVFormatContext *s)
2549 {
2550 if (s->iformat->read_play)
2551 return s->iformat->read_play(s);
2552 if (s->pb)
2553 return av_url_read_fpause(s->pb, 0);
2554 return AVERROR(ENOSYS);
2555 }
2556
2557 int av_read_pause(AVFormatContext *s)
2558 {
2559 if (s->iformat->read_pause)
2560 return s->iformat->read_pause(s);
2561 if (s->pb)
2562 return av_url_read_fpause(s->pb, 1);
2563 return AVERROR(ENOSYS);
2564 }
2565
2566 void av_close_input_stream(AVFormatContext *s)
2567 {
2568 flush_packet_queue(s);
2569 if (s->iformat->read_close)
2570 s->iformat->read_close(s);
2571 avformat_free_context(s);
2572 }
2573
2574 void avformat_free_context(AVFormatContext *s)
2575 {
2576 int i;
2577 AVStream *st;
2578
2579 for(i=0;i<s->nb_streams;i++) {
2580 /* free all data in a stream component */
2581 st = s->streams[i];
2582 if (st->parser) {
2583 av_parser_close(st->parser);
2584 av_free_packet(&st->cur_pkt);
2585 }
2586 av_metadata_free(&st->metadata);
2587 av_free(st->index_entries);
2588 av_free(st->codec->extradata);
2589 av_free(st->codec->subtitle_header);
2590 av_free(st->codec);
2591 #if FF_API_OLD_METADATA
2592 av_free(st->filename);
2593 #endif
2594 av_free(st->priv_data);
2595 av_free(st->info);
2596 av_free(st);
2597 }
2598 for(i=s->nb_programs-1; i>=0; i--) {
2599 #if FF_API_OLD_METADATA
2600 av_freep(&s->programs[i]->provider_name);
2601 av_freep(&s->programs[i]->name);
2602 #endif
2603 av_metadata_free(&s->programs[i]->metadata);
2604 av_freep(&s->programs[i]->stream_index);
2605 av_freep(&s->programs[i]);
2606 }
2607 av_freep(&s->programs);
2608 av_freep(&s->priv_data);
2609 while(s->nb_chapters--) {
2610 #if FF_API_OLD_METADATA
2611 av_free(s->chapters[s->nb_chapters]->title);
2612 #endif
2613 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2614 av_free(s->chapters[s->nb_chapters]);
2615 }
2616 av_freep(&s->chapters);
2617 av_metadata_free(&s->metadata);
2618 av_freep(&s->key);
2619 av_free(s);
2620 }
2621
2622 void av_close_input_file(AVFormatContext *s)
2623 {
2624 AVIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2625 av_close_input_stream(s);
2626 if (pb)
2627 avio_close(pb);
2628 }
2629
2630 AVStream *av_new_stream(AVFormatContext *s, int id)
2631 {
2632 AVStream *st;
2633 int i;
2634
2635 #if FF_API_MAX_STREAMS
2636 if (s->nb_streams >= MAX_STREAMS){
2637 av_log(s, AV_LOG_ERROR, "Too many streams\n");
2638 return NULL;
2639 }
2640 #else
2641 AVStream **streams;
2642
2643 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2644 return NULL;
2645 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2646 if (!streams)
2647 return NULL;
2648 s->streams = streams;
2649 #endif
2650
2651 st = av_mallocz(sizeof(AVStream));
2652 if (!st)
2653 return NULL;
2654 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2655 av_free(st);
2656 return NULL;
2657 }
2658
2659 st->codec= avcodec_alloc_context();
2660 if (s->iformat) {
2661 /* no default bitrate if decoding */
2662 st->codec->bit_rate = 0;
2663 }
2664 st->index = s->nb_streams;
2665 st->id = id;
2666 st->start_time = AV_NOPTS_VALUE;
2667 st->duration = AV_NOPTS_VALUE;
2668     /* we set the current DTS to 0 so that formats without any timestamps
2669        but with durations still get some timestamps; formats with some unknown
2670        timestamps have their first few packets buffered and the timestamps
2671        corrected before they are returned to the user */
2672 st->cur_dts = 0;
2673 st->first_dts = AV_NOPTS_VALUE;
2674 st->probe_packets = MAX_PROBE_PACKETS;
2675
2676 /* default pts setting is MPEG-like */
2677 av_set_pts_info(st, 33, 1, 90000);
2678 st->last_IP_pts = AV_NOPTS_VALUE;
2679 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2680 st->pts_buffer[i]= AV_NOPTS_VALUE;
2681 st->reference_dts = AV_NOPTS_VALUE;
2682
2683 st->sample_aspect_ratio = (AVRational){0,1};
2684
2685 s->streams[s->nb_streams++] = st;
2686 return st;
2687 }
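/*
 * Sketch of how a demuxer's read_header() typically uses this (illustrative;
 * the codec values are placeholders):
 *
 *     AVStream *st = av_new_stream(s, 0);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
 *     st->codec->codec_id   = CODEC_ID_MPEG2VIDEO;
 *     av_set_pts_info(st, 64, 1, 1000);   // override the 90 kHz default, use milliseconds
 */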
2688
2689 AVProgram *av_new_program(AVFormatContext *ac, int id)
2690 {
2691 AVProgram *program=NULL;
2692 int i;
2693
2694 #ifdef DEBUG_SI
2695 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2696 #endif
2697
2698 for(i=0; i<ac->nb_programs; i++)
2699 if(ac->programs[i]->id == id)
2700 program = ac->programs[i];
2701
2702 if(!program){
2703 program = av_mallocz(sizeof(AVProgram));
2704 if (!program)
2705 return NULL;
2706 dynarray_add(&ac->programs, &ac->nb_programs, program);
2707 program->discard = AVDISCARD_NONE;
2708 }
2709 program->id = id;
2710
2711 return program;
2712 }
2713
2714 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2715 {
2716 AVChapter *chapter = NULL;
2717 int i;
2718
2719 for(i=0; i<s->nb_chapters; i++)
2720 if(s->chapters[i]->id == id)
2721 chapter = s->chapters[i];
2722
2723 if(!chapter){
2724 chapter= av_mallocz(sizeof(AVChapter));
2725 if(!chapter)
2726 return NULL;
2727 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2728 }
2729 #if FF_API_OLD_METADATA
2730 av_free(chapter->title);
2731 #endif
2732 av_metadata_set2(&chapter->metadata, "title", title, 0);
2733 chapter->id = id;
2734 chapter->time_base= time_base;
2735 chapter->start = start;
2736 chapter->end = end;
2737
2738 return chapter;
2739 }
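/*
 * Illustrative call (placeholder values): register a chapter covering the
 * first minute, with start/end expressed in milliseconds.
 *
 *     ff_new_chapter(s, 1, (AVRational){1, 1000}, 0, 60000, "Chapter 1");
 */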
2740
2741 /************************************************************/
2742 /* output media file */
2743
2744 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2745 {
2746 int ret;
2747
2748 if (s->oformat->priv_data_size > 0) {
2749 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2750 if (!s->priv_data)
2751 return AVERROR(ENOMEM);
2752 if (s->oformat->priv_class) {
2753 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2754 av_opt_set_defaults(s->priv_data);
2755 }
2756 } else
2757 s->priv_data = NULL;
2758
2759 if (s->oformat->set_parameters) {
2760 ret = s->oformat->set_parameters(s, ap);
2761 if (ret < 0)
2762 return ret;
2763 }
2764 return 0;
2765 }
2766
2767 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2768 {
2769 const AVCodecTag *avctag;
2770 int n;
2771 enum CodecID id = CODEC_ID_NONE;
2772 unsigned int tag = 0;
2773
2774 /**
2775 * Check that tag + id is in the table
2776 * If neither is in the table -> OK
2777 * If tag is in the table with another id -> FAIL
2778 * If id is in the table with another tag -> FAIL unless strict < normal
2779 */
2780 for (n = 0; s->oformat->codec_tag[n]; n++) {
2781 avctag = s->oformat->codec_tag[n];
2782 while (avctag->id != CODEC_ID_NONE) {
2783 if (ff_toupper4(avctag->tag) == ff_toupper4(st->codec->codec_tag)) {
2784 id = avctag->id;
2785 if (id == st->codec->codec_id)
2786 return 1;
2787 }
2788 if (avctag->id == st->codec->codec_id)
2789 tag = avctag->tag;
2790 avctag++;
2791 }
2792 }
2793 if (id != CODEC_ID_NONE)
2794 return 0;
2795 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
2796 return 0;
2797 return 1;
2798 }
2799
2800 int av_write_header(AVFormatContext *s)
2801 {
2802 int ret, i;
2803 AVStream *st;
2804
2805 // some sanity checks
2806 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2807 av_log(s, AV_LOG_ERROR, "no streams\n");
2808 return AVERROR(EINVAL);
2809 }
2810
2811 for(i=0;i<s->nb_streams;i++) {
2812 st = s->streams[i];
2813
2814 switch (st->codec->codec_type) {
2815 case AVMEDIA_TYPE_AUDIO:
2816 if(st->codec->sample_rate<=0){
2817 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2818 return AVERROR(EINVAL);
2819 }
2820 if(!st->codec->block_align)
2821 st->codec->block_align = st->codec->channels *
2822 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2823 break;
2824 case AVMEDIA_TYPE_VIDEO:
2825 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2826 av_log(s, AV_LOG_ERROR, "time base not set\n");
2827 return AVERROR(EINVAL);
2828 }
2829 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2830 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2831 return AVERROR(EINVAL);
2832 }
2833 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2834 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2835 return AVERROR(EINVAL);
2836 }
2837 break;
2838 }
2839
2840 if(s->oformat->codec_tag){
2841 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2842             // the current rawvideo encoding system ends up setting the wrong codec_tag for AVI; we override it here
2843 st->codec->codec_tag= 0;
2844 }
2845 if(st->codec->codec_tag){
2846 if (!validate_codec_tag(s, st)) {
2847 char tagbuf[32];
2848 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2849 av_log(s, AV_LOG_ERROR,
2850 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2851 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2852 return AVERROR_INVALIDDATA;
2853 }
2854 }else
2855 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2856 }
2857
2858 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2859 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2860 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2861 }
2862
2863 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2864 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2865 if (!s->priv_data)
2866 return AVERROR(ENOMEM);
2867 }
2868
2869 #if FF_API_OLD_METADATA
2870 ff_metadata_mux_compat(s);
2871 #endif
2872
2873 /* set muxer identification string */
2874 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2875 av_metadata_set2(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2876 }
2877
2878 if(s->oformat->write_header){
2879 ret = s->oformat->write_header(s);
2880 if (ret < 0)
2881 return ret;
2882 }
2883
2884 /* init PTS generation */
2885 for(i=0;i<s->nb_streams;i++) {
2886 int64_t den = AV_NOPTS_VALUE;
2887 st = s->streams[i];
2888
2889 switch (st->codec->codec_type) {
2890 case AVMEDIA_TYPE_AUDIO:
2891 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2892 break;
2893 case AVMEDIA_TYPE_VIDEO:
2894 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2895 break;
2896 default:
2897 break;
2898 }
2899 if (den != AV_NOPTS_VALUE) {
2900 if (den <= 0)
2901 return AVERROR_INVALIDDATA;
2902 av_frac_init(&st->pts, 0, 0, den);
2903 }
2904 }
2905 return 0;
2906 }
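/*
 * Muxing lifecycle sketch (illustrative; oc is assumed to be a fully set up
 * output context with its streams already added, and read_one_packet() is a
 * placeholder for the caller's packet source):
 *
 *     AVPacket pkt;
 *     if (av_set_parameters(oc, NULL) < 0 || av_write_header(oc) < 0)
 *         return -1;
 *     while (read_one_packet(&pkt) >= 0)
 *         av_interleaved_write_frame(oc, &pkt);
 *     av_write_trailer(oc);
 */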
2907
2908 //FIXME merge with compute_pkt_fields
2909 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
2910 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2911 int num, den, frame_size, i;
2912
2913 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2914
2915 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2916 return -1;*/
2917
2918 /* duration field */
2919 if (pkt->duration == 0) {
2920 compute_frame_duration(&num, &den, st, NULL, pkt);
2921 if (den && num) {
2922 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2923 }
2924 }
2925
2926 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2927 pkt->pts= pkt->dts;
2928
2929 //XXX/FIXME this is a temporary hack until all encoders output pts
2930 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2931 pkt->dts=
2932 // pkt->pts= st->cur_dts;
2933 pkt->pts= st->pts.val;
2934 }
2935
2936 //calculate dts from pts
2937 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2938 st->pts_buffer[0]= pkt->pts;
2939 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2940 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
2941 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2942 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2943
2944 pkt->dts= st->pts_buffer[0];
2945 }
2946
2947 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2948 av_log(s, AV_LOG_ERROR,
2949 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
2950 st->index, st->cur_dts, pkt->dts);
2951 return -1;
2952 }
2953 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2954 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
2955 return -1;
2956 }
2957
2958 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2959 st->cur_dts= pkt->dts;
2960 st->pts.val= pkt->dts;
2961
2962 /* update pts */
2963 switch (st->codec->codec_type) {
2964 case AVMEDIA_TYPE_AUDIO:
2965 frame_size = get_audio_frame_size(st->codec, pkt->size);
2966
2967 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2968 likely equal to the encoder delay, but it would be better if we
2969 had the real timestamps from the encoder */
2970 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2971 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2972 }
2973 break;
2974 case AVMEDIA_TYPE_VIDEO:
2975 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2976 break;
2977 default:
2978 break;
2979 }
2980 return 0;
2981 }
2982
2983 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2984 {
2985 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
2986
2987 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2988 return ret;
2989
2990 ret= s->oformat->write_packet(s, pkt);
2991 return ret;
2992 }
2993
2994 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2995 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2996 {
2997 AVPacketList **next_point, *this_pktl;
2998
2999 this_pktl = av_mallocz(sizeof(AVPacketList));
3000 this_pktl->pkt= *pkt;
3001 pkt->destruct= NULL; // do not free original but only the copy
3002     av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-allocated memory
3003
3004 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3005 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
3006 }else
3007 next_point = &s->packet_buffer;
3008
3009 if(*next_point){
3010 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3011 while(!compare(s, &(*next_point)->pkt, pkt)){
3012 next_point= &(*next_point)->next;
3013 }
3014 goto next_non_null;
3015 }else{
3016 next_point = &(s->packet_buffer_end->next);
3017 }
3018 }
3019 assert(!*next_point);
3020
3021 s->packet_buffer_end= this_pktl;
3022 next_non_null:
3023
3024 this_pktl->next= *next_point;
3025
3026 s->streams[pkt->stream_index]->last_in_packet_buffer=
3027 *next_point= this_pktl;
3028 }
3029
3030 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3031 {
3032 AVStream *st = s->streams[ pkt ->stream_index];
3033 AVStream *st2= s->streams[ next->stream_index];
3034 int64_t a= st2->time_base.num * (int64_t)st ->time_base.den;
3035 int64_t b= st ->time_base.num * (int64_t)st2->time_base.den;
3036 return av_rescale_rnd(pkt->dts, b, a, AV_ROUND_DOWN) < next->dts;
3037 }
3038
3039 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
3040 AVPacketList *pktl;
3041 int stream_count=0;
3042 int i;
3043
3044 if(pkt){
3045 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3046 }
3047
3048 for(i=0; i < s->nb_streams; i++)
3049 stream_count+= !!s->streams[i]->last_in_packet_buffer;
3050
3051 if(stream_count && (s->nb_streams == stream_count || flush)){
3052 pktl= s->packet_buffer;
3053 *out= pktl->pkt;
3054
3055 s->packet_buffer= pktl->next;
3056 if(!s->packet_buffer)
3057 s->packet_buffer_end= NULL;
3058
3059 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3060 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3061 av_freep(&pktl);
3062 return 1;
3063 }else{
3064 av_init_packet(out);
3065 return 0;
3066 }
3067 }
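/*
 * A return value of 1 means *out now holds a packet owned by the caller;
 * 0 means the buffer is still being filled. Passing pkt == NULL with
 * flush == 1, as av_write_trailer() does, drains the buffered packets,
 * e.g. (illustrative):
 *
 *     AVPacket out;
 *     while (av_interleave_packet_per_dts(s, &out, NULL, 1) > 0) {
 *         // write 'out', then release it
 *         av_free_packet(&out);
 *     }
 */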
3068
3069 /**
3070 * Interleave an AVPacket correctly so it can be muxed.
3071 * @param out the interleaved packet will be output here
3072 * @param in the input packet
3073 * @param flush 1 if no further packets are available as input and all
3074 * remaining packets should be output
3075 * @return 1 if a packet was output, 0 if no packet could be output,
3076 * < 0 if an error occurred
3077 */
3078 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3079 if(s->oformat->interleave_packet)
3080 return s->oformat->interleave_packet(s, out, in, flush);
3081 else
3082 return av_interleave_packet_per_dts(s, out, in, flush);
3083 }
3084
3085 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3086 AVStream *st= s->streams[ pkt->stream_index];
3087
3088 //FIXME/XXX/HACK drop zero sized packets
3089 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3090 return 0;
3091
3092 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
3093 if(compute_pkt_fields2(s, st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3094 return -1;
3095
3096 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3097 return -1;
3098
3099 for(;;){
3100 AVPacket opkt;
3101 int ret= av_interleave_packet(s, &opkt, pkt, 0);
3102 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3103 return ret;
3104
3105 ret= s->oformat->write_packet(s, &opkt);
3106
3107 av_free_packet(&opkt);
3108 pkt= NULL;
3109
3110 if(ret<0)
3111 return ret;
3112 }
3113 }
3114
3115 int av_write_trailer(AVFormatContext *s)
3116 {
3117 int ret, i;
3118
3119 for(;;){
3120 AVPacket pkt;
3121 ret= av_interleave_packet(s, &pkt, NULL, 1);
3122 if(ret<0) //FIXME cleanup needed for ret<0 ?
3123 goto fail;
3124 if(!ret)