lavf: enable av_dlog() in compute_pkt_fields2()
[libav.git] / libavformat / utils.c
1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /* #define DEBUG */
23
24 #include "avformat.h"
25 #include "avio_internal.h"
26 #include "internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavutil/opt.h"
29 #include "metadata.h"
30 #include "id3v2.h"
31 #include "libavutil/avstring.h"
32 #include "riff.h"
33 #include "audiointerleave.h"
34 #include <sys/time.h>
35 #include <time.h>
36 #include <strings.h>
37 #include <stdarg.h>
38 #if CONFIG_NETWORK
39 #include "network.h"
40 #endif
41
42 #undef NDEBUG
43 #include <assert.h>
44
45 /**
46 * @file
47 * various utility functions for use within FFmpeg
48 */
49
50 unsigned avformat_version(void)
51 {
52 return LIBAVFORMAT_VERSION_INT;
53 }
54
55 const char *avformat_configuration(void)
56 {
57 return LIBAV_CONFIGURATION;
58 }
59
60 const char *avformat_license(void)
61 {
62 #define LICENSE_PREFIX "libavformat license: "
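    /* The prefix and license string are concatenated at compile time; returning a
       pointer advanced past the prefix yields just the license text while keeping
       the full "libavformat license: ..." string embedded in the binary. */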
63 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
64 }
65
66 /* fraction handling */
67
68 /**
69 * f = val + (num / den) + 0.5.
70 *
71 * 'num' is normalized so that 0 <= num < den.
72 *
73 * @param f fractional number
74 * @param val integer value
75 * @param num must be >= 0
76 * @param den must be >= 1
77 */
78 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
79 {
80 num += (den >> 1);
81 if (num >= den) {
82 val += num / den;
83 num = num % den;
84 }
85 f->val = val;
86 f->num = num;
87 f->den = den;
88 }
89
90 /**
91 * Fractional addition to f: f = f + (incr / f->den).
92 *
93 * @param f fractional number
94 * @param incr increment, can be positive or negative
95 */
96 static void av_frac_add(AVFrac *f, int64_t incr)
97 {
98 int64_t num, den;
99
100 num = f->num + incr;
101 den = f->den;
102 if (num < 0) {
103 f->val += num / den;
104 num = num % den;
105 if (num < 0) {
106 num += den;
107 f->val--;
108 }
109 } else if (num >= den) {
110 f->val += num / den;
111 num = num % den;
112 }
113 f->num = num;
114 }
115
116 /** head of registered input format linked list */
117 #if !FF_API_FIRST_FORMAT
118 static
119 #endif
120 AVInputFormat *first_iformat = NULL;
121 /** head of registered output format linked list */
122 #if !FF_API_FIRST_FORMAT
123 static
124 #endif
125 AVOutputFormat *first_oformat = NULL;
126
127 AVInputFormat *av_iformat_next(AVInputFormat *f)
128 {
129 if(f) return f->next;
130 else return first_iformat;
131 }
132
133 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
134 {
135 if(f) return f->next;
136 else return first_oformat;
137 }
138
139 void av_register_input_format(AVInputFormat *format)
140 {
141 AVInputFormat **p;
142 p = &first_iformat;
143 while (*p != NULL) p = &(*p)->next;
144 *p = format;
145 format->next = NULL;
146 }
147
148 void av_register_output_format(AVOutputFormat *format)
149 {
150 AVOutputFormat **p;
151 p = &first_oformat;
152 while (*p != NULL) p = &(*p)->next;
153 *p = format;
154 format->next = NULL;
155 }
156
157 int av_match_ext(const char *filename, const char *extensions)
158 {
159 const char *ext, *p;
160 char ext1[32], *q;
161
162 if(!filename)
163 return 0;
164
165 ext = strrchr(filename, '.');
166 if (ext) {
167 ext++;
168 p = extensions;
169 for(;;) {
170 q = ext1;
171 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
172 *q++ = *p++;
173 *q = '\0';
174 if (!strcasecmp(ext1, ext))
175 return 1;
176 if (*p == '\0')
177 break;
178 p++;
179 }
180 }
181 return 0;
182 }
183
184 static int match_format(const char *name, const char *names)
185 {
186 const char *p;
187 int len, namelen;
188
189 if (!name || !names)
190 return 0;
191
192 namelen = strlen(name);
193 while ((p = strchr(names, ','))) {
194 len = FFMAX(p - names, namelen);
195 if (!strncasecmp(name, names, len))
196 return 1;
197 names = p+1;
198 }
199 return !strcasecmp(name, names);
200 }
201
202 #if FF_API_GUESS_FORMAT
203 AVOutputFormat *guess_format(const char *short_name, const char *filename,
204 const char *mime_type)
205 {
206 return av_guess_format(short_name, filename, mime_type);
207 }
208 #endif
209
210 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
211 const char *mime_type)
212 {
213 AVOutputFormat *fmt = NULL, *fmt_found;
214 int score_max, score;
215
216 /* specific test for image sequences */
217 #if CONFIG_IMAGE2_MUXER
218 if (!short_name && filename &&
219 av_filename_number_test(filename) &&
220 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
221 return av_guess_format("image2", NULL, NULL);
222 }
223 #endif
224 /* Find the proper file type. */
225 fmt_found = NULL;
226 score_max = 0;
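    /* Score every registered muxer: a short_name match weighs most (100),
       then the MIME type (10), then the filename extension (5). */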
227 while ((fmt = av_oformat_next(fmt))) {
228 score = 0;
229 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
230 score += 100;
231 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
232 score += 10;
233 if (filename && fmt->extensions &&
234 av_match_ext(filename, fmt->extensions)) {
235 score += 5;
236 }
237 if (score > score_max) {
238 score_max = score;
239 fmt_found = fmt;
240 }
241 }
242 return fmt_found;
243 }
244
245 #if FF_API_GUESS_FORMAT
246 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
247 const char *mime_type)
248 {
249 AVOutputFormat *fmt = av_guess_format(short_name, filename, mime_type);
250
251 if (fmt) {
252 AVOutputFormat *stream_fmt;
253 char stream_format_name[64];
254
255 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
256 stream_fmt = av_guess_format(stream_format_name, NULL, NULL);
257
258 if (stream_fmt)
259 fmt = stream_fmt;
260 }
261
262 return fmt;
263 }
264 #endif
265
266 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
267 const char *filename, const char *mime_type, enum AVMediaType type){
268 if(type == AVMEDIA_TYPE_VIDEO){
269 enum CodecID codec_id= CODEC_ID_NONE;
270
271 #if CONFIG_IMAGE2_MUXER
272 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
273 codec_id= av_guess_image2_codec(filename);
274 }
275 #endif
276 if(codec_id == CODEC_ID_NONE)
277 codec_id= fmt->video_codec;
278 return codec_id;
279 }else if(type == AVMEDIA_TYPE_AUDIO)
280 return fmt->audio_codec;
281 else if (type == AVMEDIA_TYPE_SUBTITLE)
282 return fmt->subtitle_codec;
283 else
284 return CODEC_ID_NONE;
285 }
286
287 AVInputFormat *av_find_input_format(const char *short_name)
288 {
289 AVInputFormat *fmt = NULL;
290 while ((fmt = av_iformat_next(fmt))) {
291 if (match_format(short_name, fmt->name))
292 return fmt;
293 }
294 return NULL;
295 }
296
297 #if FF_API_SYMVER && CONFIG_SHARED && HAVE_SYMVER
298 FF_SYMVER(void, av_destruct_packet_nofree, (AVPacket *pkt), "LIBAVFORMAT_52")
299 {
300 av_destruct_packet_nofree(pkt);
301 }
302
303 FF_SYMVER(void, av_destruct_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
304 {
305 av_destruct_packet(pkt);
306 }
307
308 FF_SYMVER(int, av_new_packet, (AVPacket *pkt, int size), "LIBAVFORMAT_52")
309 {
310 return av_new_packet(pkt, size);
311 }
312
313 FF_SYMVER(int, av_dup_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
314 {
315 return av_dup_packet(pkt);
316 }
317
318 FF_SYMVER(void, av_free_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
319 {
320 av_free_packet(pkt);
321 }
322
323 FF_SYMVER(void, av_init_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
324 {
325 av_log(NULL, AV_LOG_WARNING, "Diverting av_*_packet function calls to libavcodec. Recompile to improve performance\n");
326 av_init_packet(pkt);
327 }
328 #endif
329
330 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
331 {
332 int ret= av_new_packet(pkt, size);
333
334 if(ret<0)
335 return ret;
336
337 pkt->pos= avio_tell(s);
338
339 ret= avio_read(s, pkt->data, size);
340 if(ret<=0)
341 av_free_packet(pkt);
342 else
343 av_shrink_packet(pkt, ret);
344
345 return ret;
346 }
347
348 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
349 {
350 int ret;
351 int old_size;
352 if (!pkt->size)
353 return av_get_packet(s, pkt, size);
354 old_size = pkt->size;
355 ret = av_grow_packet(pkt, size);
356 if (ret < 0)
357 return ret;
358 ret = avio_read(s, pkt->data + old_size, size);
359 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
360 return ret;
361 }
362
363
364 int av_filename_number_test(const char *filename)
365 {
366 char buf[1024];
367 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
368 }
369
370 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
371 {
372 AVProbeData lpd = *pd;
373 AVInputFormat *fmt1 = NULL, *fmt;
374 int score;
375
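    /* If the probe buffer starts with an ID3v2 tag, skip past it so the tag
       data does not skew the format probes below. */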
376 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
377 int id3len = ff_id3v2_tag_len(lpd.buf);
378 if (lpd.buf_size > id3len + 16) {
379 lpd.buf += id3len;
380 lpd.buf_size -= id3len;
381 }
382 }
383
384 fmt = NULL;
385 while ((fmt1 = av_iformat_next(fmt1))) {
386 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
387 continue;
388 score = 0;
389 if (fmt1->read_probe) {
390 score = fmt1->read_probe(&lpd);
391 } else if (fmt1->extensions) {
392 if (av_match_ext(lpd.filename, fmt1->extensions)) {
393 score = 50;
394 }
395 }
396 if (score > *score_max) {
397 *score_max = score;
398 fmt = fmt1;
399 }else if (score == *score_max)
400 fmt = NULL;
401 }
402 return fmt;
403 }
404
405 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
406 int score=0;
407 return av_probe_input_format2(pd, is_opened, &score);
408 }
409
410 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
411 {
412 static const struct {
413 const char *name; enum CodecID id; enum AVMediaType type;
414 } fmt_id_type[] = {
415 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
416 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
417 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
418 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
419 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
420 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
421 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
422 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
423 { 0 }
424 };
425 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
426
427 if (fmt) {
428 int i;
429 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
430 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
431 for (i = 0; fmt_id_type[i].name; i++) {
432 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
433 st->codec->codec_id = fmt_id_type[i].id;
434 st->codec->codec_type = fmt_id_type[i].type;
435 break;
436 }
437 }
438 }
439 return !!fmt;
440 }
441
442 /************************************************************/
443 /* input media file */
444
445 /**
446 * Open a media file from an IO stream. 'fmt' must be specified.
447 */
448 int av_open_input_stream(AVFormatContext **ic_ptr,
449 AVIOContext *pb, const char *filename,
450 AVInputFormat *fmt, AVFormatParameters *ap)
451 {
452 int err;
453 AVFormatContext *ic;
454 AVFormatParameters default_ap;
455
456 if(!ap){
457 ap=&default_ap;
458 memset(ap, 0, sizeof(default_ap));
459 }
460
461 if(!ap->prealloced_context)
462 ic = avformat_alloc_context();
463 else
464 ic = *ic_ptr;
465 if (!ic) {
466 err = AVERROR(ENOMEM);
467 goto fail;
468 }
469 ic->iformat = fmt;
470 ic->pb = pb;
471 ic->duration = AV_NOPTS_VALUE;
472 ic->start_time = AV_NOPTS_VALUE;
473 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
474
475 /* allocate private data */
476 if (fmt->priv_data_size > 0) {
477 ic->priv_data = av_mallocz(fmt->priv_data_size);
478 if (!ic->priv_data) {
479 err = AVERROR(ENOMEM);
480 goto fail;
481 }
482 } else {
483 ic->priv_data = NULL;
484 }
485
486 // e.g. AVFMT_NOFILE formats will not have an AVIOContext
487 if (ic->pb)
488 ff_id3v2_read(ic, ID3v2_DEFAULT_MAGIC);
489
490 if (ic->iformat->read_header) {
491 err = ic->iformat->read_header(ic, ap);
492 if (err < 0)
493 goto fail;
494 }
495
496 if (pb && !ic->data_offset)
497 ic->data_offset = avio_tell(ic->pb);
498
499 #if FF_API_OLD_METADATA
500 ff_metadata_demux_compat(ic);
501 #endif
502
503 ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
504
505 *ic_ptr = ic;
506 return 0;
507 fail:
508 if (ic) {
509 int i;
510 av_freep(&ic->priv_data);
511 for(i=0;i<ic->nb_streams;i++) {
512 AVStream *st = ic->streams[i];
513 if (st) {
514 av_free(st->priv_data);
515 av_free(st->codec->extradata);
516 av_free(st->codec);
517 av_free(st->info);
518 }
519 av_free(st);
520 }
521 }
522 av_free(ic);
523 *ic_ptr = NULL;
524 return err;
525 }
526
527 /** size of probe buffer, for guessing file type from file contents */
528 #define PROBE_BUF_MIN 2048
529 #define PROBE_BUF_MAX (1<<20)
530
531 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
532 const char *filename, void *logctx,
533 unsigned int offset, unsigned int max_probe_size)
534 {
535 AVProbeData pd = { filename ? filename : "", NULL, -offset };
536 unsigned char *buf = NULL;
537 int ret = 0, probe_size;
538
539 if (!max_probe_size) {
540 max_probe_size = PROBE_BUF_MAX;
541 } else if (max_probe_size > PROBE_BUF_MAX) {
542 max_probe_size = PROBE_BUF_MAX;
543 } else if (max_probe_size < PROBE_BUF_MIN) {
544 return AVERROR(EINVAL);
545 }
546
547 if (offset >= max_probe_size) {
548 return AVERROR(EINVAL);
549 }
550
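    /* Probe with exponentially growing buffer sizes, from PROBE_BUF_MIN up to
       max_probe_size, until a format is detected; on all but the last
       iteration a score above AVPROBE_SCORE_MAX/4 is required. */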
551 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0;
552 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
553 int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
554 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
555
556 if (probe_size < offset) {
557 continue;
558 }
559
560 /* read probe data */
561 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
562 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
563 /* fail if error was not end of file, otherwise, lower score */
564 if (ret != AVERROR_EOF) {
565 av_free(buf);
566 return ret;
567 }
568 score = 0;
569 ret = 0; /* error was end of file, nothing read */
570 }
571 pd.buf_size += ret;
572 pd.buf = &buf[offset];
573
574 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
575
576 /* guess file format */
577 *fmt = av_probe_input_format2(&pd, 1, &score);
578 if(*fmt){
579 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
580 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
581 }else
582 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
583 }
584 }
585
586 if (!*fmt) {
587 av_free(buf);
588 return AVERROR_INVALIDDATA;
589 }
590
591 /* rewind. reuse probe buffer to avoid seeking */
592 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
593 av_free(buf);
594
595 return ret;
596 }
597
598 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
599 AVInputFormat *fmt,
600 int buf_size,
601 AVFormatParameters *ap)
602 {
603 int err;
604 AVProbeData probe_data, *pd = &probe_data;
605 AVIOContext *pb = NULL;
606 void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
607
608 pd->filename = "";
609 if (filename)
610 pd->filename = filename;
611 pd->buf = NULL;
612 pd->buf_size = 0;
613
614 if (!fmt) {
615 /* guess format if no file can be opened */
616 fmt = av_probe_input_format(pd, 0);
617 }
618
619 /* Do not open file if the format does not need it. XXX: specific
620 hack needed to handle RTSP/TCP */
621 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
622 /* if no file needed do not try to open one */
623 if ((err=avio_open(&pb, filename, URL_RDONLY)) < 0) {
624 goto fail;
625 }
626 if (buf_size > 0) {
627 ffio_set_buf_size(pb, buf_size);
628 }
629 if (!fmt && (err = av_probe_input_buffer(pb, &fmt, filename, logctx, 0, logctx ? (*ic_ptr)->probesize : 0)) < 0) {
630 goto fail;
631 }
632 }
633
634 /* if still no format found, error */
635 if (!fmt) {
636 err = AVERROR_INVALIDDATA;
637 goto fail;
638 }
639
640 /* check filename in case an image number is expected */
641 if (fmt->flags & AVFMT_NEEDNUMBER) {
642 if (!av_filename_number_test(filename)) {
643 err = AVERROR_NUMEXPECTED;
644 goto fail;
645 }
646 }
647 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
648 if (err)
649 goto fail;
650 return 0;
651 fail:
652 av_freep(&pd->buf);
653 if (pb)
654 avio_close(pb);
655 if (ap && ap->prealloced_context)
656 av_free(*ic_ptr);
657 *ic_ptr = NULL;
658 return err;
659
660 }
661
662 /*******************************************************/
663
664 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
665 AVPacketList **plast_pktl){
666 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
667 if (!pktl)
668 return NULL;
669
670 if (*packet_buffer)
671 (*plast_pktl)->next = pktl;
672 else
673 *packet_buffer = pktl;
674
675 /* add the packet to the buffered packet list */
676 *plast_pktl = pktl;
677 pktl->pkt= *pkt;
678 return &pktl->pkt;
679 }
680
681 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
682 {
683 int ret, i;
684 AVStream *st;
685
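    /* Packets of streams whose codec is still CODEC_ID_PROBE are accumulated
       in the raw packet buffer and fed to the format probers until the codec
       can be identified; everything else is returned directly. */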
686 for(;;){
687 AVPacketList *pktl = s->raw_packet_buffer;
688
689 if (pktl) {
690 *pkt = pktl->pkt;
691 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
692 !s->streams[pkt->stream_index]->probe_packets ||
693 s->raw_packet_buffer_remaining_size < pkt->size){
694 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
695 av_freep(&pd->buf);
696 pd->buf_size = 0;
697 s->raw_packet_buffer = pktl->next;
698 s->raw_packet_buffer_remaining_size += pkt->size;
699 av_free(pktl);
700 return 0;
701 }
702 }
703
704 av_init_packet(pkt);
705 ret= s->iformat->read_packet(s, pkt);
706 if (ret < 0) {
707 if (!pktl || ret == AVERROR(EAGAIN))
708 return ret;
709 for (i = 0; i < s->nb_streams; i++)
710 s->streams[i]->probe_packets = 0;
711 continue;
712 }
713 st= s->streams[pkt->stream_index];
714
715 switch(st->codec->codec_type){
716 case AVMEDIA_TYPE_VIDEO:
717 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
718 break;
719 case AVMEDIA_TYPE_AUDIO:
720 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
721 break;
722 case AVMEDIA_TYPE_SUBTITLE:
723 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
724 break;
725 }
726
727 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
728 !st->probe_packets))
729 return ret;
730
731 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
732 s->raw_packet_buffer_remaining_size -= pkt->size;
733
734 if(st->codec->codec_id == CODEC_ID_PROBE){
735 AVProbeData *pd = &st->probe_data;
736 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
737 --st->probe_packets;
738
739 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
740 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
741 pd->buf_size += pkt->size;
742 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
743
744 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
745 //FIXME we don't reduce the score to 0 for the case of running out of buffer space in bytes
746 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
747 if(st->codec->codec_id != CODEC_ID_PROBE){
748 pd->buf_size=0;
749 av_freep(&pd->buf);
750 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
751 }
752 }
753 }
754 }
755 }
756
757 /**********************************************************/
758
759 /**
760 * Get the number of samples of an audio frame. Return -1 on error.
761 */
762 static int get_audio_frame_size(AVCodecContext *enc, int size)
763 {
764 int frame_size;
765
766 if(enc->codec_id == CODEC_ID_VORBIS)
767 return -1;
768
769 if (enc->frame_size <= 1) {
770 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
771
772 if (bits_per_sample) {
773 if (enc->channels == 0)
774 return -1;
775 frame_size = (size << 3) / (bits_per_sample * enc->channels);
776 } else {
777 /* used for example by ADPCM codecs */
778 if (enc->bit_rate == 0)
779 return -1;
780 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
781 }
782 } else {
783 frame_size = enc->frame_size;
784 }
785 return frame_size;
786 }
787
788
789 /**
790 * Compute the frame duration in seconds as the fraction *pnum / *pden; both are set to 0 if it is not available.
791 */
792 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
793 AVCodecParserContext *pc, AVPacket *pkt)
794 {
795 int frame_size;
796
797 *pnum = 0;
798 *pden = 0;
799 switch(st->codec->codec_type) {
800 case AVMEDIA_TYPE_VIDEO:
801 if(st->time_base.num*1000LL > st->time_base.den){
802 *pnum = st->time_base.num;
803 *pden = st->time_base.den;
804 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
805 *pnum = st->codec->time_base.num;
806 *pden = st->codec->time_base.den;
807 if (pc && pc->repeat_pict) {
808 *pnum = (*pnum) * (1 + pc->repeat_pict);
809 }
810 //If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet.
811 //Thus, if we have no parser in such a case, leave the duration undefined.
812 if(st->codec->ticks_per_frame>1 && !pc){
813 *pnum = *pden = 0;
814 }
815 }
816 break;
817 case AVMEDIA_TYPE_AUDIO:
818 frame_size = get_audio_frame_size(st->codec, pkt->size);
819 if (frame_size <= 0 || st->codec->sample_rate <= 0)
820 break;
821 *pnum = frame_size;
822 *pden = st->codec->sample_rate;
823 break;
824 default:
825 break;
826 }
827 }
828
829 static int is_intra_only(AVCodecContext *enc){
830 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
831 return 1;
832 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
833 switch(enc->codec_id){
834 case CODEC_ID_MJPEG:
835 case CODEC_ID_MJPEGB:
836 case CODEC_ID_LJPEG:
837 case CODEC_ID_RAWVIDEO:
838 case CODEC_ID_DVVIDEO:
839 case CODEC_ID_HUFFYUV:
840 case CODEC_ID_FFVHUFF:
841 case CODEC_ID_ASV1:
842 case CODEC_ID_ASV2:
843 case CODEC_ID_VCR1:
844 case CODEC_ID_DNXHD:
845 case CODEC_ID_JPEG2000:
846 return 1;
847 default: break;
848 }
849 }
850 return 0;
851 }
852
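/* Called when the first dts of a stream becomes known: derive the stream's
   first_dts offset, shift the timestamps of packets already queued in the
   packet buffer accordingly, and initialize the stream's start_time. */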
853 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
854 int64_t dts, int64_t pts)
855 {
856 AVStream *st= s->streams[stream_index];
857 AVPacketList *pktl= s->packet_buffer;
858
859 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
860 return;
861
862 st->first_dts= dts - st->cur_dts;
863 st->cur_dts= dts;
864
865 for(; pktl; pktl= pktl->next){
866 if(pktl->pkt.stream_index != stream_index)
867 continue;
868 //FIXME think more about this check
869 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
870 pktl->pkt.pts += st->first_dts;
871
872 if(pktl->pkt.dts != AV_NOPTS_VALUE)
873 pktl->pkt.dts += st->first_dts;
874
875 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
876 st->start_time= pktl->pkt.pts;
877 }
878 if (st->start_time == AV_NOPTS_VALUE)
879 st->start_time = pts;
880 }
881
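/* Back-fill dts and duration for packets of this stream that were buffered
   before any timestamps were known, walking the packet buffer and spacing
   them pkt->duration apart. */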
882 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
883 {
884 AVPacketList *pktl= s->packet_buffer;
885 int64_t cur_dts= 0;
886
887 if(st->first_dts != AV_NOPTS_VALUE){
888 cur_dts= st->first_dts;
889 for(; pktl; pktl= pktl->next){
890 if(pktl->pkt.stream_index == pkt->stream_index){
891 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
892 break;
893 cur_dts -= pkt->duration;
894 }
895 }
896 pktl= s->packet_buffer;
897 st->first_dts = cur_dts;
898 }else if(st->cur_dts)
899 return;
900
901 for(; pktl; pktl= pktl->next){
902 if(pktl->pkt.stream_index != pkt->stream_index)
903 continue;
904 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
905 && !pktl->pkt.duration){
906 pktl->pkt.dts= cur_dts;
907 if(!st->codec->has_b_frames)
908 pktl->pkt.pts= cur_dts;
909 cur_dts += pkt->duration;
910 pktl->pkt.duration= pkt->duration;
911 }else
912 break;
913 }
914 if(st->first_dts == AV_NOPTS_VALUE)
915 st->cur_dts= cur_dts;
916 }
917
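/* Fill in missing pts/dts/duration of a demuxed packet using parser state,
   frame durations and the heuristics below, and keep the per-stream
   cur_dts/pts_buffer bookkeeping up to date. */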
918 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
919 AVCodecParserContext *pc, AVPacket *pkt)
920 {
921 int num, den, presentation_delayed, delay, i;
922 int64_t offset;
923
924 if (s->flags & AVFMT_FLAG_NOFILLIN)
925 return;
926
927 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
928 pkt->dts= AV_NOPTS_VALUE;
929
930 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
931 //FIXME Set low_delay = 0 when has_b_frames = 1
932 st->codec->has_b_frames = 1;
933
934 /* do we have a video B-frame ? */
935 delay= st->codec->has_b_frames;
936 presentation_delayed = 0;
937
938 // ignore delay caused by frame threading so that the mpeg2-without-dts
939 // warning will not trigger
940 if (delay && st->codec->active_thread_type&FF_THREAD_FRAME)
941 delay -= st->codec->thread_count-1;
942
943 /* XXX: need has_b_frames, but cannot get it if the codec is
944 not initialized */
945 if (delay &&
946 pc && pc->pict_type != FF_B_TYPE)
947 presentation_delayed = 1;
948
949 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
950 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
951 pkt->dts -= 1LL<<st->pts_wrap_bits;
952 }
953
954 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
955 // we take the conservative approach and discard both
956 // Note, if this is misbehaving for an H.264 file then possibly presentation_delayed is not set correctly.
957 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
958 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
959 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
960 }
961
962 if (pkt->duration == 0) {
963 compute_frame_duration(&num, &den, st, pc, pkt);
964 if (den && num) {
965 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
966
967 if(pkt->duration != 0 && s->packet_buffer)
968 update_initial_durations(s, st, pkt);
969 }
970 }
971
972 /* correct timestamps with byte offset if demuxers only have timestamps
973 on packet boundaries */
974 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
975 /* this will estimate bitrate based on this frame's duration and size */
976 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
977 if(pkt->pts != AV_NOPTS_VALUE)
978 pkt->pts += offset;
979 if(pkt->dts != AV_NOPTS_VALUE)
980 pkt->dts += offset;
981 }
982
983 if (pc && pc->dts_sync_point >= 0) {
984 // we have synchronization info from the parser
985 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
986 if (den > 0) {
987 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
988 if (pkt->dts != AV_NOPTS_VALUE) {
989 // got DTS from the stream, update reference timestamp
990 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
991 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
992 } else if (st->reference_dts != AV_NOPTS_VALUE) {
993 // compute DTS based on reference timestamp
994 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
995 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
996 }
997 if (pc->dts_sync_point > 0)
998 st->reference_dts = pkt->dts; // new reference
999 }
1000 }
1001
1002 /* This may be redundant, but it should not hurt. */
1003 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1004 presentation_delayed = 1;
1005
1006 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
1007 /* interpolate PTS and DTS if they are not present */
1008 //We skip H264 currently because delay and has_b_frames are not reliably set
1009 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1010 if (presentation_delayed) {
1011 /* DTS = decompression timestamp */
1012 /* PTS = presentation timestamp */
1013 if (pkt->dts == AV_NOPTS_VALUE)
1014 pkt->dts = st->last_IP_pts;
1015 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1016 if (pkt->dts == AV_NOPTS_VALUE)
1017 pkt->dts = st->cur_dts;
1018
1019 /* this is tricky: the dts must be incremented by the duration
1020 of the frame we are displaying, i.e. the last I- or P-frame */
1021 if (st->last_IP_duration == 0)
1022 st->last_IP_duration = pkt->duration;
1023 if(pkt->dts != AV_NOPTS_VALUE)
1024 st->cur_dts = pkt->dts + st->last_IP_duration;
1025 st->last_IP_duration = pkt->duration;
1026 st->last_IP_pts= pkt->pts;
1027 /* cannot compute PTS if not present (we can compute it only
1028 by knowing the future) */
1029 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
1030 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
1031 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
1032 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1033 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
1034 pkt->pts += pkt->duration;
1035 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
1036 }
1037 }
1038
1039 /* presentation is not delayed: PTS and DTS are the same */
1040 if(pkt->pts == AV_NOPTS_VALUE)
1041 pkt->pts = pkt->dts;
1042 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1043 if(pkt->pts == AV_NOPTS_VALUE)
1044 pkt->pts = st->cur_dts;
1045 pkt->dts = pkt->pts;
1046 if(pkt->pts != AV_NOPTS_VALUE)
1047 st->cur_dts = pkt->pts + pkt->duration;
1048 }
1049 }
1050
1051 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1052 st->pts_buffer[0]= pkt->pts;
1053 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1054 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1055 if(pkt->dts == AV_NOPTS_VALUE)
1056 pkt->dts= st->pts_buffer[0];
1057 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
1058 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1059 }
1060 if(pkt->dts > st->cur_dts)
1061 st->cur_dts = pkt->dts;
1062 }
1063
1064 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1065
1066 /* update flags */
1067 if(is_intra_only(st->codec))
1068 pkt->flags |= AV_PKT_FLAG_KEY;
1069 else if (pc) {
1070 pkt->flags = 0;
1071 /* keyframe computation */
1072 if (pc->key_frame == 1)
1073 pkt->flags |= AV_PKT_FLAG_KEY;
1074 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
1075 pkt->flags |= AV_PKT_FLAG_KEY;
1076 }
1077 if (pc)
1078 pkt->convergence_duration = pc->convergence_duration;
1079 }
1080
1081
1082 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1083 {
1084 AVStream *st;
1085 int len, ret, i;
1086
1087 av_init_packet(pkt);
1088
1089 for(;;) {
1090 /* select current input stream component */
1091 st = s->cur_st;
1092 if (st) {
1093 if (!st->need_parsing || !st->parser) {
1094 /* no parsing needed: we just output the packet as is */
1095 /* raw data support */
1096 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
1097 compute_pkt_fields(s, st, NULL, pkt);
1098 s->cur_st = NULL;
1099 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1100 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1101 ff_reduce_index(s, st->index);
1102 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1103 }
1104 break;
1105 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1106 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1107 st->cur_ptr, st->cur_len,
1108 st->cur_pkt.pts, st->cur_pkt.dts,
1109 st->cur_pkt.pos);
1110 st->cur_pkt.pts = AV_NOPTS_VALUE;
1111 st->cur_pkt.dts = AV_NOPTS_VALUE;
1112 /* increment read pointer */
1113 st->cur_ptr += len;
1114 st->cur_len -= len;
1115
1116 /* return packet if any */
1117 if (pkt->size) {
1118 got_packet:
1119 pkt->duration = 0;
1120 pkt->stream_index = st->index;
1121 pkt->pts = st->parser->pts;
1122 pkt->dts = st->parser->dts;
1123 pkt->pos = st->parser->pos;
1124 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
1125 s->cur_st = NULL;
1126 pkt->destruct= st->cur_pkt.destruct;
1127 st->cur_pkt.destruct= NULL;
1128 st->cur_pkt.data = NULL;
1129 assert(st->cur_len == 0);
1130 }else{
1131 pkt->destruct = NULL;
1132 }
1133 compute_pkt_fields(s, st, st->parser, pkt);
1134
1135 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1136 ff_reduce_index(s, st->index);
1137 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1138 0, 0, AVINDEX_KEYFRAME);
1139 }
1140
1141 break;
1142 }
1143 } else {
1144 /* free packet */
1145 av_free_packet(&st->cur_pkt);
1146 s->cur_st = NULL;
1147 }
1148 } else {
1149 AVPacket cur_pkt;
1150 /* read next packet */
1151 ret = av_read_packet(s, &cur_pkt);
1152 if (ret < 0) {
1153 if (ret == AVERROR(EAGAIN))
1154 return ret;
1155 /* return the last frames, if any */
1156 for(i = 0; i < s->nb_streams; i++) {
1157 st = s->streams[i];
1158 if (st->parser && st->need_parsing) {
1159 av_parser_parse2(st->parser, st->codec,
1160 &pkt->data, &pkt->size,
1161 NULL, 0,
1162 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1163 AV_NOPTS_VALUE);
1164 if (pkt->size)
1165 goto got_packet;
1166 }
1167 }
1168 /* no more packets: really terminate parsing */
1169 return ret;
1170 }
1171 st = s->streams[cur_pkt.stream_index];
1172 st->cur_pkt= cur_pkt;
1173
1174 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1175 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1176 st->cur_pkt.pts < st->cur_pkt.dts){
1177 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1178 st->cur_pkt.stream_index,
1179 st->cur_pkt.pts,
1180 st->cur_pkt.dts,
1181 st->cur_pkt.size);
1182 // av_free_packet(&st->cur_pkt);
1183 // return -1;
1184 }
1185
1186 if(s->debug & FF_FDEBUG_TS)
1187 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1188 st->cur_pkt.stream_index,
1189 st->cur_pkt.pts,
1190 st->cur_pkt.dts,
1191 st->cur_pkt.size,
1192 st->cur_pkt.duration,
1193 st->cur_pkt.flags);
1194
1195 s->cur_st = st;
1196 st->cur_ptr = st->cur_pkt.data;
1197 st->cur_len = st->cur_pkt.size;
1198 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1199 st->parser = av_parser_init(st->codec->codec_id);
1200 if (!st->parser) {
1201 /* no parser available: just output the raw packets */
1202 st->need_parsing = AVSTREAM_PARSE_NONE;
1203 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1204 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1205 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1206 st->parser->flags |= PARSER_FLAG_ONCE;
1207 }
1208 }
1209 }
1210 }
1211 if(s->debug & FF_FDEBUG_TS)
1212 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1213 pkt->stream_index,
1214 pkt->pts,
1215 pkt->dts,
1216 pkt->size,
1217 pkt->duration,
1218 pkt->flags);
1219
1220 return 0;
1221 }
1222
1223 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1224 {
1225 AVPacketList *pktl;
1226 int eof=0;
1227 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1228
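    /* With AVFMT_FLAG_GENPTS, packets missing a pts are held in the packet
       buffer until the dts of a later packet of the same stream lets us infer
       it; without the flag, packets are returned as soon as they are read. */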
1229 for(;;){
1230 pktl = s->packet_buffer;
1231 if (pktl) {
1232 AVPacket *next_pkt= &pktl->pkt;
1233
1234 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1235 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1236 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1237 if( pktl->pkt.stream_index == next_pkt->stream_index
1238 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1239 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1240 next_pkt->pts= pktl->pkt.dts;
1241 }
1242 pktl= pktl->next;
1243 }
1244 pktl = s->packet_buffer;
1245 }
1246
1247 if( next_pkt->pts != AV_NOPTS_VALUE
1248 || next_pkt->dts == AV_NOPTS_VALUE
1249 || !genpts || eof){
1250 /* read packet from packet buffer, if there is data */
1251 *pkt = *next_pkt;
1252 s->packet_buffer = pktl->next;
1253 av_free(pktl);
1254 return 0;
1255 }
1256 }
1257 if(genpts){
1258 int ret= av_read_frame_internal(s, pkt);
1259 if(ret<0){
1260 if(pktl && ret != AVERROR(EAGAIN)){
1261 eof=1;
1262 continue;
1263 }else
1264 return ret;
1265 }
1266
1267 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1268 &s->packet_buffer_end)) < 0)
1269 return AVERROR(ENOMEM);
1270 }else{
1271 assert(!s->packet_buffer);
1272 return av_read_frame_internal(s, pkt);
1273 }
1274 }
1275 }
1276
1277 /* empty and free the buffered packet queues */
1278 static void flush_packet_queue(AVFormatContext *s)
1279 {
1280 AVPacketList *pktl;
1281
1282 for(;;) {
1283 pktl = s->packet_buffer;
1284 if (!pktl)
1285 break;
1286 s->packet_buffer = pktl->next;
1287 av_free_packet(&pktl->pkt);
1288 av_free(pktl);
1289 }
1290 while(s->raw_packet_buffer){
1291 pktl = s->raw_packet_buffer;
1292 s->raw_packet_buffer = pktl->next;
1293 av_free_packet(&pktl->pkt);
1294 av_free(pktl);
1295 }
1296 s->packet_buffer_end=
1297 s->raw_packet_buffer_end= NULL;
1298 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1299 }
1300
1301 /*******************************************************/
1302 /* seek support */
1303
1304 int av_find_default_stream_index(AVFormatContext *s)
1305 {
1306 int first_audio_index = -1;
1307 int i;
1308 AVStream *st;
1309
1310 if (s->nb_streams <= 0)
1311 return -1;
1312 for(i = 0; i < s->nb_streams; i++) {
1313 st = s->streams[i];
1314 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1315 return i;
1316 }
1317 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1318 first_audio_index = i;
1319 }
1320 return first_audio_index >= 0 ? first_audio_index : 0;
1321 }
1322
1323 /**
1324 * Flush the frame reader.
1325 */
1326 void ff_read_frame_flush(AVFormatContext *s)
1327 {
1328 AVStream *st;
1329 int i, j;
1330
1331 flush_packet_queue(s);
1332
1333 s->cur_st = NULL;
1334
1335 /* for each stream, reset read state */
1336 for(i = 0; i < s->nb_streams; i++) {
1337 st = s->streams[i];
1338
1339 if (st->parser) {
1340 av_parser_close(st->parser);
1341 st->parser = NULL;
1342 av_free_packet(&st->cur_pkt);
1343 }
1344 st->last_IP_pts = AV_NOPTS_VALUE;
1345 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1346 st->reference_dts = AV_NOPTS_VALUE;
1347 /* fail safe */
1348 st->cur_ptr = NULL;
1349 st->cur_len = 0;
1350
1351 st->probe_packets = MAX_PROBE_PACKETS;
1352
1353 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1354 st->pts_buffer[j]= AV_NOPTS_VALUE;
1355 }
1356 }
1357
1358 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1359 int i;
1360
1361 for(i = 0; i < s->nb_streams; i++) {
1362 AVStream *st = s->streams[i];
1363
1364 st->cur_dts = av_rescale(timestamp,
1365 st->time_base.den * (int64_t)ref_st->time_base.num,
1366 st->time_base.num * (int64_t)ref_st->time_base.den);
1367 }
1368 }
1369
1370 void ff_reduce_index(AVFormatContext *s, int stream_index)
1371 {
1372 AVStream *st= s->streams[stream_index];
1373 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1374
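    /* If the index has grown past what max_index_size allows, drop every
       second entry to halve its memory footprint. */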
1375 if((unsigned)st->nb_index_entries >= max_entries){
1376 int i;
1377 for(i=0; 2*i<st->nb_index_entries; i++)
1378 st->index_entries[i]= st->index_entries[2*i];
1379 st->nb_index_entries= i;
1380 }
1381 }
1382
1383 int ff_add_index_entry(AVIndexEntry **index_entries,
1384 int *nb_index_entries,
1385 unsigned int *index_entries_allocated_size,
1386 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1387 {
1388 AVIndexEntry *entries, *ie;
1389 int index;
1390
1391 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1392 return -1;
1393
1394 entries = av_fast_realloc(*index_entries,
1395 index_entries_allocated_size,
1396 (*nb_index_entries + 1) *
1397 sizeof(AVIndexEntry));
1398 if(!entries)
1399 return -1;
1400
1401 *index_entries= entries;
1402
1403 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1404
1405 if(index<0){
1406 index= (*nb_index_entries)++;
1407 ie= &entries[index];
1408 assert(index==0 || ie[-1].timestamp < timestamp);
1409 }else{
1410 ie= &entries[index];
1411 if(ie->timestamp != timestamp){
1412 if(ie->timestamp <= timestamp)
1413 return -1;
1414 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1415 (*nb_index_entries)++;
1416 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1417 distance= ie->min_distance;
1418 }
1419
1420 ie->pos = pos;
1421 ie->timestamp = timestamp;
1422 ie->min_distance= distance;
1423 ie->size= size;
1424 ie->flags = flags;
1425
1426 return index;
1427 }
1428
1429 int av_add_index_entry(AVStream *st,
1430 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1431 {
1432 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1433 &st->index_entries_allocated_size, pos,
1434 timestamp, size, distance, flags);
1435 }
1436
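/* Binary-search the sorted index for wanted_timestamp. AVSEEK_FLAG_BACKWARD
   selects the entry at or before it; unless AVSEEK_FLAG_ANY is set, the
   result is then moved to the nearest keyframe entry in the seek direction. */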
1437 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1438 int64_t wanted_timestamp, int flags)
1439 {
1440 int a, b, m;
1441 int64_t timestamp;
1442
1443 a = - 1;
1444 b = nb_entries;
1445
1446 //optimize appending index entries at the end
1447 if(b && entries[b-1].timestamp < wanted_timestamp)
1448 a= b-1;
1449
1450 while (b - a > 1) {
1451 m = (a + b) >> 1;
1452 timestamp = entries[m].timestamp;
1453 if(timestamp >= wanted_timestamp)
1454 b = m;
1455 if(timestamp <= wanted_timestamp)
1456 a = m;
1457 }
1458 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1459
1460 if(!(flags & AVSEEK_FLAG_ANY)){
1461 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1462 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1463 }
1464 }
1465
1466 if(m == nb_entries)
1467 return -1;
1468 return m;
1469 }
1470
1471 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1472 int flags)
1473 {
1474 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1475 wanted_timestamp, flags);
1476 }
1477
1478 #define DEBUG_SEEK
1479
1480 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1481 AVInputFormat *avif= s->iformat;
1482 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1483 int64_t ts_min, ts_max, ts;
1484 int index;
1485 int64_t ret;
1486 AVStream *st;
1487
1488 if (stream_index < 0)
1489 return -1;
1490
1491 #ifdef DEBUG_SEEK
1492 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1493 #endif
1494
1495 ts_max=
1496 ts_min= AV_NOPTS_VALUE;
1497 pos_limit= -1; //gcc falsely says it may be uninitialized
1498
1499 st= s->streams[stream_index];
1500 if(st->index_entries){
1501 AVIndexEntry *e;
1502
1503 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1504 index= FFMAX(index, 0);
1505 e= &st->index_entries[index];
1506
1507 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1508 pos_min= e->pos;
1509 ts_min= e->timestamp;
1510 #ifdef DEBUG_SEEK
1511 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1512 pos_min,ts_min);
1513 #endif
1514 }else{
1515 assert(index==0);
1516 }
1517
1518 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1519 assert(index < st->nb_index_entries);
1520 if(index >= 0){
1521 e= &st->index_entries[index];
1522 assert(e->timestamp >= target_ts);
1523 pos_max= e->pos;
1524 ts_max= e->timestamp;
1525 pos_limit= pos_max - e->min_distance;
1526 #ifdef DEBUG_SEEK
1527 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1528 pos_max,pos_limit, ts_max);
1529 #endif
1530 }
1531 }
1532
1533 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1534 if(pos<0)
1535 return -1;
1536
1537 /* do the seek */
1538 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1539 return ret;
1540
1541 av_update_cur_dts(s, st, ts);
1542
1543 return 0;
1544 }
1545
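/* Generic timestamp search: given (pos,ts) bounds, repeatedly interpolate a
   candidate position from the target timestamp (falling back to bisection and
   finally to linear stepping), sampling the stream with read_timestamp() until
   the bounds converge on target_ts. */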
1546 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1547 int64_t pos, ts;
1548 int64_t start_pos, filesize;
1549 int no_change;
1550
1551 #ifdef DEBUG_SEEK
1552 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1553 #endif
1554
1555 if(ts_min == AV_NOPTS_VALUE){
1556 pos_min = s->data_offset;
1557 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1558 if (ts_min == AV_NOPTS_VALUE)
1559 return -1;
1560 }
1561
1562 if(ts_max == AV_NOPTS_VALUE){
1563 int step= 1024;
1564 filesize = avio_size(s->pb);
1565 pos_max = filesize - 1;
1566 do{
1567 pos_max -= step;
1568 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1569 step += step;
1570 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1571 if (ts_max == AV_NOPTS_VALUE)
1572 return -1;
1573
1574 for(;;){
1575 int64_t tmp_pos= pos_max + 1;
1576 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1577 if(tmp_ts == AV_NOPTS_VALUE)
1578 break;
1579 ts_max= tmp_ts;
1580 pos_max= tmp_pos;
1581 if(tmp_pos >= filesize)
1582 break;
1583 }
1584 pos_limit= pos_max;
1585 }
1586
1587 if(ts_min > ts_max){
1588 return -1;
1589 }else if(ts_min == ts_max){
1590 pos_limit= pos_min;
1591 }
1592
1593 no_change=0;
1594 while (pos_min < pos_limit) {
1595 #ifdef DEBUG_SEEK
1596 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1597 pos_min, pos_max,
1598 ts_min, ts_max);
1599 #endif
1600 assert(pos_limit <= pos_max);
1601
1602 if(no_change==0){
1603 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1604 // interpolate position (better than plain bisection)
1605 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1606 + pos_min - approximate_keyframe_distance;
1607 }else if(no_change==1){
1608 // bisection, if interpolation failed to change min or max pos last time
1609 pos = (pos_min + pos_limit)>>1;
1610 }else{
1611 /* linear search if bisection failed, can only happen if there
1612 are very few or no keyframes between min/max */
1613 pos=pos_min;
1614 }
1615 if(pos <= pos_min)
1616 pos= pos_min + 1;
1617 else if(pos > pos_limit)
1618 pos= pos_limit;
1619 start_pos= pos;
1620
1621 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1622 if(pos == pos_max)
1623 no_change++;
1624 else
1625 no_change=0;
1626 #ifdef DEBUG_SEEK
1627 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1628 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1629 start_pos, no_change);
1630 #endif
1631 if(ts == AV_NOPTS_VALUE){
1632 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1633 return -1;
1634 }
1635 assert(ts != AV_NOPTS_VALUE);
1636 if (target_ts <= ts) {
1637 pos_limit = start_pos - 1;
1638 pos_max = pos;
1639 ts_max = ts;
1640 }
1641 if (target_ts >= ts) {
1642 pos_min = pos;
1643 ts_min = ts;
1644 }
1645 }
1646
1647 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1648 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1649 #ifdef DEBUG_SEEK
1650 pos_min = pos;
1651 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1652 pos_min++;
1653 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1654 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1655 pos, ts_min, target_ts, ts_max);
1656 #endif
1657 *ts_ret= ts;
1658 return pos;
1659 }
1660
1661 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1662 int64_t pos_min, pos_max;
1663 #if 0
1664 AVStream *st;
1665
1666 if (stream_index < 0)
1667 return -1;
1668
1669 st= s->streams[stream_index];
1670 #endif
1671
1672 pos_min = s->data_offset;
1673 pos_max = avio_size(s->pb) - 1;
1674
1675 if (pos < pos_min) pos= pos_min;
1676 else if(pos > pos_max) pos= pos_max;
1677
1678 avio_seek(s->pb, pos, SEEK_SET);
1679
1680 #if 0
1681 av_update_cur_dts(s, st, ts);
1682 #endif
1683 return 0;
1684 }
1685
1686 static int av_seek_frame_generic(AVFormatContext *s,
1687 int stream_index, int64_t timestamp, int flags)
1688 {
1689 int index;
1690 int64_t ret;
1691 AVStream *st;
1692 AVIndexEntry *ie;
1693
1694 st = s->streams[stream_index];
1695
1696 index = av_index_search_timestamp(st, timestamp, flags);
1697
1698 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1699 return -1;
1700
1701 if(index < 0 || index==st->nb_index_entries-1){
1702 int i;
1703 AVPacket pkt;
1704
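        /* The target lies beyond the current index (or there is no index):
           seek to the last indexed position or to the start of the data and
           read packets forward until a keyframe past the target is seen, so
           that the index gets extended, then search it again. */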
1705 if(st->nb_index_entries){
1706 assert(st->index_entries);
1707 ie= &st->index_entries[st->nb_index_entries-1];
1708 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1709 return ret;
1710 av_update_cur_dts(s, st, ie->timestamp);
1711 }else{
1712 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1713 return ret;
1714 }
1715 for(i=0;; i++) {
1716 int ret;
1717 do{
1718 ret = av_read_frame(s, &pkt);
1719 }while(ret == AVERROR(EAGAIN));
1720 if(ret<0)
1721 break;
1722 av_free_packet(&pkt);
1723 if(stream_index == pkt.stream_index){
1724 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1725 break;
1726 }
1727 }
1728 index = av_index_search_timestamp(st, timestamp, flags);
1729 }
1730 if (index < 0)
1731 return -1;
1732
1733 ff_read_frame_flush(s);
1734 if (s->iformat->read_seek){
1735 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1736 return 0;
1737 }
1738 ie = &st->index_entries[index];
1739 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1740 return ret;
1741 av_update_cur_dts(s, st, ie->timestamp);
1742
1743 return 0;
1744 }
1745
1746 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1747 {
1748 int ret;
1749 AVStream *st;
1750
1751 ff_read_frame_flush(s);
1752
1753 if(flags & AVSEEK_FLAG_BYTE)
1754 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1755
1756 if(stream_index < 0){
1757 stream_index= av_find_default_stream_index(s);
1758 if(stream_index < 0)
1759 return -1;
1760
1761 st= s->streams[stream_index];
1762 /* timestamp for default must be expressed in AV_TIME_BASE units */
1763 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1764 }
1765
1766 /* first, we try the format specific seek */
1767 if (s->iformat->read_seek)
1768 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1769 else
1770 ret = -1;
1771 if (ret >= 0) {
1772 return 0;
1773 }
1774
1775 if(s->iformat->read_timestamp)
1776 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1777 else
1778 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1779 }
1780
1781 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1782 {
1783 if(min_ts > ts || max_ts < ts)
1784 return -1;
1785
1786 ff_read_frame_flush(s);
1787
1788 if (s->iformat->read_seek2)
1789 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1790
1791 if(s->iformat->read_timestamp){
1792 //try to seek via read_timestamp()
1793 }
1794
1795 //Fall back to the old API if the new one is not implemented but the old one is.
1796 //Note the old API has somewhat different semantics.
1797 if(s->iformat->read_seek || 1)
1798 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1799
1800 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1801 }
1802
1803 /*******************************************************/
1804
1805 /**
1806 * Return TRUE if any stream in the file has an accurate duration.
1807 *
1808 * @return TRUE if at least one stream has an accurate duration.
1809 */
1810 static int av_has_duration(AVFormatContext *ic)
1811 {
1812 int i;
1813 AVStream *st;
1814
1815 for(i = 0;i < ic->nb_streams; i++) {
1816 st = ic->streams[i];
1817 if (st->duration != AV_NOPTS_VALUE)
1818 return 1;
1819 }
1820 return 0;
1821 }
1822
1823 /**
1824 * Estimate the global stream timings from those of each component stream.
1825 *
1826 * Also computes the global bitrate if possible.
1827 */
1828 static void av_update_stream_timings(AVFormatContext *ic)
1829 {
1830 int64_t start_time, start_time1, end_time, end_time1;
1831 int64_t duration, duration1;
1832 int i;
1833 AVStream *st;
1834
1835 start_time = INT64_MAX;
1836 end_time = INT64_MIN;
1837 duration = INT64_MIN;
1838 for(i = 0;i < ic->nb_streams; i++) {
1839 st = ic->streams[i];
1840 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1841 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1842 if (start_time1 < start_time)
1843 start_time = start_time1;
1844 if (st->duration != AV_NOPTS_VALUE) {
1845 end_time1 = start_time1
1846 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1847 if (end_time1 > end_time)
1848 end_time = end_time1;
1849 }
1850 }
1851 if (st->duration != AV_NOPTS_VALUE) {
1852 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1853 if (duration1 > duration)
1854 duration = duration1;
1855 }
1856 }
1857 if (start_time != INT64_MAX) {
1858 ic->start_time = start_time;
1859 if (end_time != INT64_MIN) {
1860 if (end_time - start_time > duration)
1861 duration = end_time - start_time;
1862 }
1863 }
1864 if (duration != INT64_MIN) {
1865 ic->duration = duration;
1866 if (ic->file_size > 0) {
1867 /* compute the bitrate */
1868 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1869 (double)ic->duration;
1870 }
1871 }
1872 }
1873
1874 static void fill_all_stream_timings(AVFormatContext *ic)
1875 {
1876 int i;
1877 AVStream *st;
1878
1879 av_update_stream_timings(ic);
1880 for(i = 0;i < ic->nb_streams; i++) {
1881 st = ic->streams[i];
1882 if (st->start_time == AV_NOPTS_VALUE) {
1883 if(ic->start_time != AV_NOPTS_VALUE)
1884 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1885 if(ic->duration != AV_NOPTS_VALUE)
1886 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1887 }
1888 }
1889 }
1890
1891 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1892 {
1893 int64_t filesize, duration;
1894 int bit_rate, i;
1895 AVStream *st;
1896
1897 /* if bit_rate is already set, we believe it */
1898 if (ic->bit_rate <= 0) {
1899 bit_rate = 0;
1900 for(i=0;i<ic->nb_streams;i++) {
1901 st = ic->streams[i];
1902 if (st->codec->bit_rate > 0)
1903 bit_rate += st->codec->bit_rate;
1904 }
1905 ic->bit_rate = bit_rate;
1906 }
1907
1908 /* if duration is already set, we believe it */
1909 if (ic->duration == AV_NOPTS_VALUE &&
1910 ic->bit_rate != 0 &&
1911 ic->file_size != 0) {
1912 filesize = ic->file_size;
1913 if (filesize > 0) {
1914 for(i = 0; i < ic->nb_streams; i++) {
1915 st = ic->streams[i];
1916 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1917 if (st->duration == AV_NOPTS_VALUE)
1918 st->duration = duration;
1919 }
1920 }
1921 }
1922 }
1923
1924 #define DURATION_MAX_READ_SIZE 250000
1925 #define DURATION_MAX_RETRY 3
1926
1927 /* only usable for MPEG-PS streams */
1928 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1929 {
1930 AVPacket pkt1, *pkt = &pkt1;
1931 AVStream *st;
1932 int read_size, i, ret;
1933 int64_t end_time;
1934 int64_t filesize, offset, duration;
1935 int retry=0;
1936
1937 ic->cur_st = NULL;
1938
1939 /* flush packet queue */
1940 flush_packet_queue(ic);
1941
1942 for (i=0; i<ic->nb_streams; i++) {
1943 st = ic->streams[i];
1944 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1945 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");
1946
1947 if (st->parser) {
1948 av_parser_close(st->parser);
1949 st->parser= NULL;
1950 av_free_packet(&st->cur_pkt);
1951 }
1952 }
1953
1954 /* estimate the end time (duration) */
1955 /* XXX: may need to support wrapping */
1956 filesize = ic->file_size;
1957 end_time = AV_NOPTS_VALUE;
1958 do{
1959 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1960 if (offset < 0)
1961 offset = 0;
1962
1963 avio_seek(ic->pb, offset, SEEK_SET);
1964 read_size = 0;
1965 for(;;) {
1966 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1967 break;
1968
1969 do{
1970 ret = av_read_packet(ic, pkt);
1971 }while(ret == AVERROR(EAGAIN));
1972 if (ret != 0)
1973 break;
1974 read_size += pkt->size;
1975 st = ic->streams[pkt->stream_index];
1976 if (pkt->pts != AV_NOPTS_VALUE &&
1977 (st->start_time != AV_NOPTS_VALUE ||
1978 st->first_dts != AV_NOPTS_VALUE)) {
1979 duration = end_time = pkt->pts;
1980 if (st->start_time != AV_NOPTS_VALUE) duration -= st->start_time;
1981 else duration -= st->first_dts;
1982 if (duration < 0)
1983 duration += 1LL<<st->pts_wrap_bits;
1984 if (duration > 0) {
1985 if (st->duration == AV_NOPTS_VALUE ||
1986 st->duration < duration)
1987 st->duration = duration;
1988 }
1989 }
1990 av_free_packet(pkt);
1991 }
1992 }while( end_time==AV_NOPTS_VALUE
1993 && filesize > (DURATION_MAX_READ_SIZE<<retry)
1994 && ++retry <= DURATION_MAX_RETRY);
1995
1996 fill_all_stream_timings(ic);
1997
1998 avio_seek(ic->pb, old_offset, SEEK_SET);
1999 for (i=0; i<ic->nb_streams; i++) {
2000 st= ic->streams[i];
2001 st->cur_dts= st->first_dts;
2002 st->last_IP_pts = AV_NOPTS_VALUE;
2003 }
2004 }
2005
2006 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
2007 {
2008 int64_t file_size;
2009
2010 /* get the file size, if possible */
2011 if (ic->iformat->flags & AVFMT_NOFILE) {
2012 file_size = 0;
2013 } else {
2014 file_size = avio_size(ic->pb);
2015 if (file_size < 0)
2016 file_size = 0;
2017 }
2018 ic->file_size = file_size;
2019
2020 if ((!strcmp(ic->iformat->name, "mpeg") ||
2021 !strcmp(ic->iformat->name, "mpegts")) &&
2022 file_size && !url_is_streamed(ic->pb)) {
2023 /* get accurate estimate from the PTSes */
2024 av_estimate_timings_from_pts(ic, old_offset);
2025 } else if (av_has_duration(ic)) {
2026 /* at least one component has timings - we use them for all
2027 the components */
2028 fill_all_stream_timings(ic);
2029 } else {
2030 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2031 /* less precise: use bitrate info */
2032 av_estimate_timings_from_bit_rate(ic);
2033 }
2034 av_update_stream_timings(ic);
2035
2036 #if 0
2037 {
2038 int i;
2039 AVStream *st;
2040 for(i = 0;i < ic->nb_streams; i++) {
2041 st = ic->streams[i];
2042 printf("%d: start_time: %0.3f duration: %0.3f\n",
2043 i, (double)st->start_time / AV_TIME_BASE,
2044 (double)st->duration / AV_TIME_BASE);
2045 }
2046 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2047 (double)ic->start_time / AV_TIME_BASE,
2048 (double)ic->duration / AV_TIME_BASE,
2049 ic->bit_rate / 1000);
2050 }
2051 #endif
2052 }
2053
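/**
 * Return 1 if enough codec parameters are known to consider the stream fully
 * probed: a codec id, plus sample rate/channels/sample format for audio
 * (and a frame size for some audio codecs) or width/pixel format for video.
 */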
2054 static int has_codec_parameters(AVCodecContext *enc)
2055 {
2056 int val;
2057 switch(enc->codec_type) {
2058 case AVMEDIA_TYPE_AUDIO:
2059 val = enc->sample_rate && enc->channels && enc->sample_fmt != AV_SAMPLE_FMT_NONE;
2060 if(!enc->frame_size &&
2061 (enc->codec_id == CODEC_ID_VORBIS ||
2062 enc->codec_id == CODEC_ID_AAC ||
2063 enc->codec_id == CODEC_ID_MP1 ||
2064 enc->codec_id == CODEC_ID_MP2 ||
2065 enc->codec_id == CODEC_ID_MP3 ||
2066 enc->codec_id == CODEC_ID_SPEEX))
2067 return 0;
2068 break;
2069 case AVMEDIA_TYPE_VIDEO:
2070 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
2071 break;
2072 default:
2073 val = 1;
2074 break;
2075 }
2076 return enc->codec_id != CODEC_ID_NONE && val != 0;
2077 }
2078
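/* For H.264 the decode delay (number of reordered frames) is only trusted
   after enough frames have been seen; other codecs need no such guess. */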
2079 static int has_decode_delay_been_guessed(AVStream *st)
2080 {
2081 return st->codec->codec_id != CODEC_ID_H264 ||
2082 st->codec_info_nb_frames >= 6 + st->codec->has_b_frames;
2083 }
2084
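/**
 * Decode one packet with the stream's codec (opening the decoder first if
 * necessary) so that missing codec parameters and the decode delay can be
 * filled in from the bitstream.
 */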
2085 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
2086 {
2087 int16_t *samples;
2088 AVCodec *codec;
2089 int got_picture, data_size, ret=0;
2090 AVFrame picture;
2091
2092 if(!st->codec->codec){
2093 codec = avcodec_find_decoder(st->codec->codec_id);
2094 if (!codec)
2095 return -1;
2096 ret = avcodec_open(st->codec, codec);
2097 if (ret < 0)
2098 return ret;
2099 }
2100
2101 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)){
2102 switch(st->codec->codec_type) {
2103 case AVMEDIA_TYPE_VIDEO:
2104 avcodec_get_frame_defaults(&picture);
2105 ret = avcodec_decode_video2(st->codec, &picture,
2106 &got_picture, avpkt);
2107 break;
2108 case AVMEDIA_TYPE_AUDIO:
2109 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2110 samples = av_malloc(data_size);
2111 if (!samples)
2112 goto fail;
2113 ret = avcodec_decode_audio3(st->codec, samples,
2114 &data_size, avpkt);
2115 av_free(samples);
2116 break;
2117 default:
2118 break;
2119 }
2120 }
2121 fail:
2122 return ret;
2123 }
2124
2125 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2126 {
2127 while (tags->id != CODEC_ID_NONE) {
2128 if (tags->id == id)
2129 return tags->tag;
2130 tags++;
2131 }
2132 return 0;
2133 }
2134
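/* Look up a codec id by tag: first try an exact match, then fall back to a
   case-insensitive comparison via ff_toupper4(). */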
2135 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2136 {
2137 int i;
2138 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2139 if(tag == tags[i].tag)
2140 return tags[i].id;
2141 }
2142 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2143 if (ff_toupper4(tag) == ff_toupper4(tags[i].tag))
2144 return tags[i].id;
2145 }
2146 return CODEC_ID_NONE;
2147 }
2148
2149 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2150 {
2151 int i;
2152 for(i=0; tags && tags[i]; i++){
2153 int tag= ff_codec_get_tag(tags[i], id);
2154 if(tag) return tag;
2155 }
2156 return 0;
2157 }
2158
2159 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2160 {
2161 int i;
2162 for(i=0; tags && tags[i]; i++){
2163 enum CodecID id= ff_codec_get_id(tags[i], tag);
2164 if(id!=CODEC_ID_NONE) return id;
2165 }
2166 return CODEC_ID_NONE;
2167 }
2168
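/* Fill in missing chapter end times: each open-ended chapter ends where the
   next one starts, and the last one ends at start_time + duration. */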
2169 static void compute_chapters_end(AVFormatContext *s)
2170 {
2171 unsigned int i;
2172
2173 for (i=0; i+1<s->nb_chapters; i++)
2174 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2175 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
2176 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
2177 s->chapters[i]->end = s->chapters[i+1]->start;
2178 }
2179
2180 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
2181 assert(s->start_time != AV_NOPTS_VALUE);
2182 assert(s->duration > 0);
2183 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
2184 AV_TIME_BASE_Q,
2185 s->chapters[i]->time_base);
2186 }
2187 }
2188
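/* Return the i-th standard frame rate scaled by 12*1001: multiples of
   1/12 fps up to 60 fps, followed by the NTSC-style rates 24000/1001,
   30000/1001, 60000/1001, 12000/1001 and 15000/1001. */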
2189 static int get_std_framerate(int i){
2190 if(i<60*12) return i*1001;
2191 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2192 }
2193
2194 /*
2195 * Is the time base unreliable?
2196 * This is a heuristic to balance between quick acceptance of the values in
2197 * the headers vs. some extra checks.
2198 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2199 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2200 * And there are "variable" fps files this needs to detect as well.
2201 */
2202 static int tb_unreliable(AVCodecContext *c){
2203 if( c->time_base.den >= 101L*c->time_base.num
2204 || c->time_base.den < 5L*c->time_base.num
2205 /* || c->codec_tag == AV_RL32("DIVX")
2206 || c->codec_tag == AV_RL32("XVID")*/
2207 || c->codec_id == CODEC_ID_MPEG2VIDEO
2208 || c->codec_id == CODEC_ID_H264
2209 )
2210 return 1;
2211 return 0;
2212 }
2213
2214 int av_find_stream_info(AVFormatContext *ic)
2215 {
2216 int i, count, ret, read_size, j;
2217 AVStream *st;
2218 AVPacket pkt1, *pkt;
2219 int64_t old_offset = avio_tell(ic->pb);
2220
2221 for(i=0;i<ic->nb_streams;i++) {
2222 AVCodec *codec;
2223 st = ic->streams[i];
2224 if (st->codec->codec_id == CODEC_ID_AAC) {
2225 st->codec->sample_rate = 0;
2226 st->codec->frame_size = 0;
2227 st->codec->channels = 0;
2228 }
2229 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2230 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2231 /* if(!st->time_base.num)
2232 st->time_base= */
2233 if(!st->codec->time_base.num)
2234 st->codec->time_base= st->time_base;
2235 }
2236 //only for the split stuff
2237 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2238 st->parser = av_parser_init(st->codec->codec_id);
2239 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2240 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2241 }
2242 }
2243 assert(!st->codec->codec);
2244 codec = avcodec_find_decoder(st->codec->codec_id);
2245
2246 /* Force decoding of at least one frame of codec data;
2247 * this makes sure the codec initializes the channel configuration
2248 * and does not trust the values from the container.
2249 */
2250 if (codec && codec->capabilities & CODEC_CAP_CHANNEL_CONF)
2251 st->codec->channels = 0;
2252
2253 /* Ensure that subtitle_header is properly set. */
2254 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2255 && codec && !st->codec->codec)
2256 avcodec_open(st->codec, codec);
2257
2258 //try to just open decoders, in case this is enough to get parameters
2259 if(!has_codec_parameters(st->codec)){
2260 if (codec && !st->codec->codec)
2261 avcodec_open(st->codec, codec);
2262 }
2263 }
2264
2265 for (i=0; i<ic->nb_streams; i++) {
2266 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2267 }
2268
2269 count = 0;
2270 read_size = 0;
2271 for(;;) {
2272 if(url_interrupt_cb()){
2273 ret= AVERROR_EXIT;
2274 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2275 break;
2276 }
2277
2278 /* check if one codec still needs to be handled */
2279 for(i=0;i<ic->nb_streams;i++) {
2280 st = ic->streams[i];
2281 if (!has_codec_parameters(st->codec))
2282 break;
2283 /* variable fps and no guess at the real fps */
2284 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2285 && st->info->duration_count<20 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2286 break;
2287 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2288 break;
2289 if(st->first_dts == AV_NOPTS_VALUE)
2290 break;
2291 }
2292 if (i == ic->nb_streams) {
2293 /* NOTE: if the format has no header, then we need to read
2294 some packets to get most of the streams, so we cannot
2295 stop here */
2296 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2297 /* if we found the info for all the codecs, we can stop */
2298 ret = count;
2299 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2300 break;
2301 }
2302 }
2303 /* we did not get all the codec info, but we read too much data */
2304 if (read_size >= ic->probesize) {
2305 ret = count;
2306 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2307 break;
2308 }
2309
2310 /* NOTE: a new stream can be added here if there is no header in the
2311 file (AVFMTCTX_NOHEADER) */
2312 ret = av_read_frame_internal(ic, &pkt1);
2313 if (ret < 0 && ret != AVERROR(EAGAIN)) {
2314 /* EOF or error */
2315 ret = -1; /* we could not get all the codec parameters before EOF */
2316 for(i=0;i<ic->nb_streams;i++) {
2317 st = ic->streams[i];
2318 if (!has_codec_parameters(st->codec)){
2319 char buf[256];
2320 avcodec_string(buf, sizeof(buf), st->codec, 0);
2321 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2322 } else {
2323 ret = 0;
2324 }
2325 }
2326 break;
2327 }
2328
2329 if (ret == AVERROR(EAGAIN))
2330 continue;
2331
2332 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2333 if ((ret = av_dup_packet(pkt)) < 0)
2334 goto find_stream_info_err;
2335
2336 read_size += pkt->size;
2337
2338 st = ic->streams[pkt->stream_index];
2339 if (st->codec_info_nb_frames>1) {
2340 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2341 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2342 break;
2343 }
2344 st->info->codec_info_duration += pkt->duration;
2345 }
2346 {
2347 int64_t last = st->info->last_dts;
2348 int64_t duration= pkt->dts - last;
2349
2350 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2351 double dur= duration * av_q2d(st->time_base);
2352
2353 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2354 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2355 if (st->info->duration_count < 2)
2356 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
2357 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2358 int framerate= get_std_framerate(i);
2359 int ticks= lrintf(dur*framerate/(1001*12));
2360 double error= dur - ticks*1001*12/(double)framerate;
2361 st->info->duration_error[i] += error*error;
2362 }
2363 st->info->duration_count++;
2364 // ignore the first 4 values, as they might have some random jitter
2365 if (st->info->duration_count > 3)
2366 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2367 }
2368 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2369 st->info->last_dts = pkt->dts;
2370 }
2371 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2372 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2373 if(i){
2374 st->codec->extradata_size= i;
2375 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2376 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2377 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2378 }
2379 }
2380
2381 /* if still no information, we try to open the codec and to
2382 decompress the frame. We try to avoid that in most cases as
2383 it takes longer and uses more memory. For MPEG-4, we need to
2384 decompress for QuickTime. */
2385 if (!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st))
2386 try_decode_frame(st, pkt);
2387
2388 st->codec_info_nb_frames++;
2389 count++;
2390 }
2391
2392 // close codecs which were opened in try_decode_frame()
2393 for(i=0;i<ic->nb_streams;i++) {
2394 st = ic->streams[i];
2395 if(st->codec->codec)
2396 avcodec_close(st->codec);
2397 }
2398 for(i=0;i<ic->nb_streams;i++) {
2399 st = ic->streams[i];
2400 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2401 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2402 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2403 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2404 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2405 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2406 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2407
2408 // the check for tb_unreliable() is not completely correct, since this is not about handling
2409 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2410 // ipmovie.c produces.
2411 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2412 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2413 if (st->info->duration_count && !st->r_frame_rate.num
2414 && tb_unreliable(st->codec) /*&&
2415 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2416 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2417 int num = 0;
2418 double best_error= 2*av_q2d(st->time_base);
2419 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2420
2421 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2422 double error = st->info->duration_error[j] * get_std_framerate(j);
2423 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2424 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2425 if(error < best_error){
2426 best_error= error;
2427 num = get_std_framerate(j);
2428 }
2429 }
2430 // do not increase frame rate by more than 1 % in order to match a standard rate.
2431 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2432 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2433 }
2434
2435 if (!st->r_frame_rate.num){
2436 if( st->codec->time_base.den * (int64_t)st->time_base.num
2437 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2438 st->r_frame_rate.num = st->codec->time_base.den;
2439 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2440 }else{
2441 st->r_frame_rate.num = st->time_base.den;
2442 st->r_frame_rate.den = st->time_base.num;
2443 }
2444 }
2445 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2446 if(!st->codec->bits_per_coded_sample)
2447 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2448 }
2449 }
2450
2451 av_estimate_timings(ic, old_offset);
2452
2453 compute_chapters_end(ic);
2454
2455 #if 0
2456 /* correct DTS for B-frame streams with no timestamps */
2457 for(i=0;i<ic->nb_streams;i++) {
2458 st = ic->streams[i];
2459 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2460 if(b-frames){
2461 ppktl = &ic->packet_buffer;
2462 while(ppkt1){
2463 if(ppkt1->stream_index != i)
2464 continue;
2465 if(ppkt1->pkt->dts < 0)
2466 break;
2467 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2468 break;
2469 ppkt1->pkt->dts -= delta;
2470 ppkt1= ppkt1->next;
2471 }
2472 if(ppkt1)
2473 continue;
2474 st->cur_dts -= delta;
2475 }
2476 }
2477 }
2478 #endif
2479
2480 find_stream_info_err:
2481 for (i=0; i < ic->nb_streams; i++)
2482 av_freep(&ic->streams[i]->info);
2483 return ret;
2484 }
2485
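/* Return the first program that contains stream index s, or NULL if the
   stream does not belong to any program. */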
2486 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2487 {
2488 int i, j;
2489
2490 for (i = 0; i < ic->nb_programs; i++)
2491 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2492 if (ic->programs[i]->stream_index[j] == s)
2493 return ic->programs[i];
2494 return NULL;
2495 }
2496
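/* Typical use (a minimal sketch; error handling omitted, and the opened
 * AVFormatContext "ic" is assumed to exist in the caller):
 *
 *     AVCodec *dec = NULL;
 *     int idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (idx >= 0 && dec)
 *         avcodec_open(ic->streams[idx]->codec, dec);
 */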
2497 int av_find_best_stream(AVFormatContext *ic,
2498 enum AVMediaType type,
2499 int wanted_stream_nb,
2500 int related_stream,
2501 AVCodec **decoder_ret,
2502 int flags)
2503 {
2504 int i, nb_streams = ic->nb_streams, stream_number = 0;
2505 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2506 unsigned *program = NULL;
2507 AVCodec *decoder = NULL, *best_decoder = NULL;
2508
2509 if (related_stream >= 0 && wanted_stream_nb < 0) {
2510 AVProgram *p = find_program_from_stream(ic, related_stream);
2511 if (p) {
2512 program = p->stream_index;
2513 nb_streams = p->nb_stream_indexes;
2514 }
2515 }
2516 for (i = 0; i < nb_streams; i++) {
2517 AVStream *st = ic->streams[program ? program[i] : i];
2518 AVCodecContext *avctx = st->codec;
2519 if (avctx->codec_type != type)
2520 continue;
2521 if (wanted_stream_nb >= 0 && stream_number++ != wanted_stream_nb)
2522 continue;
2523 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2524 continue;
2525 if (decoder_ret) {
2526 decoder = avcodec_find_decoder(st->codec->codec_id);
2527 if (!decoder) {
2528 if (ret < 0)
2529 ret = AVERROR_DECODER_NOT_FOUND;
2530 continue;
2531 }
2532 }
2533 if (best_count >= st->codec_info_nb_frames)
2534 continue;
2535 best_count = st->codec_info_nb_frames;
2536 ret = program ? program[i] : i;
2537 best_decoder = decoder;
2538 if (program && i == nb_streams - 1 && ret < 0) {
2539 program = NULL;
2540 nb_streams = ic->nb_streams;
2541 i = 0; /* no related stream found, try again with everything */
2542 }
2543 }
2544 if (decoder_ret)
2545 *decoder_ret = best_decoder;
2546 return ret;
2547 }
2548
2549 /*******************************************************/
2550
2551 int av_read_play(AVFormatContext *s)
2552 {
2553 if (s->iformat->read_play)
2554 return s->iformat->read_play(s);
2555 if (s->pb)
2556 return av_url_read_fpause(s->pb, 0);
2557 return AVERROR(ENOSYS);
2558 }
2559
2560 int av_read_pause(AVFormatContext *s)
2561 {
2562 if (s->iformat->read_pause)
2563 return s->iformat->read_pause(s);
2564 if (s->pb)
2565 return av_url_read_fpause(s->pb, 1);
2566 return AVERROR(ENOSYS);
2567 }
2568
2569 void av_close_input_stream(AVFormatContext *s)
2570 {
2571 flush_packet_queue(s);
2572 if (s->iformat->read_close)
2573 s->iformat->read_close(s);
2574 avformat_free_context(s);
2575 }
2576
2577 void avformat_free_context(AVFormatContext *s)
2578 {
2579 int i;
2580 AVStream *st;
2581
2582 for(i=0;i<s->nb_streams;i++) {
2583 /* free all data in a stream component */
2584 st = s->streams[i];
2585 if (st->parser) {
2586 av_parser_close(st->parser);
2587 av_free_packet(&st->cur_pkt);
2588 }
2589 av_metadata_free(&st->metadata);
2590 av_free(st->index_entries);
2591 av_free(st->codec->extradata);
2592 av_free(st->codec->subtitle_header);
2593 av_free(st->codec);
2594 #if FF_API_OLD_METADATA
2595 av_free(st->filename);
2596 #endif
2597 av_free(st->priv_data);
2598 av_free(st->info);
2599 av_free(st);
2600 }
2601 for(i=s->nb_programs-1; i>=0; i--) {
2602 #if FF_API_OLD_METADATA
2603 av_freep(&s->programs[i]->provider_name);
2604 av_freep(&s->programs[i]->name);
2605 #endif
2606 av_metadata_free(&s->programs[i]->metadata);
2607 av_freep(&s->programs[i]->stream_index);
2608 av_freep(&s->programs[i]);
2609 }
2610 av_freep(&s->programs);
2611 av_freep(&s->priv_data);
2612 while(s->nb_chapters--) {
2613 #if FF_API_OLD_METADATA
2614 av_free(s->chapters[s->nb_chapters]->title);
2615 #endif
2616 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2617 av_free(s->chapters[s->nb_chapters]);
2618 }
2619 av_freep(&s->chapters);
2620 av_metadata_free(&s->metadata);
2621 av_freep(&s->key);
2622 av_free(s);
2623 }
2624
2625 void av_close_input_file(AVFormatContext *s)
2626 {
2627 AVIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2628 av_close_input_stream(s);
2629 if (pb)
2630 avio_close(pb);
2631 }
2632
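/**
 * Add a new stream to the media file. The stream and its codec context are
 * freed together with the AVFormatContext by avformat_free_context().
 */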
2633 AVStream *av_new_stream(AVFormatContext *s, int id)
2634 {
2635 AVStream *st;
2636 int i;
2637
2638 #if FF_API_MAX_STREAMS
2639 if (s->nb_streams >= MAX_STREAMS){
2640 av_log(s, AV_LOG_ERROR, "Too many streams\n");
2641 return NULL;
2642 }
2643 #else
2644 AVStream **streams;
2645
2646 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2647 return NULL;
2648 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2649 if (!streams)
2650 return NULL;
2651 s->streams = streams;
2652 #endif
2653
2654 st = av_mallocz(sizeof(AVStream));
2655 if (!st)
2656 return NULL;
2657 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2658 av_free(st);
2659 return NULL;
2660 }
2661
2662 st->codec= avcodec_alloc_context();
2663 if (s->iformat) {
2664 /* no default bitrate if decoding */
2665 st->codec->bit_rate = 0;
2666 }
2667 st->index = s->nb_streams;
2668 st->id = id;
2669 st->start_time = AV_NOPTS_VALUE;
2670 st->duration = AV_NOPTS_VALUE;
2671 /* we set the current DTS to 0 so that formats without any timestamps
2672 but with durations get some timestamps; formats with some unknown
2673 timestamps have their first few packets buffered and the
2674 timestamps corrected before they are returned to the user */
2675 st->cur_dts = 0;
2676 st->first_dts = AV_NOPTS_VALUE;
2677 st->probe_packets = MAX_PROBE_PACKETS;
2678
2679 /* default pts setting is MPEG-like */
2680 av_set_pts_info(st, 33, 1, 90000);
2681 st->last_IP_pts = AV_NOPTS_VALUE;
2682 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2683 st->pts_buffer[i]= AV_NOPTS_VALUE;
2684 st->reference_dts = AV_NOPTS_VALUE;
2685
2686 st->sample_aspect_ratio = (AVRational){0,1};
2687
2688 s->streams[s->nb_streams++] = st;
2689 return st;
2690 }
2691
2692 AVProgram *av_new_program(AVFormatContext *ac, int id)
2693 {
2694 AVProgram *program=NULL;
2695 int i;
2696
2697 #ifdef DEBUG_SI
2698 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2699 #endif
2700
2701 for(i=0; i<ac->nb_programs; i++)
2702 if(ac->programs[i]->id == id)
2703 program = ac->programs[i];
2704
2705 if(!program){
2706 program = av_mallocz(sizeof(AVProgram));
2707 if (!program)
2708 return NULL;
2709 dynarray_add(&ac->programs, &ac->nb_programs, program);
2710 program->discard = AVDISCARD_NONE;
2711 }
2712 program->id = id;
2713
2714 return program;
2715 }
2716
2717 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2718 {
2719 AVChapter *chapter = NULL;
2720 int i;
2721
2722 for(i=0; i<s->nb_chapters; i++)
2723 if(s->chapters[i]->id == id)
2724 chapter = s->chapters[i];
2725
2726 if(!chapter){
2727 chapter= av_mallocz(sizeof(AVChapter));
2728 if(!chapter)
2729 return NULL;
2730 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2731 }
2732 #if FF_API_OLD_METADATA
2733 av_free(chapter->title);
2734 #endif
2735 av_metadata_set2(&chapter->metadata, "title", title, 0);
2736 chapter->id = id;
2737 chapter->time_base= time_base;
2738 chapter->start = start;
2739 chapter->end = end;
2740
2741 return chapter;
2742 }
2743
2744 /************************************************************/
2745 /* output media file */
2746
2747 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2748 {
2749 int ret;
2750
2751 if (s->oformat->priv_data_size > 0) {
2752 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2753 if (!s->priv_data)
2754 return AVERROR(ENOMEM);
2755 if (s->oformat->priv_class) {
2756 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2757 av_opt_set_defaults(s->priv_data);
2758 }
2759 } else
2760 s->priv_data = NULL;
2761
2762 if (s->oformat->set_parameters) {
2763 ret = s->oformat->set_parameters(s, ap);
2764 if (ret < 0)
2765 return ret;
2766 }
2767 return 0;
2768 }
2769
2770 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2771 {
2772 const AVCodecTag *avctag;
2773 int n;
2774 enum CodecID id = CODEC_ID_NONE;
2775 unsigned int tag = 0;
2776
2777 /**
2778 * Check that tag + id is in the table
2779 * If neither is in the table -> OK
2780 * If tag is in the table with another id -> FAIL
2781 * If id is in the table with another tag -> FAIL unless strict < normal
2782 */
2783 for (n = 0; s->oformat->codec_tag[n]; n++) {
2784 avctag = s->oformat->codec_tag[n];
2785 while (avctag->id != CODEC_ID_NONE) {
2786 if (ff_toupper4(avctag->tag) == ff_toupper4(st->codec->codec_tag)) {
2787 id = avctag->id;
2788 if (id == st->codec->codec_id)
2789 return 1;
2790 }
2791 if (avctag->id == st->codec->codec_id)
2792 tag = avctag->tag;
2793 avctag++;
2794 }
2795 }
2796 if (id != CODEC_ID_NONE)
2797 return 0;
2798 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
2799 return 0;
2800 return 1;
2801 }
2802
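/* Typical muxing sequence (a minimal sketch; stream setup, error handling
 * and I/O open/close omitted; got_packet() is a placeholder for the caller's
 * packet source):
 *
 *     av_write_header(oc);
 *     while (got_packet(&pkt))
 *         av_interleaved_write_frame(oc, &pkt);
 *     av_write_trailer(oc);
 */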
2803 int av_write_header(AVFormatContext *s)
2804 {
2805 int ret, i;
2806 AVStream *st;
2807
2808 // some sanity checks
2809 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2810 av_log(s, AV_LOG_ERROR, "no streams\n");
2811 return AVERROR(EINVAL);
2812 }
2813
2814 for(i=0;i<s->nb_streams;i++) {
2815 st = s->streams[i];
2816
2817 switch (st->codec->codec_type) {
2818 case AVMEDIA_TYPE_AUDIO:
2819 if(st->codec->sample_rate<=0){
2820 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2821 return AVERROR(EINVAL);
2822 }
2823 if(!st->codec->block_align)
2824 st->codec->block_align = st->codec->channels *
2825 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2826 break;
2827 case AVMEDIA_TYPE_VIDEO:
2828 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2829 av_log(s, AV_LOG_ERROR, "time base not set\n");
2830 return AVERROR(EINVAL);
2831 }
2832 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2833 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2834 return AVERROR(EINVAL);
2835 }
2836 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2837 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2838 return AVERROR(EINVAL);
2839 }
2840 break;
2841 }
2842
2843 if(s->oformat->codec_tag){
2844 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2845 //the current rawvideo encoding system ends up setting the wrong codec_tag for AVI; we override it here
2846 st->codec->codec_tag= 0;
2847 }
2848 if(st->codec->codec_tag){
2849 if (!validate_codec_tag(s, st)) {
2850 char tagbuf[32];
2851 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2852 av_log(s, AV_LOG_ERROR,
2853 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2854 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2855 return AVERROR_INVALIDDATA;
2856 }
2857 }else
2858 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2859 }
2860
2861 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2862 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2863 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2864 }
2865
2866 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2867 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2868 if (!s->priv_data)
2869 return AVERROR(ENOMEM);
2870 }
2871
2872 #if FF_API_OLD_METADATA
2873 ff_metadata_mux_compat(s);
2874 #endif
2875
2876 /* set muxer identification string */
2877 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2878 av_metadata_set2(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2879 }
2880
2881 if(s->oformat->write_header){
2882 ret = s->oformat->write_header(s);
2883 if (ret < 0)
2884 return ret;
2885 }
2886
2887 /* init PTS generation */
2888 for(i=0;i<s->nb_streams;i++) {
2889 int64_t den = AV_NOPTS_VALUE;
2890 st = s->streams[i];
2891
2892 switch (st->codec->codec_type) {
2893 case AVMEDIA_TYPE_AUDIO:
2894 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2895 break;
2896 case AVMEDIA_TYPE_VIDEO:
2897 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2898 break;
2899 default:
2900 break;
2901 }
2902 if (den != AV_NOPTS_VALUE) {
2903 if (den <= 0)
2904 return AVERROR_INVALIDDATA;
2905 av_frac_init(&st->pts, 0, 0, den);
2906 }
2907 }
2908 return 0;
2909 }
2910
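/* Fill in missing pts/dts and duration for an outgoing packet and check that
   the timestamps are consistent and monotonically increasing per stream. */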
2911 //FIXME merge with compute_pkt_fields
2912 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
2913 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2914 int num, den, frame_size, i;
2915
2916 av_dlog(s, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
2917 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2918
2919 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2920 return -1;*/
2921
2922 /* duration field */
2923 if (pkt->duration == 0) {
2924 compute_frame_duration(&num, &den, st, NULL, pkt);
2925 if (den && num) {
2926 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2927 }
2928 }
2929
2930 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2931 pkt->pts= pkt->dts;
2932
2933 //XXX/FIXME this is a temporary hack until all encoders output pts
2934 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2935 pkt->dts=
2936 // pkt->pts= st->cur_dts;
2937 pkt->pts= st->pts.val;
2938 }
2939
2940 //calculate dts from pts
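// pts_buffer keeps the last delay+1 pts values sorted in increasing order;
// the smallest of them is the dts of the current packet once frame
// reordering is taken into account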
2941 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2942 st->pts_buffer[0]= pkt->pts;
2943 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2944 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
2945 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2946 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2947
2948 pkt->dts= st->pts_buffer[0];
2949 }
2950
2951 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2952 av_log(s, AV_LOG_ERROR,
2953 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
2954 st->index, st->cur_dts, pkt->dts);
2955 return -1;
2956 }
2957 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2958 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
2959 return -1;
2960 }
2961
2962 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2963 st->cur_dts= pkt->dts;
2964 st->pts.val= pkt->dts;
2965
2966 /* update pts */
2967 switch (st->codec->codec_type) {
2968 case AVMEDIA_TYPE_AUDIO:
2969 frame_size = get_audio_frame_size(st->codec, pkt->size);
2970
2971 /* HACK/FIXME: we skip the initial zero-sized packets as they most
2972 likely correspond to the encoder delay, but it would be better if we
2973 had the real timestamps from the encoder */
2974 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2975 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2976 }
2977 break;
2978 case AVMEDIA_TYPE_VIDEO:
2979 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2980 break;
2981 default:
2982 break;
2983 }
2984 return 0;
2985 }
2986
2987 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2988 {
2989 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
2990
2991 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2992 return ret;
2993
2994 ret= s->oformat->write_packet(s, pkt);
2995 return ret;
2996 }
2997
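/* Insert a packet into the interleaving buffer, keeping the buffer ordered
   according to the given comparison function. The search starts from the
   last buffered packet of the same stream so the whole list is not
   rescanned. */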
2998 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2999 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3000 {
3001 AVPacketList **next_point, *this_pktl;
3002
3003 this_pktl = av_mallocz(sizeof(AVPacketList));
3004 this_pktl->pkt= *pkt;
3005 pkt->destruct= NULL; // do not free original but only the copy
3006 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-allocated memory
3007
3008 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3009 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
3010 }else
3011 next_point = &s->packet_buffer;
3012
3013 if(*next_point){
3014 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3015 while(!compare(s, &(*next_point)->pkt, pkt)){
3016 next_point= &(*next_point)->next;
3017 }
3018 goto next_non_null;
3019 }else{
3020 next_point = &(s->packet_buffer_end->next);
3021 }
3022 }
3023 assert(!*next_point);
3024
3025 s->packet_buffer_end= this_pktl;
3026 next_non_null:
3027
3028 this_pktl->next= *next_point;
3029
3030 s->streams[pkt->stream_index]->last_in_packet_buffer=
3031 *next_point= this_pktl;
3032 }
3033
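/* Return 1 if pkt should be output before next, comparing their dts after
   rescaling pkt's dts into the time base of next's stream. */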
3034 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3035 {
3036 AVStream *st = s->streams[ pkt ->stream_index];
3037 AVStream *st2= s->streams[ next->stream_index];
3038 int64_t a= st2->time_base.num * (int64_t)st ->time_base.den;
3039 int64_t b= st ->time_base.num * (int64_t)st2->time_base.den;
3040 return av_rescale_rnd(pkt->dts, b, a, AV_ROUND_DOWN) < next->dts;
3041 }
3042
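/* Buffer the incoming packet and output the earliest buffered packet (by
   dts) once every stream has at least one packet queued, or when flushing. */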
3043 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
3044 AVPacketList *pktl;
3045 int stream_count=0;
3046 int i;
3047
3048 if(pkt){
3049 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3050 }
3051
3052 for(i=0; i < s->nb_streams; i++)
3053 stream_count+= !!s->streams[i]->last_in_packet_buffer;
3054
3055 if(stream_count && (s->nb_streams == stream_count || flush)){
3056 pktl= s->packet_buffer;
3057 *out= pktl->pkt;
3058
3059 s->packet_buffer= pktl->next;
3060 if(!s->packet_buffer)
3061 s->packet_buffer_end= NULL;
3062
3063 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3064 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3065 av_freep(&pktl);
3066 return 1;
3067 }else{
3068 av_init_packet(out);
3069 return 0;
3070 }
3071 }
3072
3073 /**
3074 * Interleave an AVPacket correctly so it can be muxed.
3075 * @param out the interleaved packet will be output here
3076 * @param in the input packet
3077 * @param flush 1 if no further packets are available as input and all
3078 * remaining packets should be output
3079 * @return 1 if a packet was output, 0 if no packet could be output,
3080 * < 0 if an error occurred
3081 */
3082 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3083 if(s->oformat->interleave_packet)
3084 return s->oformat->interleave_packet(s, out, in, flush);
3085 else
3086 return av_interleave_packet_per_dts(s, out, in, flush);
3087 }
3088
3089 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3090 AVStream *st= s->streams[ pkt->stream_index];
3091
3092 //FIXME/XXX/HACK drop zero-sized packets
3093 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3094 return 0;
3095
3096 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
3097 if(compute_pkt_fields2(s, st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3098 return -1;
3099
3100 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3101 return -1;
3102
3103 for(;;){
3104 AVPacket opkt;
3105 int ret= av_interleave_packet(s, &opkt, pkt, 0);
3106 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3107 return ret;
3108
3109 ret= s->oformat->write_packet(s, &opkt);
3110
3111 av_free_packet(&opkt);
3112 pkt= NULL;
3113
3114 if(ret<0)
3115 return ret;
3116 }
3117 }
3118
3119 int av_write_trailer(AVFormatContext *s)
3120 {
3121 int ret, i;
3122
3123 for(;;){
3124 AVPacket pkt;