1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "internal.h"
23 #include "libavcodec/internal.h"
24 #include "libavcodec/opt.h"
25 #include "metadata.h"
26 #include "libavutil/avstring.h"
27 #include "riff.h"
28 #include "audiointerleave.h"
29 #include <sys/time.h>
30 #include <time.h>
31 #include <strings.h>
32 #include <stdarg.h>
33 #if CONFIG_NETWORK
34 #include "network.h"
35 #endif
36
37 #undef NDEBUG
38 #include <assert.h>
39
40 /**
41 * @file
42 * various utility functions for use within FFmpeg
43 */
44
45 unsigned avformat_version(void)
46 {
47 return LIBAVFORMAT_VERSION_INT;
48 }
49
50 const char *avformat_configuration(void)
51 {
52 return FFMPEG_CONFIGURATION;
53 }
54
55 const char *avformat_license(void)
56 {
57 #define LICENSE_PREFIX "libavformat license: "
58 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
59 }
60
61 /* fraction handling */
62
63 /**
64 * f = val + (num / den) + 0.5.
65 *
66 * 'num' is normalized so that 0 <= num < den.
67 *
68 * @param f fractional number
69 * @param val integer value
70 * @param num must be >= 0
71 * @param den must be >= 1
72 */
73 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
74 {
75 num += (den >> 1);
76 if (num >= den) {
77 val += num / den;
78 num = num % den;
79 }
80 f->val = val;
81 f->num = num;
82 f->den = den;
83 }
84
85 /**
86 * Fractional addition to f: f = f + (incr / f->den).
87 *
88 * @param f fractional number
89 * @param incr increment, can be positive or negative
90 */
91 static void av_frac_add(AVFrac *f, int64_t incr)
92 {
93 int64_t num, den;
94
95 num = f->num + incr;
96 den = f->den;
97 if (num < 0) {
98 f->val += num / den;
99 num = num % den;
100 if (num < 0) {
101 num += den;
102 f->val--;
103 }
104 } else if (num >= den) {
105 f->val += num / den;
106 num = num % den;
107 }
108 f->num = num;
109 }
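/* Illustrative trace of the AVFrac helpers above, assuming an increment of
 * 3 units against den = 10 (e.g. a muxer accumulating fractional timestamps
 * without drift):
 *   av_frac_init(&f, 0, 0, 10)  ->  val = 0, num = 5   (the +0.5 rounding bias)
 *   av_frac_add(&f, 3)          ->  val = 0, num = 8
 *   av_frac_add(&f, 3)          ->  val = 1, num = 1   (8 + 3 = 11 carries one unit)
 *   av_frac_add(&f, 3)          ->  val = 1, num = 4
 * f->val is the integer part of the timestamp, f->num/f->den the pending remainder. */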
110
111 /** head of registered input format linked list */
112 AVInputFormat *first_iformat = NULL;
113 /** head of registered output format linked list */
114 AVOutputFormat *first_oformat = NULL;
115
116 AVInputFormat *av_iformat_next(AVInputFormat *f)
117 {
118 if(f) return f->next;
119 else return first_iformat;
120 }
121
122 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
123 {
124 if(f) return f->next;
125 else return first_oformat;
126 }
127
128 void av_register_input_format(AVInputFormat *format)
129 {
130 AVInputFormat **p;
131 p = &first_iformat;
132 while (*p != NULL) p = &(*p)->next;
133 *p = format;
134 format->next = NULL;
135 }
136
137 void av_register_output_format(AVOutputFormat *format)
138 {
139 AVOutputFormat **p;
140 p = &first_oformat;
141 while (*p != NULL) p = &(*p)->next;
142 *p = format;
143 format->next = NULL;
144 }
145
146 int av_match_ext(const char *filename, const char *extensions)
147 {
148 const char *ext, *p;
149 char ext1[32], *q;
150
151 if(!filename)
152 return 0;
153
154 ext = strrchr(filename, '.');
155 if (ext) {
156 ext++;
157 p = extensions;
158 for(;;) {
159 q = ext1;
160 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
161 *q++ = *p++;
162 *q = '\0';
163 if (!strcasecmp(ext1, ext))
164 return 1;
165 if (*p == '\0')
166 break;
167 p++;
168 }
169 }
170 return 0;
171 }
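/* Usage sketch for av_match_ext(): the second argument is a comma-separated,
 * case-insensitive extension list as found in AVInputFormat/AVOutputFormat;
 * the filenames below are only examples. */
#if 0
    av_match_ext("movie.MKV", "mkv,webm");  /* 1: matches "mkv" case-insensitively */
    av_match_ext("movie.avi", "mkv,webm");  /* 0: extension not in the list        */
    av_match_ext(NULL,        "mkv,webm");  /* 0: a NULL filename is tolerated     */
#endif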
172
173 static int match_format(const char *name, const char *names)
174 {
175 const char *p;
176 int len, namelen;
177
178 if (!name || !names)
179 return 0;
180
181 namelen = strlen(name);
182 while ((p = strchr(names, ','))) {
183 len = FFMAX(p - names, namelen);
184 if (!strncasecmp(name, names, len))
185 return 1;
186 names = p+1;
187 }
188 return !strcasecmp(name, names);
189 }
190
191 #if LIBAVFORMAT_VERSION_MAJOR < 53
192 AVOutputFormat *guess_format(const char *short_name, const char *filename,
193 const char *mime_type)
194 {
195 return av_guess_format(short_name, filename, mime_type);
196 }
197 #endif
198
199 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
200 const char *mime_type)
201 {
202 AVOutputFormat *fmt, *fmt_found;
203 int score_max, score;
204
205 /* specific test for image sequences */
206 #if CONFIG_IMAGE2_MUXER
207 if (!short_name && filename &&
208 av_filename_number_test(filename) &&
209 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
210 return av_guess_format("image2", NULL, NULL);
211 }
212 #endif
213 /* Find the proper file type. */
214 fmt_found = NULL;
215 score_max = 0;
216 fmt = first_oformat;
217 while (fmt != NULL) {
218 score = 0;
219 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
220 score += 100;
221 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
222 score += 10;
223 if (filename && fmt->extensions &&
224 av_match_ext(filename, fmt->extensions)) {
225 score += 5;
226 }
227 if (score > score_max) {
228 score_max = score;
229 fmt_found = fmt;
230 }
231 fmt = fmt->next;
232 }
233 return fmt_found;
234 }
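/* Usage sketch for av_guess_format(): any argument may be NULL and the
 * candidate matching the most criteria wins (name = 100, MIME type = 10,
 * extension = 5).  The names and the MIME string are only examples and
 * assume the corresponding muxers were registered via av_register_all(). */
#if 0
    AVOutputFormat *ofmt;
    ofmt = av_guess_format("matroska", NULL, NULL);    /* by short name */
    ofmt = av_guess_format(NULL, "out.mp4", NULL);     /* by extension  */
    ofmt = av_guess_format(NULL, NULL, "video/x-flv"); /* by MIME type  */
    if (!ofmt) {
        /* no registered muxer matched any of the hints */
    }
#endif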
235
236 #if LIBAVFORMAT_VERSION_MAJOR < 53
237 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
238 const char *mime_type)
239 {
240 AVOutputFormat *fmt = av_guess_format(short_name, filename, mime_type);
241
242 if (fmt) {
243 AVOutputFormat *stream_fmt;
244 char stream_format_name[64];
245
246 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
247 stream_fmt = av_guess_format(stream_format_name, NULL, NULL);
248
249 if (stream_fmt)
250 fmt = stream_fmt;
251 }
252
253 return fmt;
254 }
255 #endif
256
257 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
258 const char *filename, const char *mime_type, enum AVMediaType type){
259 if(type == AVMEDIA_TYPE_VIDEO){
260 enum CodecID codec_id= CODEC_ID_NONE;
261
262 #if CONFIG_IMAGE2_MUXER
263 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
264 codec_id= av_guess_image2_codec(filename);
265 }
266 #endif
267 if(codec_id == CODEC_ID_NONE)
268 codec_id= fmt->video_codec;
269 return codec_id;
270 }else if(type == AVMEDIA_TYPE_AUDIO)
271 return fmt->audio_codec;
272 else
273 return CODEC_ID_NONE;
274 }
275
276 AVInputFormat *av_find_input_format(const char *short_name)
277 {
278 AVInputFormat *fmt;
279 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
280 if (match_format(short_name, fmt->name))
281 return fmt;
282 }
283 return NULL;
284 }
285
286 #if LIBAVFORMAT_VERSION_MAJOR < 53 && CONFIG_SHARED && HAVE_SYMVER
287 FF_SYMVER(void, av_destruct_packet_nofree, (AVPacket *pkt), "LIBAVFORMAT_52")
288 {
289 av_destruct_packet_nofree(pkt);
290 }
291
292 FF_SYMVER(void, av_destruct_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
293 {
294 av_destruct_packet(pkt);
295 }
296
297 FF_SYMVER(int, av_new_packet, (AVPacket *pkt, int size), "LIBAVFORMAT_52")
298 {
299 return av_new_packet(pkt, size);
300 }
301
302 FF_SYMVER(int, av_dup_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
303 {
304 return av_dup_packet(pkt);
305 }
306
307 FF_SYMVER(void, av_free_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
308 {
309 av_free_packet(pkt);
310 }
311
312 FF_SYMVER(void, av_init_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
313 {
314 av_log(NULL, AV_LOG_WARNING, "Diverting av_*_packet function calls to libavcodec. Recompile to improve performance\n");
315 av_init_packet(pkt);
316 }
317 #endif
318
319 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
320 {
321 int ret= av_new_packet(pkt, size);
322
323 if(ret<0)
324 return ret;
325
326 pkt->pos= url_ftell(s);
327
328 ret= get_buffer(s, pkt->data, size);
329 if(ret<=0)
330 av_free_packet(pkt);
331 else
332 av_shrink_packet(pkt, ret);
333
334 return ret;
335 }
336
337
338 int av_filename_number_test(const char *filename)
339 {
340 char buf[1024];
341 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
342 }
343
344 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
345 {
346 AVInputFormat *fmt1, *fmt;
347 int score;
348
349 fmt = NULL;
350 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
351 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
352 continue;
353 score = 0;
354 if (fmt1->read_probe) {
355 score = fmt1->read_probe(pd);
356 } else if (fmt1->extensions) {
357 if (av_match_ext(pd->filename, fmt1->extensions)) {
358 score = 50;
359 }
360 }
361 if (score > *score_max) {
362 *score_max = score;
363 fmt = fmt1;
364 }else if (score == *score_max)
365 fmt = NULL;
366 }
367 return fmt;
368 }
369
370 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
371 int score=0;
372 return av_probe_input_format2(pd, is_opened, &score);
373 }
374
375 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
376 {
377 static const struct {
378 const char *name; enum CodecID id; enum AVMediaType type;
379 } fmt_id_type[] = {
380 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
381 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
382 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
383 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
384 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
385 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
386 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
387 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
388 { 0 }
389 };
390 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
391
392 if (fmt) {
393 int i;
394 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
395 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
396 for (i = 0; fmt_id_type[i].name; i++) {
397 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
398 st->codec->codec_id = fmt_id_type[i].id;
399 st->codec->codec_type = fmt_id_type[i].type;
400 break;
401 }
402 }
403 }
404 return !!fmt;
405 }
406
407 /************************************************************/
408 /* input media file */
409
410 /**
411 * Open a media file from an IO stream. 'fmt' must be specified.
412 */
413 int av_open_input_stream(AVFormatContext **ic_ptr,
414 ByteIOContext *pb, const char *filename,
415 AVInputFormat *fmt, AVFormatParameters *ap)
416 {
417 int err;
418 AVFormatContext *ic;
419 AVFormatParameters default_ap;
420
421 if(!ap){
422 ap=&default_ap;
423 memset(ap, 0, sizeof(default_ap));
424 }
425
426 if(!ap->prealloced_context)
427 ic = avformat_alloc_context();
428 else
429 ic = *ic_ptr;
430 if (!ic) {
431 err = AVERROR(ENOMEM);
432 goto fail;
433 }
434 ic->iformat = fmt;
435 ic->pb = pb;
436 ic->duration = AV_NOPTS_VALUE;
437 ic->start_time = AV_NOPTS_VALUE;
438 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
439
440 /* allocate private data */
441 if (fmt->priv_data_size > 0) {
442 ic->priv_data = av_mallocz(fmt->priv_data_size);
443 if (!ic->priv_data) {
444 err = AVERROR(ENOMEM);
445 goto fail;
446 }
447 } else {
448 ic->priv_data = NULL;
449 }
450
451 if (ic->iformat->read_header) {
452 err = ic->iformat->read_header(ic, ap);
453 if (err < 0)
454 goto fail;
455 }
456
457 if (pb && !ic->data_offset)
458 ic->data_offset = url_ftell(ic->pb);
459
460 #if LIBAVFORMAT_VERSION_MAJOR < 53
461 ff_metadata_demux_compat(ic);
462 #endif
463
464 ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
465
466 *ic_ptr = ic;
467 return 0;
468 fail:
469 if (ic) {
470 int i;
471 av_freep(&ic->priv_data);
472 for(i=0;i<ic->nb_streams;i++) {
473 AVStream *st = ic->streams[i];
474 if (st) {
475 av_free(st->priv_data);
476 av_free(st->codec->extradata);
477 }
478 av_free(st);
479 }
480 }
481 av_free(ic);
482 *ic_ptr = NULL;
483 return err;
484 }
485
486 /** size of probe buffer, for guessing file type from file contents */
487 #define PROBE_BUF_MIN 2048
488 #define PROBE_BUF_MAX (1<<20)
489
490 int ff_probe_input_buffer(ByteIOContext **pb, AVInputFormat **fmt,
491 const char *filename, void *logctx,
492 unsigned int offset, unsigned int max_probe_size)
493 {
494 AVProbeData pd = { filename ? filename : "", NULL, -offset };
495 unsigned char *buf = NULL;
496 int ret = 0, probe_size;
497
498 if (!max_probe_size) {
499 max_probe_size = PROBE_BUF_MAX;
500 } else if (max_probe_size > PROBE_BUF_MAX) {
501 max_probe_size = PROBE_BUF_MAX;
502 } else if (max_probe_size < PROBE_BUF_MIN) {
503 return AVERROR(EINVAL);
504 }
505
506 if (offset >= max_probe_size) {
507 return AVERROR(EINVAL);
508 }
509
510 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0;
511 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
512 int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
513 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
514
515 if (probe_size < offset) {
516 continue;
517 }
518
519 /* read probe data */
520 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
521 if ((ret = get_buffer(*pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
522 /* fail if the error was not end of file; otherwise, lower the score */
523 if (ret != AVERROR_EOF) {
524 av_free(buf);
525 return ret;
526 }
527 score = 0;
528 ret = 0; /* error was end of file, nothing read */
529 }
530 pd.buf_size += ret;
531 pd.buf = &buf[offset];
532
533 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
534
535 /* guess file format */
536 *fmt = av_probe_input_format2(&pd, 1, &score);
537 if(*fmt){
538 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
539 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
540 }else
541 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
542 }
543 }
544
545 if (!*fmt) {
546 av_free(buf);
547 return AVERROR_INVALIDDATA;
548 }
549
550 /* rewind. reuse probe buffer to avoid seeking */
551 if ((ret = ff_rewind_with_probe_data(*pb, buf, pd.buf_size)) < 0)
552 av_free(buf);
553
554 return ret;
555 }
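/* Usage sketch for this internal helper: pb is assumed to be an already
 * opened ByteIOContext and filename the corresponding name.  Passing 0/0
 * selects the default offset and PROBE_BUF_MAX limit, so the loop above
 * grows the probe buffer 2048 -> 4096 -> ... bytes and only accepts a
 * format scoring above AVPROBE_SCORE_MAX/4 before the final, full-sized
 * attempt. */
#if 0
    AVInputFormat *fmt = NULL;
    int err = ff_probe_input_buffer(&pb, &fmt, filename, NULL, 0, 0);
    if (err < 0)
        return err;   /* probing failed or no format reached the threshold */
#endif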
556
557 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
558 AVInputFormat *fmt,
559 int buf_size,
560 AVFormatParameters *ap)
561 {
562 int err;
563 AVProbeData probe_data, *pd = &probe_data;
564 ByteIOContext *pb = NULL;
565 void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
566
567 pd->filename = "";
568 if (filename)
569 pd->filename = filename;
570 pd->buf = NULL;
571 pd->buf_size = 0;
572
573 if (!fmt) {
574 /* guess format if no file can be opened */
575 fmt = av_probe_input_format(pd, 0);
576 }
577
578 /* Do not open file if the format does not need it. XXX: specific
579 hack needed to handle RTSP/TCP */
580 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
581 /* if no file needed do not try to open one */
582 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
583 goto fail;
584 }
585 if (buf_size > 0) {
586 url_setbufsize(pb, buf_size);
587 }
588 if (!fmt && (err = ff_probe_input_buffer(&pb, &fmt, filename, logctx, 0, logctx ? (*ic_ptr)->probesize : 0)) < 0) {
589 goto fail;
590 }
591 }
592
593 /* if still no format found, error */
594 if (!fmt) {
595 err = AVERROR_INVALIDDATA;
596 goto fail;
597 }
598
599 /* check filename in case an image number is expected */
600 if (fmt->flags & AVFMT_NEEDNUMBER) {
601 if (!av_filename_number_test(filename)) {
602 err = AVERROR_NUMEXPECTED;
603 goto fail;
604 }
605 }
606 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
607 if (err)
608 goto fail;
609 return 0;
610 fail:
611 av_freep(&pd->buf);
612 if (pb)
613 url_fclose(pb);
614 if (ap && ap->prealloced_context)
615 av_free(*ic_ptr);
616 *ic_ptr = NULL;
617 return err;
618
619 }
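/* Typical caller-side sketch of the demuxing entry points defined in this
 * file; error handling is shortened and "input.mkv" is just an example name. */
#if 0
    AVFormatContext *ic = NULL;
    AVPacket pkt;

    av_register_all();
    if (av_open_input_file(&ic, "input.mkv", NULL, 0, NULL) < 0)
        return -1;
    if (av_find_stream_info(ic) < 0)
        return -1;
    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.stream_index selects the matching entry in ic->streams[] */
        av_free_packet(&pkt);
    }
    av_close_input_file(ic);
#endif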
620
621 /*******************************************************/
622
623 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
624 AVPacketList **plast_pktl){
625 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
626 if (!pktl)
627 return NULL;
628
629 if (*packet_buffer)
630 (*plast_pktl)->next = pktl;
631 else
632 *packet_buffer = pktl;
633
634 /* add the packet in the buffered packet list */
635 *plast_pktl = pktl;
636 pktl->pkt= *pkt;
637 return &pktl->pkt;
638 }
639
640 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
641 {
642 int ret, i;
643 AVStream *st;
644
645 for(;;){
646 AVPacketList *pktl = s->raw_packet_buffer;
647
648 if (pktl) {
649 *pkt = pktl->pkt;
650 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
651 !s->streams[pkt->stream_index]->probe_packets ||
652 s->raw_packet_buffer_remaining_size < pkt->size){
653 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
654 av_freep(&pd->buf);
655 pd->buf_size = 0;
656 s->raw_packet_buffer = pktl->next;
657 s->raw_packet_buffer_remaining_size += pkt->size;
658 av_free(pktl);
659 return 0;
660 }
661 }
662
663 av_init_packet(pkt);
664 ret= s->iformat->read_packet(s, pkt);
665 if (ret < 0) {
666 if (!pktl || ret == AVERROR(EAGAIN))
667 return ret;
668 for (i = 0; i < s->nb_streams; i++)
669 s->streams[i]->probe_packets = 0;
670 continue;
671 }
672 st= s->streams[pkt->stream_index];
673
674 switch(st->codec->codec_type){
675 case AVMEDIA_TYPE_VIDEO:
676 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
677 break;
678 case AVMEDIA_TYPE_AUDIO:
679 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
680 break;
681 case AVMEDIA_TYPE_SUBTITLE:
682 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
683 break;
684 }
685
686 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
687 !st->probe_packets))
688 return ret;
689
690 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
691 s->raw_packet_buffer_remaining_size -= pkt->size;
692
693 if(st->codec->codec_id == CODEC_ID_PROBE){
694 AVProbeData *pd = &st->probe_data;
695 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
696 --st->probe_packets;
697
698 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
699 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
700 pd->buf_size += pkt->size;
701 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
702
703 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
704 //FIXME we do not reduce the score to 0 when we run out of buffer space in bytes
705 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
706 if(st->codec->codec_id != CODEC_ID_PROBE){
707 pd->buf_size=0;
708 av_freep(&pd->buf);
709 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
710 }
711 }
712 }
713 }
714 }
715
716 /**********************************************************/
717
718 /**
719 * Get the number of samples of an audio frame. Return -1 on error.
720 */
721 static int get_audio_frame_size(AVCodecContext *enc, int size)
722 {
723 int frame_size;
724
725 if(enc->codec_id == CODEC_ID_VORBIS)
726 return -1;
727
728 if (enc->frame_size <= 1) {
729 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
730
731 if (bits_per_sample) {
732 if (enc->channels == 0)
733 return -1;
734 frame_size = (size << 3) / (bits_per_sample * enc->channels);
735 } else {
736 /* used for example by ADPCM codecs */
737 if (enc->bit_rate == 0)
738 return -1;
739 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
740 }
741 } else {
742 frame_size = enc->frame_size;
743 }
744 return frame_size;
745 }
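/* Worked example for get_audio_frame_size(): 16-bit stereo PCM
 * (bits_per_sample = 16, channels = 2) with a 4096-byte packet yields
 * (4096 * 8) / (16 * 2) = 1024 samples; a codec with neither a fixed
 * frame_size nor bits_per_sample, e.g. 4096 bytes at bit_rate = 128000 and
 * sample_rate = 44100, yields 4096 * 8 * 44100 / 128000 = 11289 samples. */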
746
747
748 /**
749 * Compute the frame duration as a fraction of a second (*pnum / *pden); both are left at 0 if it is not available.
750 */
751 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
752 AVCodecParserContext *pc, AVPacket *pkt)
753 {
754 int frame_size;
755
756 *pnum = 0;
757 *pden = 0;
758 switch(st->codec->codec_type) {
759 case AVMEDIA_TYPE_VIDEO:
760 if(st->time_base.num*1000LL > st->time_base.den){
761 *pnum = st->time_base.num;
762 *pden = st->time_base.den;
763 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
764 *pnum = st->codec->time_base.num;
765 *pden = st->codec->time_base.den;
766 if (pc && pc->repeat_pict) {
767 *pnum = (*pnum) * (1 + pc->repeat_pict);
768 }
769 //If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet.
770 //Thus, if we have no parser in such a case, leave the duration undefined.
771 if(st->codec->ticks_per_frame>1 && !pc){
772 *pnum = *pden = 0;
773 }
774 }
775 break;
776 case AVMEDIA_TYPE_AUDIO:
777 frame_size = get_audio_frame_size(st->codec, pkt->size);
778 if (frame_size < 0)
779 break;
780 *pnum = frame_size;
781 *pden = st->codec->sample_rate;
782 break;
783 default:
784 break;
785 }
786 }
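/* Worked example for compute_frame_duration(): a video stream whose
 * container time_base is fine-grained (e.g. 1/90000) but whose codec
 * time_base is 1/25 gives *pnum = 1, *pden = 25, i.e. 1/25 s per frame;
 * a parser reporting repeat_pict = 1 doubles that to 2/25 s.  For audio,
 * a 1152-sample packet at 48 kHz gives *pnum = 1152, *pden = 48000. */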
787
788 static int is_intra_only(AVCodecContext *enc){
789 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
790 return 1;
791 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
792 switch(enc->codec_id){
793 case CODEC_ID_MJPEG:
794 case CODEC_ID_MJPEGB:
795 case CODEC_ID_LJPEG:
796 case CODEC_ID_RAWVIDEO:
797 case CODEC_ID_DVVIDEO:
798 case CODEC_ID_HUFFYUV:
799 case CODEC_ID_FFVHUFF:
800 case CODEC_ID_ASV1:
801 case CODEC_ID_ASV2:
802 case CODEC_ID_VCR1:
803 case CODEC_ID_DNXHD:
804 case CODEC_ID_JPEG2000:
805 return 1;
806 default: break;
807 }
808 }
809 return 0;
810 }
811
812 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
813 int64_t dts, int64_t pts)
814 {
815 AVStream *st= s->streams[stream_index];
816 AVPacketList *pktl= s->packet_buffer;
817
818 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
819 return;
820
821 st->first_dts= dts - st->cur_dts;
822 st->cur_dts= dts;
823
824 for(; pktl; pktl= pktl->next){
825 if(pktl->pkt.stream_index != stream_index)
826 continue;
827 //FIXME think more about this check
828 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
829 pktl->pkt.pts += st->first_dts;
830
831 if(pktl->pkt.dts != AV_NOPTS_VALUE)
832 pktl->pkt.dts += st->first_dts;
833
834 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
835 st->start_time= pktl->pkt.pts;
836 }
837 if (st->start_time == AV_NOPTS_VALUE)
838 st->start_time = pts;
839 }
840
841 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
842 {
843 AVPacketList *pktl= s->packet_buffer;
844 int64_t cur_dts= 0;
845
846 if(st->first_dts != AV_NOPTS_VALUE){
847 cur_dts= st->first_dts;
848 for(; pktl; pktl= pktl->next){
849 if(pktl->pkt.stream_index == pkt->stream_index){
850 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
851 break;
852 cur_dts -= pkt->duration;
853 }
854 }
855 pktl= s->packet_buffer;
856 st->first_dts = cur_dts;
857 }else if(st->cur_dts)
858 return;
859
860 for(; pktl; pktl= pktl->next){
861 if(pktl->pkt.stream_index != pkt->stream_index)
862 continue;
863 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
864 && !pktl->pkt.duration){
865 pktl->pkt.dts= cur_dts;
866 if(!st->codec->has_b_frames)
867 pktl->pkt.pts= cur_dts;
868 cur_dts += pkt->duration;
869 pktl->pkt.duration= pkt->duration;
870 }else
871 break;
872 }
873 if(st->first_dts == AV_NOPTS_VALUE)
874 st->cur_dts= cur_dts;
875 }
876
877 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
878 AVCodecParserContext *pc, AVPacket *pkt)
879 {
880 int num, den, presentation_delayed, delay, i;
881 int64_t offset;
882
883 if (s->flags & AVFMT_FLAG_NOFILLIN)
884 return;
885
886 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
887 pkt->dts= AV_NOPTS_VALUE;
888
889 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
890 //FIXME Set low_delay = 0 when has_b_frames = 1
891 st->codec->has_b_frames = 1;
892
893 /* do we have a video B-frame ? */
894 delay= st->codec->has_b_frames;
895 presentation_delayed = 0;
896 /* XXX: need has_b_frames, but cannot get it if the codec is
897 not initialized */
898 if (delay &&
899 pc && pc->pict_type != FF_B_TYPE)
900 presentation_delayed = 1;
901
902 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
903 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
904 pkt->dts -= 1LL<<st->pts_wrap_bits;
905 }
906
907 // some MPEG-2 streams in MPEG-PS lack a dts (issue171 / input_file.mpg);
908 // we take the conservative approach and discard both.
909 // Note: if this misbehaves for an H.264 file then presentation_delayed is possibly not set correctly.
910 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
911 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
912 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
913 }
914
915 if (pkt->duration == 0) {
916 compute_frame_duration(&num, &den, st, pc, pkt);
917 if (den && num) {
918 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
919
920 if(pkt->duration != 0 && s->packet_buffer)
921 update_initial_durations(s, st, pkt);
922 }
923 }
924
925 /* correct timestamps with byte offset if demuxers only have timestamps
926 on packet boundaries */
927 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
928 /* this will estimate bitrate based on this frame's duration and size */
929 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
930 if(pkt->pts != AV_NOPTS_VALUE)
931 pkt->pts += offset;
932 if(pkt->dts != AV_NOPTS_VALUE)
933 pkt->dts += offset;
934 }
935
936 if (pc && pc->dts_sync_point >= 0) {
937 // we have synchronization info from the parser
938 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
939 if (den > 0) {
940 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
941 if (pkt->dts != AV_NOPTS_VALUE) {
942 // got DTS from the stream, update reference timestamp
943 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
944 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
945 } else if (st->reference_dts != AV_NOPTS_VALUE) {
946 // compute DTS based on reference timestamp
947 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
948 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
949 }
950 if (pc->dts_sync_point > 0)
951 st->reference_dts = pkt->dts; // new reference
952 }
953 }
954
955 /* This may be redundant, but it should not hurt. */
956 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
957 presentation_delayed = 1;
958
959 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
960 /* interpolate PTS and DTS if they are not present */
961 //We skip H264 currently because delay and has_b_frames are not reliably set
962 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
963 if (presentation_delayed) {
964 /* DTS = decompression timestamp */
965 /* PTS = presentation timestamp */
966 if (pkt->dts == AV_NOPTS_VALUE)
967 pkt->dts = st->last_IP_pts;
968 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
969 if (pkt->dts == AV_NOPTS_VALUE)
970 pkt->dts = st->cur_dts;
971
972 /* this is tricky: the dts must be incremented by the duration
973 of the frame we are displaying, i.e. the last I- or P-frame */
974 if (st->last_IP_duration == 0)
975 st->last_IP_duration = pkt->duration;
976 if(pkt->dts != AV_NOPTS_VALUE)
977 st->cur_dts = pkt->dts + st->last_IP_duration;
978 st->last_IP_duration = pkt->duration;
979 st->last_IP_pts= pkt->pts;
980 /* cannot compute PTS if not present (we can compute it only
981 by knowing the future) */
982 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
983 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
984 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
985 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
986 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
987 pkt->pts += pkt->duration;
988 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
989 }
990 }
991
992 /* presentation is not delayed: PTS and DTS are the same */
993 if(pkt->pts == AV_NOPTS_VALUE)
994 pkt->pts = pkt->dts;
995 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
996 if(pkt->pts == AV_NOPTS_VALUE)
997 pkt->pts = st->cur_dts;
998 pkt->dts = pkt->pts;
999 if(pkt->pts != AV_NOPTS_VALUE)
1000 st->cur_dts = pkt->pts + pkt->duration;
1001 }
1002 }
1003
1004 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1005 st->pts_buffer[0]= pkt->pts;
1006 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1007 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1008 if(pkt->dts == AV_NOPTS_VALUE)
1009 pkt->dts= st->pts_buffer[0];
1010 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
1011 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1012 }
1013 if(pkt->dts > st->cur_dts)
1014 st->cur_dts = pkt->dts;
1015 }
1016
1017 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1018
1019 /* update flags */
1020 if(is_intra_only(st->codec))
1021 pkt->flags |= AV_PKT_FLAG_KEY;
1022 else if (pc) {
1023 pkt->flags = 0;
1024 /* keyframe computation */
1025 if (pc->key_frame == 1)
1026 pkt->flags |= AV_PKT_FLAG_KEY;
1027 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
1028 pkt->flags |= AV_PKT_FLAG_KEY;
1029 }
1030 if (pc)
1031 pkt->convergence_duration = pc->convergence_duration;
1032 }
1033
1034
1035 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1036 {
1037 AVStream *st;
1038 int len, ret, i;
1039
1040 av_init_packet(pkt);
1041
1042 for(;;) {
1043 /* select current input stream component */
1044 st = s->cur_st;
1045 if (st) {
1046 if (!st->need_parsing || !st->parser) {
1047 /* no parsing needed: we just output the packet as is */
1048 /* raw data support */
1049 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
1050 compute_pkt_fields(s, st, NULL, pkt);
1051 s->cur_st = NULL;
1052 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1053 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1054 ff_reduce_index(s, st->index);
1055 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1056 }
1057 break;
1058 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1059 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1060 st->cur_ptr, st->cur_len,
1061 st->cur_pkt.pts, st->cur_pkt.dts,
1062 st->cur_pkt.pos);
1063 st->cur_pkt.pts = AV_NOPTS_VALUE;
1064 st->cur_pkt.dts = AV_NOPTS_VALUE;
1065 /* increment read pointer */
1066 st->cur_ptr += len;
1067 st->cur_len -= len;
1068
1069 /* return packet if any */
1070 if (pkt->size) {
1071 got_packet:
1072 pkt->duration = 0;
1073 pkt->stream_index = st->index;
1074 pkt->pts = st->parser->pts;
1075 pkt->dts = st->parser->dts;
1076 pkt->pos = st->parser->pos;
1077 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
1078 s->cur_st = NULL;
1079 pkt->destruct= st->cur_pkt.destruct;
1080 st->cur_pkt.destruct= NULL;
1081 st->cur_pkt.data = NULL;
1082 assert(st->cur_len == 0);
1083 }else{
1084 pkt->destruct = NULL;
1085 }
1086 compute_pkt_fields(s, st, st->parser, pkt);
1087
1088 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1089 ff_reduce_index(s, st->index);
1090 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1091 0, 0, AVINDEX_KEYFRAME);
1092 }
1093
1094 break;
1095 }
1096 } else {
1097 /* free packet */
1098 av_free_packet(&st->cur_pkt);
1099 s->cur_st = NULL;
1100 }
1101 } else {
1102 AVPacket cur_pkt;
1103 /* read next packet */
1104 ret = av_read_packet(s, &cur_pkt);
1105 if (ret < 0) {
1106 if (ret == AVERROR(EAGAIN))
1107 return ret;
1108 /* return the last frames, if any */
1109 for(i = 0; i < s->nb_streams; i++) {
1110 st = s->streams[i];
1111 if (st->parser && st->need_parsing) {
1112 av_parser_parse2(st->parser, st->codec,
1113 &pkt->data, &pkt->size,
1114 NULL, 0,
1115 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1116 AV_NOPTS_VALUE);
1117 if (pkt->size)
1118 goto got_packet;
1119 }
1120 }
1121 /* no more packets: really terminate parsing */
1122 return ret;
1123 }
1124 st = s->streams[cur_pkt.stream_index];
1125 st->cur_pkt= cur_pkt;
1126
1127 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1128 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1129 st->cur_pkt.pts < st->cur_pkt.dts){
1130 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1131 st->cur_pkt.stream_index,
1132 st->cur_pkt.pts,
1133 st->cur_pkt.dts,
1134 st->cur_pkt.size);
1135 // av_free_packet(&st->cur_pkt);
1136 // return -1;
1137 }
1138
1139 if(s->debug & FF_FDEBUG_TS)
1140 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1141 st->cur_pkt.stream_index,
1142 st->cur_pkt.pts,
1143 st->cur_pkt.dts,
1144 st->cur_pkt.size,
1145 st->cur_pkt.duration,
1146 st->cur_pkt.flags);
1147
1148 s->cur_st = st;
1149 st->cur_ptr = st->cur_pkt.data;
1150 st->cur_len = st->cur_pkt.size;
1151 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1152 st->parser = av_parser_init(st->codec->codec_id);
1153 if (!st->parser) {
1154 /* no parser available: just output the raw packets */
1155 st->need_parsing = AVSTREAM_PARSE_NONE;
1156 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1157 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1158 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1159 st->parser->flags |= PARSER_FLAG_ONCE;
1160 }
1161 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1162 st->parser->next_frame_offset=
1163 st->parser->cur_offset= st->cur_pkt.pos;
1164 }
1165 }
1166 }
1167 }
1168 if(s->debug & FF_FDEBUG_TS)
1169 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1170 pkt->stream_index,
1171 pkt->pts,
1172 pkt->dts,
1173 pkt->size,
1174 pkt->duration,
1175 pkt->flags);
1176
1177 return 0;
1178 }
1179
1180 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1181 {
1182 AVPacketList *pktl;
1183 int eof=0;
1184 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1185
1186 for(;;){
1187 pktl = s->packet_buffer;
1188 if (pktl) {
1189 AVPacket *next_pkt= &pktl->pkt;
1190
1191 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1192 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1193 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1194 if( pktl->pkt.stream_index == next_pkt->stream_index
1195 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1196 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1197 next_pkt->pts= pktl->pkt.dts;
1198 }
1199 pktl= pktl->next;
1200 }
1201 pktl = s->packet_buffer;
1202 }
1203
1204 if( next_pkt->pts != AV_NOPTS_VALUE
1205 || next_pkt->dts == AV_NOPTS_VALUE
1206 || !genpts || eof){
1207 /* read packet from packet buffer, if there is data */
1208 *pkt = *next_pkt;
1209 s->packet_buffer = pktl->next;
1210 av_free(pktl);
1211 return 0;
1212 }
1213 }
1214 if(genpts){
1215 int ret= av_read_frame_internal(s, pkt);
1216 if(ret<0){
1217 if(pktl && ret != AVERROR(EAGAIN)){
1218 eof=1;
1219 continue;
1220 }else
1221 return ret;
1222 }
1223
1224 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1225 &s->packet_buffer_end)) < 0)
1226 return AVERROR(ENOMEM);
1227 }else{
1228 assert(!s->packet_buffer);
1229 return av_read_frame_internal(s, pkt);
1230 }
1231 }
1232 }
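/* Sketch: setting AVFMT_FLAG_GENPTS before reading makes av_read_frame()
 * buffer packets until a missing pts can be filled in from the dts of a
 * later packet of the same stream.  ic is assumed to be an opened
 * AVFormatContext. */
#if 0
    AVPacket pkt;
    ic->flags |= AVFMT_FLAG_GENPTS;
    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.pts is now set whenever it could be derived */
        av_free_packet(&pkt);
    }
#endif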
1233
1234 /* XXX: flush the packet queue */
1235 static void flush_packet_queue(AVFormatContext *s)
1236 {
1237 AVPacketList *pktl;
1238
1239 for(;;) {
1240 pktl = s->packet_buffer;
1241 if (!pktl)
1242 break;
1243 s->packet_buffer = pktl->next;
1244 av_free_packet(&pktl->pkt);
1245 av_free(pktl);
1246 }
1247 while(s->raw_packet_buffer){
1248 pktl = s->raw_packet_buffer;
1249 s->raw_packet_buffer = pktl->next;
1250 av_free_packet(&pktl->pkt);
1251 av_free(pktl);
1252 }
1253 s->packet_buffer_end=
1254 s->raw_packet_buffer_end= NULL;
1255 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1256 }
1257
1258 /*******************************************************/
1259 /* seek support */
1260
1261 int av_find_default_stream_index(AVFormatContext *s)
1262 {
1263 int first_audio_index = -1;
1264 int i;
1265 AVStream *st;
1266
1267 if (s->nb_streams <= 0)
1268 return -1;
1269 for(i = 0; i < s->nb_streams; i++) {
1270 st = s->streams[i];
1271 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1272 return i;
1273 }
1274 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1275 first_audio_index = i;
1276 }
1277 return first_audio_index >= 0 ? first_audio_index : 0;
1278 }
1279
1280 /**
1281 * Flush the frame reader.
1282 */
1283 void ff_read_frame_flush(AVFormatContext *s)
1284 {
1285 AVStream *st;
1286 int i, j;
1287
1288 flush_packet_queue(s);
1289
1290 s->cur_st = NULL;
1291
1292 /* for each stream, reset read state */
1293 for(i = 0; i < s->nb_streams; i++) {
1294 st = s->streams[i];
1295
1296 if (st->parser) {
1297 av_parser_close(st->parser);
1298 st->parser = NULL;
1299 av_free_packet(&st->cur_pkt);
1300 }
1301 st->last_IP_pts = AV_NOPTS_VALUE;
1302 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1303 st->reference_dts = AV_NOPTS_VALUE;
1304 /* fail safe */
1305 st->cur_ptr = NULL;
1306 st->cur_len = 0;
1307
1308 st->probe_packets = MAX_PROBE_PACKETS;
1309
1310 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1311 st->pts_buffer[j]= AV_NOPTS_VALUE;
1312 }
1313 }
1314
1315 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1316 int i;
1317
1318 for(i = 0; i < s->nb_streams; i++) {
1319 AVStream *st = s->streams[i];
1320
1321 st->cur_dts = av_rescale(timestamp,
1322 st->time_base.den * (int64_t)ref_st->time_base.num,
1323 st->time_base.num * (int64_t)ref_st->time_base.den);
1324 }
1325 }
1326
1327 void ff_reduce_index(AVFormatContext *s, int stream_index)
1328 {
1329 AVStream *st= s->streams[stream_index];
1330 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1331
1332 if((unsigned)st->nb_index_entries >= max_entries){
1333 int i;
1334 for(i=0; 2*i<st->nb_index_entries; i++)
1335 st->index_entries[i]= st->index_entries[2*i];
1336 st->nb_index_entries= i;
1337 }
1338 }
1339
1340 int av_add_index_entry(AVStream *st,
1341 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1342 {
1343 AVIndexEntry *entries, *ie;
1344 int index;
1345
1346 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1347 return -1;
1348
1349 entries = av_fast_realloc(st->index_entries,
1350 &st->index_entries_allocated_size,
1351 (st->nb_index_entries + 1) *
1352 sizeof(AVIndexEntry));
1353 if(!entries)
1354 return -1;
1355
1356 st->index_entries= entries;
1357
1358 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1359
1360 if(index<0){
1361 index= st->nb_index_entries++;
1362 ie= &entries[index];
1363 assert(index==0 || ie[-1].timestamp < timestamp);
1364 }else{
1365 ie= &entries[index];
1366 if(ie->timestamp != timestamp){
1367 if(ie->timestamp <= timestamp)
1368 return -1;
1369 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1370 st->nb_index_entries++;
1371 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1372 distance= ie->min_distance;
1373 }
1374
1375 ie->pos = pos;
1376 ie->timestamp = timestamp;
1377 ie->min_distance= distance;
1378 ie->size= size;
1379 ie->flags = flags;
1380
1381 return index;
1382 }
1383
1384 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1385 int flags)
1386 {
1387 AVIndexEntry *entries= st->index_entries;
1388 int nb_entries= st->nb_index_entries;
1389 int a, b, m;
1390 int64_t timestamp;
1391
1392 a = - 1;
1393 b = nb_entries;
1394
1395 //optimize appending index entries at the end
1396 if(b && entries[b-1].timestamp < wanted_timestamp)
1397 a= b-1;
1398
1399 while (b - a > 1) {
1400 m = (a + b) >> 1;
1401 timestamp = entries[m].timestamp;
1402 if(timestamp >= wanted_timestamp)
1403 b = m;
1404 if(timestamp <= wanted_timestamp)
1405 a = m;
1406 }
1407 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1408
1409 if(!(flags & AVSEEK_FLAG_ANY)){
1410 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1411 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1412 }
1413 }
1414
1415 if(m == nb_entries)
1416 return -1;
1417 return m;
1418 }
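/* Illustrative search semantics: given index timestamps {0, 400, 800},
 * av_index_search_timestamp(st, 500, AVSEEK_FLAG_BACKWARD) returns the
 * entry with timestamp 400, the same call without the flag returns the
 * entry with timestamp 800, and AVSEEK_FLAG_ANY additionally allows
 * non-keyframe entries to be returned. */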
1419
1420 #define DEBUG_SEEK
1421
1422 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1423 AVInputFormat *avif= s->iformat;
1424 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1425 int64_t ts_min, ts_max, ts;
1426 int index;
1427 int64_t ret;
1428 AVStream *st;
1429
1430 if (stream_index < 0)
1431 return -1;
1432
1433 #ifdef DEBUG_SEEK
1434 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1435 #endif
1436
1437 ts_max=
1438 ts_min= AV_NOPTS_VALUE;
1439 pos_limit= -1; //gcc falsely says it may be uninitialized
1440
1441 st= s->streams[stream_index];
1442 if(st->index_entries){
1443 AVIndexEntry *e;
1444
1445 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1446 index= FFMAX(index, 0);
1447 e= &st->index_entries[index];
1448
1449 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1450 pos_min= e->pos;
1451 ts_min= e->timestamp;
1452 #ifdef DEBUG_SEEK
1453 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1454 pos_min,ts_min);
1455 #endif
1456 }else{
1457 assert(index==0);
1458 }
1459
1460 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1461 assert(index < st->nb_index_entries);
1462 if(index >= 0){
1463 e= &st->index_entries[index];
1464 assert(e->timestamp >= target_ts);
1465 pos_max= e->pos;
1466 ts_max= e->timestamp;
1467 pos_limit= pos_max - e->min_distance;
1468 #ifdef DEBUG_SEEK
1469 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1470 pos_max,pos_limit, ts_max);
1471 #endif
1472 }
1473 }
1474
1475 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1476 if(pos<0)
1477 return -1;
1478
1479 /* do the seek */
1480 if ((ret = url_fseek(s->pb, pos, SEEK_SET)) < 0)
1481 return ret;
1482
1483 av_update_cur_dts(s, st, ts);
1484
1485 return 0;
1486 }
1487
1488 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1489 int64_t pos, ts;
1490 int64_t start_pos, filesize;
1491 int no_change;
1492
1493 #ifdef DEBUG_SEEK
1494 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1495 #endif
1496
1497 if(ts_min == AV_NOPTS_VALUE){
1498 pos_min = s->data_offset;
1499 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1500 if (ts_min == AV_NOPTS_VALUE)
1501 return -1;
1502 }
1503
1504 if(ts_max == AV_NOPTS_VALUE){
1505 int step= 1024;
1506 filesize = url_fsize(s->pb);
1507 pos_max = filesize - 1;
1508 do{
1509 pos_max -= step;
1510 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1511 step += step;
1512 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1513 if (ts_max == AV_NOPTS_VALUE)
1514 return -1;
1515
1516 for(;;){
1517 int64_t tmp_pos= pos_max + 1;
1518 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1519 if(tmp_ts == AV_NOPTS_VALUE)
1520 break;
1521 ts_max= tmp_ts;
1522 pos_max= tmp_pos;
1523 if(tmp_pos >= filesize)
1524 break;
1525 }
1526 pos_limit= pos_max;
1527 }
1528
1529 if(ts_min > ts_max){
1530 return -1;
1531 }else if(ts_min == ts_max){
1532 pos_limit= pos_min;
1533 }
1534
1535 no_change=0;
1536 while (pos_min < pos_limit) {
1537 #ifdef DEBUG_SEEK
1538 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1539 pos_min, pos_max,
1540 ts_min, ts_max);
1541 #endif
1542 assert(pos_limit <= pos_max);
1543
1544 if(no_change==0){
1545 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1546 // interpolate position (better than plain bisection)
1547 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1548 + pos_min - approximate_keyframe_distance;
1549 }else if(no_change==1){
1550 // bisection, if interpolation failed to change min or max pos last time
1551 pos = (pos_min + pos_limit)>>1;
1552 }else{
1553 /* linear search if bisection failed, can only happen if there
1554 are very few or no keyframes between min/max */
1555 pos=pos_min;
1556 }
1557 if(pos <= pos_min)
1558 pos= pos_min + 1;
1559 else if(pos > pos_limit)
1560 pos= pos_limit;
1561 start_pos= pos;
1562
1563 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1564 if(pos == pos_max)
1565 no_change++;
1566 else
1567 no_change=0;
1568 #ifdef DEBUG_SEEK
1569 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1570 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1571 start_pos, no_change);
1572 #endif
1573 if(ts == AV_NOPTS_VALUE){
1574 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1575 return -1;
1576 }
1577 assert(ts != AV_NOPTS_VALUE);
1578 if (target_ts <= ts) {
1579 pos_limit = start_pos - 1;
1580 pos_max = pos;
1581 ts_max = ts;
1582 }
1583 if (target_ts >= ts) {
1584 pos_min = pos;
1585 ts_min = ts;
1586 }
1587 }
1588
1589 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1590 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1591 #ifdef DEBUG_SEEK
1592 pos_min = pos;
1593 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1594 pos_min++;
1595 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1596 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1597 pos, ts_min, target_ts, ts_max);
1598 #endif
1599 *ts_ret= ts;
1600 return pos;
1601 }
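/* Worked example of the interpolation step above: seeking to
 * target_ts = 60 s in a file with ts_min = 0 s at pos_min = 0 and
 * ts_max = 120 s at pos_max = 100 MB gives a first guess of roughly
 *   pos = 60/120 * 100 MB - approximate_keyframe_distance,
 * i.e. slightly before the 50 MB mark; read_timestamp() at that position
 * then tightens either (pos_min, ts_min) or (pos_max, ts_max) until the
 * window closes. */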
1602
1603 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1604 int64_t pos_min, pos_max;
1605 #if 0
1606 AVStream *st;
1607
1608 if (stream_index < 0)
1609 return -1;
1610
1611 st= s->streams[stream_index];
1612 #endif
1613
1614 pos_min = s->data_offset;
1615 pos_max = url_fsize(s->pb) - 1;
1616
1617 if (pos < pos_min) pos= pos_min;
1618 else if(pos > pos_max) pos= pos_max;
1619
1620 url_fseek(s->pb, pos, SEEK_SET);
1621
1622 #if 0
1623 av_update_cur_dts(s, st, ts);
1624 #endif
1625 return 0;
1626 }
1627
1628 static int av_seek_frame_generic(AVFormatContext *s,
1629 int stream_index, int64_t timestamp, int flags)
1630 {
1631 int index;
1632 int64_t ret;
1633 AVStream *st;
1634 AVIndexEntry *ie;
1635
1636 st = s->streams[stream_index];
1637
1638 index = av_index_search_timestamp(st, timestamp, flags);
1639
1640 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1641 return -1;
1642
1643 if(index < 0 || index==st->nb_index_entries-1){
1644 int i;
1645 AVPacket pkt;
1646
1647 if(st->nb_index_entries){
1648 assert(st->index_entries);
1649 ie= &st->index_entries[st->nb_index_entries-1];
1650 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1651 return ret;
1652 av_update_cur_dts(s, st, ie->timestamp);
1653 }else{
1654 if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
1655 return ret;
1656 }
1657 for(i=0;; i++) {
1658 int ret;
1659 do{
1660 ret = av_read_frame(s, &pkt);
1661 }while(ret == AVERROR(EAGAIN));
1662 if(ret<0)
1663 break;
1664 av_free_packet(&pkt);
1665 if(stream_index == pkt.stream_index){
1666 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1667 break;
1668 }
1669 }
1670 index = av_index_search_timestamp(st, timestamp, flags);
1671 }
1672 if (index < 0)
1673 return -1;
1674
1675 ff_read_frame_flush(s);
1676 if (s->iformat->read_seek){
1677 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1678 return 0;
1679 }
1680 ie = &st->index_entries[index];
1681 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1682 return ret;
1683 av_update_cur_dts(s, st, ie->timestamp);
1684
1685 return 0;
1686 }
1687
1688 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1689 {
1690 int ret;
1691 AVStream *st;
1692
1693 ff_read_frame_flush(s);
1694
1695 if(flags & AVSEEK_FLAG_BYTE)
1696 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1697
1698 if(stream_index < 0){
1699 stream_index= av_find_default_stream_index(s);
1700 if(stream_index < 0)
1701 return -1;
1702
1703 st= s->streams[stream_index];
1704 /* timestamp for default must be expressed in AV_TIME_BASE units */
1705 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1706 }
1707
1708 /* first, we try the format specific seek */
1709 if (s->iformat->read_seek)
1710 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1711 else
1712 ret = -1;
1713 if (ret >= 0) {
1714 return 0;
1715 }
1716
1717 if(s->iformat->read_timestamp)
1718 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1719 else
1720 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1721 }
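/* Usage sketch for av_seek_frame(): with stream_index = -1 the timestamp is
 * expressed in AV_TIME_BASE units, so seeking backwards to the keyframe at
 * or before the 10 s mark looks as follows (ic is assumed to be an opened
 * AVFormatContext). */
#if 0
    if (av_seek_frame(ic, -1, 10 * (int64_t)AV_TIME_BASE,
                      AVSEEK_FLAG_BACKWARD) < 0)
        av_log(ic, AV_LOG_ERROR, "seek to 10s failed\n");
#endif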
1722
1723 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1724 {
1725 if(min_ts > ts || max_ts < ts)
1726 return -1;
1727
1728 ff_read_frame_flush(s);
1729
1730 if (s->iformat->read_seek2)
1731 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1732
1733 if(s->iformat->read_timestamp){
1734 //try to seek via read_timestamp()
1735 }
1736
1737 //Fall back to the old API if the new one is not implemented but the old one is.
1738 //Note the old API has somewhat different semantics.
1739 if(s->iformat->read_seek || 1)
1740 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1741
1742 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1743 }
1744
1745 /*******************************************************/
1746
1747 /**
1748 * Check whether the file has an accurate duration in any stream.
1749 *
1750 * @return TRUE if at least one component stream has an accurate duration.
1751 */
1752 static int av_has_duration(AVFormatContext *ic)
1753 {
1754 int i;
1755 AVStream *st;
1756
1757 for(i = 0;i < ic->nb_streams; i++) {
1758 st = ic->streams[i];
1759 if (st->duration != AV_NOPTS_VALUE)
1760 return 1;
1761 }
1762 return 0;
1763 }
1764
1765 /**
1766 * Estimate the global stream timings from those of each component stream.
1767 *
1768 * Also computes the global bitrate if possible.
1769 */
1770 static void av_update_stream_timings(AVFormatContext *ic)
1771 {
1772 int64_t start_time, start_time1, end_time, end_time1;
1773 int64_t duration, duration1;
1774 int i;
1775 AVStream *st;
1776
1777 start_time = INT64_MAX;
1778 end_time = INT64_MIN;
1779 duration = INT64_MIN;
1780 for(i = 0;i < ic->nb_streams; i++) {
1781 st = ic->streams[i];
1782 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1783 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1784 if (start_time1 < start_time)
1785 start_time = start_time1;
1786 if (st->duration != AV_NOPTS_VALUE) {
1787 end_time1 = start_time1
1788 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1789 if (end_time1 > end_time)
1790 end_time = end_time1;
1791 }
1792 }
1793 if (st->duration != AV_NOPTS_VALUE) {
1794 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1795 if (duration1 > duration)
1796 duration = duration1;
1797 }
1798 }
1799 if (start_time != INT64_MAX) {
1800 ic->start_time = start_time;
1801 if (end_time != INT64_MIN) {
1802 if (end_time - start_time > duration)
1803 duration = end_time - start_time;
1804 }
1805 }
1806 if (duration != INT64_MIN) {
1807 ic->duration = duration;
1808 if (ic->file_size > 0) {
1809 /* compute the bitrate */
1810 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1811 (double)ic->duration;
1812 }
1813 }
1814 }
1815
1816 static void fill_all_stream_timings(AVFormatContext *ic)
1817 {
1818 int i;
1819 AVStream *st;
1820
1821 av_update_stream_timings(ic);
1822 for(i = 0;i < ic->nb_streams; i++) {
1823 st = ic->streams[i];
1824 if (st->start_time == AV_NOPTS_VALUE) {
1825 if(ic->start_time != AV_NOPTS_VALUE)
1826 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1827 if(ic->duration != AV_NOPTS_VALUE)
1828 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1829 }
1830 }
1831 }
1832
1833 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1834 {
1835 int64_t filesize, duration;
1836 int bit_rate, i;
1837 AVStream *st;
1838
1839 /* if bit_rate is already set, we believe it */
1840 if (ic->bit_rate == 0) {
1841 bit_rate = 0;
1842 for(i=0;i<ic->nb_streams;i++) {
1843 st = ic->streams[i];
1844 bit_rate += st->codec->bit_rate;
1845 }
1846 ic->bit_rate = bit_rate;
1847 }
1848
1849 /* if duration is already set, we believe it */
1850 if (ic->duration == AV_NOPTS_VALUE &&
1851 ic->bit_rate != 0 &&
1852 ic->file_size != 0) {
1853 filesize = ic->file_size;
1854 if (filesize > 0) {
1855 for(i = 0; i < ic->nb_streams; i++) {
1856 st = ic->streams[i];
1857 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1858 if (st->duration == AV_NOPTS_VALUE)
1859 st->duration = duration;
1860 }
1861 }
1862 }
1863 }
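/* Worked example of the bitrate fallback above: a file_size of 4000000
 * bytes with a summed bit_rate of 320000 bit/s gives
 *   duration = 8 * 4000000 / 320000 = 100 seconds,
 * rescaled into each stream's time_base.  This is only an estimate, which
 * is why av_estimate_timings() warns when it has to be used. */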
1864
1865 #define DURATION_MAX_READ_SIZE 250000
1866 #define DURATION_MAX_RETRY 3
1867
1868 /* only usable for MPEG-PS streams */
1869 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1870 {
1871 AVPacket pkt1, *pkt = &pkt1;
1872 AVStream *st;
1873 int read_size, i, ret;
1874 int64_t end_time, start_time[MAX_STREAMS];
1875 int64_t filesize, offset, duration;
1876 int retry=0;
1877
1878 ic->cur_st = NULL;
1879
1880 /* flush packet queue */
1881 flush_packet_queue(ic);
1882
1883 for(i=0;i<ic->nb_streams;i++) {
1884 st = ic->streams[i];
1885 if(st->start_time != AV_NOPTS_VALUE){
1886 start_time[i]= st->start_time;
1887 }else if(st->first_dts != AV_NOPTS_VALUE){
1888 start_time[i]= st->first_dts;
1889 }else
1890 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");
1891
1892 if (st->parser) {
1893 av_parser_close(st->parser);
1894 st->parser= NULL;
1895 av_free_packet(&st->cur_pkt);
1896 }
1897 }
1898
1899 /* estimate the end time (duration) */
1900 /* XXX: may need to support wrapping */
1901 filesize = ic->file_size;
1902 end_time = AV_NOPTS_VALUE;
1903 do{
1904 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1905 if (offset < 0)
1906 offset = 0;
1907
1908 url_fseek(ic->pb, offset, SEEK_SET);
1909 read_size = 0;
1910 for(;;) {
1911 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1912 break;
1913
1914 do{
1915 ret = av_read_packet(ic, pkt);
1916 }while(ret == AVERROR(EAGAIN));
1917 if (ret != 0)
1918 break;
1919 read_size += pkt->size;
1920 st = ic->streams[pkt->stream_index];
1921 if (pkt->pts != AV_NOPTS_VALUE &&
1922 start_time[pkt->stream_index] != AV_NOPTS_VALUE) {
1923 end_time = pkt->pts;
1924 duration = end_time - start_time[pkt->stream_index];
1925 if (duration < 0)
1926 duration += 1LL<<st->pts_wrap_bits;
1927 if (duration > 0) {
1928 if (st->duration == AV_NOPTS_VALUE ||
1929 st->duration < duration)
1930 st->duration = duration;
1931 }
1932 }
1933 av_free_packet(pkt);
1934 }
1935 }while( end_time==AV_NOPTS_VALUE
1936 && filesize > (DURATION_MAX_READ_SIZE<<retry)
1937 && ++retry <= DURATION_MAX_RETRY);
1938
1939 fill_all_stream_timings(ic);
1940
1941 url_fseek(ic->pb, old_offset, SEEK_SET);
1942 for(i=0; i<ic->nb_streams; i++){
1943 st= ic->streams[i];
1944 st->cur_dts= st->first_dts;
1945 st->last_IP_pts = AV_NOPTS_VALUE;
1946 }
1947 }
1948
1949 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1950 {
1951 int64_t file_size;
1952
1953 /* get the file size, if possible */
1954 if (ic->iformat->flags & AVFMT_NOFILE) {
1955 file_size = 0;
1956 } else {
1957 file_size = url_fsize(ic->pb);
1958 if (file_size < 0)
1959 file_size = 0;
1960 }
1961 ic->file_size = file_size;
1962
1963 if ((!strcmp(ic->iformat->name, "mpeg") ||
1964 !strcmp(ic->iformat->name, "mpegts")) &&
1965 file_size && !url_is_streamed(ic->pb)) {
1966 /* get accurate estimate from the PTSes */
1967 av_estimate_timings_from_pts(ic, old_offset);
1968 } else if (av_has_duration(ic)) {
1969 /* at least one component has timings - we use them for all
1970 the components */
1971 fill_all_stream_timings(ic);
1972 } else {
1973 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
1974 /* less precise: use bitrate info */
1975 av_estimate_timings_from_bit_rate(ic);
1976 }
1977 av_update_stream_timings(ic);
1978
1979 #if 0
1980 {
1981 int i;
1982 AVStream *st;
1983 for(i = 0;i < ic->nb_streams; i++) {
1984 st = ic->streams[i];
1985 printf("%d: start_time: %0.3f duration: %0.3f\n",
1986 i, (double)st->start_time / AV_TIME_BASE,
1987 (double)st->duration / AV_TIME_BASE);
1988 }
1989 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1990 (double)ic->start_time / AV_TIME_BASE,
1991 (double)ic->duration / AV_TIME_BASE,
1992 ic->bit_rate / 1000);
1993 }
1994 #endif
1995 }
1996
1997 static int has_codec_parameters(AVCodecContext *enc)
1998 {
1999 int val;
2000 switch(enc->codec_type) {
2001 case AVMEDIA_TYPE_AUDIO:
2002 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
2003 if(!enc->frame_size &&
2004 (enc->codec_id == CODEC_ID_VORBIS ||
2005 enc->codec_id == CODEC_ID_AAC ||
2006 enc->codec_id == CODEC_ID_MP1 ||
2007 enc->codec_id == CODEC_ID_MP2 ||
2008 enc->codec_id == CODEC_ID_MP3 ||
2009 enc->codec_id == CODEC_ID_SPEEX))
2010 return 0;
2011 break;
2012 case AVMEDIA_TYPE_VIDEO:
2013 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
2014 break;
2015 default:
2016 val = 1;
2017 break;
2018 }
2019 return enc->codec_id != CODEC_ID_NONE && val != 0;
2020 }
2021
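/* For H.264 the reorder delay reported in has_b_frames can only be trusted
 * after a few frames have been seen (4 + has_b_frames here); all other
 * codecs are trusted immediately. */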
2022 static int has_decode_delay_been_guessed(AVStream *st)
2023 {
2024 return st->codec->codec_id != CODEC_ID_H264 ||
2025 st->codec_info_nb_frames >= 4 + st->codec->has_b_frames;
2026 }
2027
2028 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
2029 {
2030 int16_t *samples;
2031 AVCodec *codec;
2032 int got_picture, data_size, ret=0;
2033 AVFrame picture;
2034
2035 if(!st->codec->codec){
2036 codec = avcodec_find_decoder(st->codec->codec_id);
2037 if (!codec)
2038 return -1;
2039 ret = avcodec_open(st->codec, codec);
2040 if (ret < 0)
2041 return ret;
2042 }
2043
2044 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)){
2045 switch(st->codec->codec_type) {
2046 case AVMEDIA_TYPE_VIDEO:
2047 avcodec_get_frame_defaults(&picture);
2048 ret = avcodec_decode_video2(st->codec, &picture,
2049 &got_picture, avpkt);
2050 break;
2051 case AVMEDIA_TYPE_AUDIO:
2052 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2053 samples = av_malloc(data_size);
2054 if (!samples)
2055 return AVERROR(ENOMEM);
2056 ret = avcodec_decode_audio3(st->codec, samples,
2057 &data_size, avpkt);
2058 av_free(samples);
2059 break;
2060 default:
2061 break;
2062 }
2063 }
2065 return ret;
2066 }
2067
2068 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2069 {
2070 while (tags->id != CODEC_ID_NONE) {
2071 if (tags->id == id)
2072 return tags->tag;
2073 tags++;
2074 }
2075 return 0;
2076 }
2077
2078 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2079 {
2080 int i;
2081 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2082 if(tag == tags[i].tag)
2083 return tags[i].id;
2084 }
2085 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2086 if (ff_toupper4(tag) == ff_toupper4(tags[i].tag))
2087 return tags[i].id;
2088 }
2089 return CODEC_ID_NONE;
2090 }
2091
2092 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2093 {
2094 int i;
2095 for(i=0; tags && tags[i]; i++){
2096 int tag= ff_codec_get_tag(tags[i], id);
2097 if(tag) return tag;
2098 }
2099 return 0;
2100 }
2101
2102 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2103 {
2104 int i;
2105 for(i=0; tags && tags[i]; i++){
2106 enum CodecID id= ff_codec_get_id(tags[i], tag);
2107 if(id!=CODEC_ID_NONE) return id;
2108 }
2109 return CODEC_ID_NONE;
2110 }
2111
2112 static void compute_chapters_end(AVFormatContext *s)
2113 {
2114 unsigned int i;
2115
2116 for (i=0; i+1<s->nb_chapters; i++)
2117 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2118 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
2119 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
2120 s->chapters[i]->end = s->chapters[i+1]->start;
2121 }
2122
2123 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
2124 assert(s->start_time != AV_NOPTS_VALUE);
2125 assert(s->duration > 0);
2126 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
2127 AV_TIME_BASE_Q,
2128 s->chapters[i]->time_base);
2129 }
2130 }
2131
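/* get_std_framerate(i)/(12*1001) is the candidate frame rate in Hz that the
 * duration statistics in av_find_stream_info() are matched against:
 * indices below 60*12 cover 1/12 fps steps up to 60 fps
 * (e.g. i == 300 -> 300*1001/(12*1001) == 25 fps), and the last five entries
 * are the exact NTSC-style rates 24000/1001, 30000/1001, 60000/1001,
 * 12000/1001 and 15000/1001. */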
2132 #define MAX_STD_TIMEBASES (60*12+5)
2133 static int get_std_framerate(int i){
2134 if(i<60*12) return i*1001;
2135 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2136 }
2137
2138 /*
2139 * Is the time base unreliable?
2140 * This is a heuristic that balances quick acceptance of the values in
2141 * the headers against some extra checks.
2142 * Old DivX and Xvid often have nonsense timebases like 1 fps or 2 fps.
2143 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2144 * And there are "variable" fps files this needs to detect as well.
2145 */
2146 static int tb_unreliable(AVCodecContext *c){
2147 if( c->time_base.den >= 101L*c->time_base.num
2148 || c->time_base.den < 5L*c->time_base.num
2149 /* || c->codec_tag == AV_RL32("DIVX")
2150 || c->codec_tag == AV_RL32("XVID")*/
2151 || c->codec_id == CODEC_ID_MPEG2VIDEO
2152 || c->codec_id == CODEC_ID_H264
2153 )
2154 return 1;
2155 return 0;
2156 }
2157
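/* Typical caller-side sequence for this function (a minimal sketch only,
 * not part of this file; "input.mpg" and the simplified error handling are
 * placeholders):
 *
 *     AVFormatContext *ic = NULL;
 *     av_register_all();
 *     if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
 *         return -1;
 *     if (av_find_stream_info(ic) < 0) {
 *         av_close_input_file(ic);
 *         return -1;
 *     }
 *     dump_format(ic, 0, "input.mpg", 0);
 *     av_close_input_file(ic);
 */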
2158 int av_find_stream_info(AVFormatContext *ic)
2159 {
2160 int i, count, ret, read_size, j;
2161 AVStream *st;
2162 AVPacket pkt1, *pkt;
2163 int64_t last_dts[MAX_STREAMS];
2164 int64_t duration_gcd[MAX_STREAMS]={0};
2165 int duration_count[MAX_STREAMS]={0};
2166 double (*duration_error)[MAX_STD_TIMEBASES];
2167 int64_t old_offset = url_ftell(ic->pb);
2168 int64_t codec_info_duration[MAX_STREAMS]={0};
2169
2170 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2171 if (!duration_error) return AVERROR(ENOMEM);
2172
2173 for(i=0;i<ic->nb_streams;i++) {
2174 st = ic->streams[i];
2175 if (st->codec->codec_id == CODEC_ID_AAC) {
2176 st->codec->sample_rate = 0;
2177 st->codec->frame_size = 0;
2178 st->codec->channels = 0;
2179 }
2180 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
2181 /* if(!st->time_base.num)
2182 st->time_base= */
2183 if(!st->codec->time_base.num)
2184 st->codec->time_base= st->time_base;
2185 }
2186 //only needed for the extradata split (parser->split) below
2187 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2188 st->parser = av_parser_init(st->codec->codec_id);
2189 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2190 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2191 }
2192 }
2193 assert(!st->codec->codec);
2194 //try to just open decoders, in case this is enough to get parameters
2195 if(!has_codec_parameters(st->codec)){
2196 AVCodec *codec = avcodec_find_decoder(st->codec->codec_id);
2197 if (codec)
2198 avcodec_open(st->codec, codec);
2199 }
2200 }
2201
2202 for(i=0;i<MAX_STREAMS;i++){
2203 last_dts[i]= AV_NOPTS_VALUE;
2204 }
2205
2206 count = 0;
2207 read_size = 0;
2208 for(;;) {
2209 if(url_interrupt_cb()){
2210 ret= AVERROR(EINTR);
2211 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2212 break;
2213 }
2214
2215 /* check if one codec still needs to be handled */
2216 for(i=0;i<ic->nb_streams;i++) {
2217 st = ic->streams[i];
2218 if (!has_codec_parameters(st->codec))
2219 break;
2220 /* variable fps and no guess at the real fps */
2221 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2222 && duration_count[i]<20 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2223 break;
2224 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2225 break;
2226 if(st->first_dts == AV_NOPTS_VALUE)
2227 break;
2228 }
2229 if (i == ic->nb_streams) {
2230 /* NOTE: if the format has no header, then we need to read
2231 some packets to get most of the streams, so we cannot
2232 stop here */
2233 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2234 /* if we found the info for all the codecs, we can stop */
2235 ret = count;
2236 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2237 break;
2238 }
2239 }
2240 /* we did not get all the codec info, but we read too much data */
2241 if (read_size >= ic->probesize) {
2242 ret = count;
2243 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2244 break;
2245 }
2246
2247 /* NOTE: a new stream can be added here if the file has no header
2248 (AVFMTCTX_NOHEADER) */
2249 ret = av_read_frame_internal(ic, &pkt1);
2250 if(ret == AVERROR(EAGAIN))
2251 continue;
2252 if (ret < 0) {
2253 /* EOF or error */
2254 ret = -1; /* we could not have all the codec parameters before EOF */
2255 for(i=0;i<ic->nb_streams;i++) {
2256 st = ic->streams[i];
2257 if (!has_codec_parameters(st->codec)){
2258 char buf[256];
2259 avcodec_string(buf, sizeof(buf), st->codec, 0);
2260 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2261 } else {
2262 ret = 0;
2263 }
2264 }
2265 break;
2266 }
2267
2268 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2269 if(av_dup_packet(pkt) < 0) {
2270 av_free(duration_error);
2271 return AVERROR(ENOMEM);
2272 }
2273
2274 read_size += pkt->size;
2275
2276 st = ic->streams[pkt->stream_index];
2277 if(st->codec_info_nb_frames>1) {
2278 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){
2279 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2280 break;
2281 }
2282 codec_info_duration[st->index] += pkt->duration;
2283 }
2284 {
2285 int index= pkt->stream_index;
2286 int64_t last= last_dts[index];
2287 int64_t duration= pkt->dts - last;
2288
2289 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2290 double dur= duration * av_q2d(st->time_base);
2291
2292 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2293 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2294 if(duration_count[index] < 2)
2295 memset(duration_error[index], 0, sizeof(*duration_error));
2296 for(i=1; i<MAX_STD_TIMEBASES; i++){
2297 int framerate= get_std_framerate(i);
2298 int ticks= lrintf(dur*framerate/(1001*12));
2299 double error= dur - ticks*1001*12/(double)framerate;
2300 duration_error[index][i] += error*error;
2301 }
2302 duration_count[index]++;
2303 // ignore the first 4 values, they might have some random jitter
2304 if (duration_count[index] > 3)
2305 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
2306 }
2307 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2308 last_dts[pkt->stream_index]= pkt->dts;
2309 }
2310 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2311 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2312 if(i){
2313 st->codec->extradata_size= i;
2314 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2315 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2316 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2317 }
2318 }
2319
2320 /* if we still have no information, we try to open the codec and
2321 decode the frame. We try to avoid that in most cases as
2322 it takes longer and uses more memory. For MPEG-4 video stored in
2323 QuickTime files, decoding is needed to obtain the missing parameters. */
2324 if (!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st))
2325 try_decode_frame(st, pkt);
2326
2327 st->codec_info_nb_frames++;
2328 count++;
2329 }
2330
2331 // close codecs which were opened in try_decode_frame()
2332 for(i=0;i<ic->nb_streams;i++) {
2333 st = ic->streams[i];
2334 if(st->codec->codec)
2335 avcodec_close(st->codec);
2336 }
2337 for(i=0;i<ic->nb_streams;i++) {
2338 st = ic->streams[i];
2339 if(st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && codec_info_duration[i])
2340 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2341 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2342 codec_info_duration[i] *(int64_t)st->time_base.num, 60000);
2343 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2344 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2345 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2346
2347 // the check for tb_unreliable() is not completely correct, since this is not about handling
2348 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2349 // ipmovie.c produces.
2350 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1 && !st->r_frame_rate.num)
2351 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2352 if(duration_count[i] && !st->r_frame_rate.num
2353 && tb_unreliable(st->codec) /*&&
2354 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2355 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2356 int num = 0;
2357 double best_error= 2*av_q2d(st->time_base);
2358 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2359
2360 for(j=1; j<MAX_STD_TIMEBASES; j++){
2361 double error= duration_error[i][j] * get_std_framerate(j);
2362 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2363 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2364 if(error < best_error){
2365 best_error= error;
2366 num = get_std_framerate(j);
2367 }
2368 }
2369 // do not increase frame rate by more than 1 % in order to match a standard rate.
2370 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2371 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2372 }
2373
2374 if (!st->r_frame_rate.num){
2375 if( st->codec->time_base.den * (int64_t)st->time_base.num
2376 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2377 st->r_frame_rate.num = st->codec->time_base.den;
2378 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2379 }else{
2380 st->r_frame_rate.num = st->time_base.den;
2381 st->r_frame_rate.den = st->time_base.num;
2382 }
2383 }
2384 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2385 if(!st->codec->bits_per_coded_sample)
2386 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2387 }
2388 }
2389
2390 av_estimate_timings(ic, old_offset);
2391
2392 compute_chapters_end(ic);
2393
2394 #if 0
2395 /* correct DTS for B-frame streams with no timestamps */
2396 for(i=0;i<ic->nb_streams;i++) {
2397 st = ic->streams[i];
2398 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2399 if(b-frames){
2400 ppktl = &ic->packet_buffer;
2401 while(ppkt1){
2402 if(ppkt1->stream_index != i)
2403 continue;
2404 if(ppkt1->pkt->dts < 0)
2405 break;
2406 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2407 break;
2408 ppkt1->pkt->dts -= delta;
2409 ppkt1= ppkt1->next;
2410 }
2411 if(ppkt1)
2412 continue;
2413 st->cur_dts -= delta;
2414 }
2415 }
2416 }
2417 #endif
2418
2419 av_free(duration_error);
2420
2421 return ret;
2422 }
2423
2424 /*******************************************************/
2425
2426 int av_read_play(AVFormatContext *s)
2427 {
2428 if (s->iformat->read_play)
2429 return s->iformat->read_play(s);
2430 if (s->pb)
2431 return av_url_read_fpause(s->pb, 0);
2432 return AVERROR(ENOSYS);
2433 }
2434
2435 int av_read_pause(AVFormatContext *s)
2436 {
2437 if (s->iformat->read_pause)
2438 return s->iformat->read_pause(s);
2439 if (s->pb)
2440 return av_url_read_fpause(s->pb, 1);
2441 return AVERROR(ENOSYS);
2442 }
2443
2444 void av_close_input_stream(AVFormatContext *s)
2445 {
2446 int i;
2447 AVStream *st;
2448
2449 if (s->iformat->read_close)
2450 s->iformat->read_close(s);
2451 for(i=0;i<s->nb_streams;i++) {
2452 /* free all data in a stream component */
2453 st = s->streams[i];
2454 if (st->parser) {
2455 av_parser_close(st->parser);
2456 av_free_packet(&st->cur_pkt);
2457 }
2458 av_metadata_free(&st->metadata);
2459 av_free(st->index_entries);
2460 av_free(st->codec->extradata);
2461 av_free(st->codec);
2462 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2463 av_free(st->filename);
2464 #endif
2465 av_free(st->priv_data);
2466 av_free(st);
2467 }
2468 for(i=s->nb_programs-1; i>=0; i--) {
2469 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2470 av_freep(&s->programs[i]->provider_name);
2471 av_freep(&s->programs[i]->name);
2472 #endif
2473 av_metadata_free(&s->programs[i]->metadata);
2474 av_freep(&s->programs[i]->stream_index);
2475 av_freep(&s->programs[i]);
2476 }
2477 av_freep(&s->programs);
2478 flush_packet_queue(s);
2479 av_freep(&s->priv_data);
2480 while(s->nb_chapters--) {
2481 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2482 av_free(s->chapters[s->nb_chapters]->title);
2483 #endif
2484 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2485 av_free(s->chapters[s->nb_chapters]);
2486 }
2487 av_freep(&s->chapters);
2488 av_metadata_free(&s->metadata);
2489 av_free(s);
2490 }
2491
2492 void av_close_input_file(AVFormatContext *s)
2493 {
2494 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2495 av_close_input_stream(s);
2496 if (pb)
2497 url_fclose(pb);
2498 }
2499
2500 AVStream *av_new_stream(AVFormatContext *s, int id)
2501 {
2502 AVStream *st;
2503 int i;
2504
2505 if (s->nb_streams >= MAX_STREAMS){
2506 av_log(s, AV_LOG_ERROR, "Too many streams\n");
2507 return NULL;
2508 }
2509
2510 st = av_mallocz(sizeof(AVStream));
2511 if (!st)
2512 return NULL;
2513
2514 st->codec= avcodec_alloc_context();
2515 if (s->iformat) {
2516 /* no default bitrate if decoding */
2517 st->codec->bit_rate = 0;
2518 }
2519 st->index = s->nb_streams;
2520 st->id = id;
2521 st->start_time = AV_NOPTS_VALUE;
2522 st->duration = AV_NOPTS_VALUE;
2523 /* We set the current DTS to 0 so that formats without any timestamps
2524 but with durations still get timestamps; formats with some unknown
2525 timestamps have their first few packets buffered and the
2526 timestamps corrected before they are returned to the user. */
2527 st->cur_dts = 0;
2528 st->first_dts = AV_NOPTS_VALUE;
2529 st->probe_packets = MAX_PROBE_PACKETS;
2530
2531 /* default pts setting is MPEG-like */
2532 av_set_pts_info(st, 33, 1, 90000);
2533 st->last_IP_pts = AV_NOPTS_VALUE;
2534 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2535 st->pts_buffer[i]= AV_NOPTS_VALUE;
2536 st->reference_dts = AV_NOPTS_VALUE;
2537
2538 st->sample_aspect_ratio = (AVRational){0,1};
2539
2540 s->streams[s->nb_streams++] = st;
2541 return st;
2542 }
2543
2544 AVProgram *av_new_program(AVFormatContext *ac, int id)
2545 {
2546 AVProgram *program=NULL;
2547 int i;
2548
2549 #ifdef DEBUG_SI
2550 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2551 #endif
2552
2553 for(i=0; i<ac->nb_programs; i++)
2554 if(ac->programs[i]->id == id)
2555 program = ac->programs[i];
2556
2557 if(!program){
2558 program = av_mallocz(sizeof(AVProgram));
2559 if (!program)
2560 return NULL;
2561 dynarray_add(&ac->programs, &ac->nb_programs, program);
2562 program->discard = AVDISCARD_NONE;
2563 }
2564 program->id = id;
2565
2566 return program;
2567 }
2568
2569 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2570 {
2571 AVChapter *chapter = NULL;
2572 int i;
2573
2574 for(i=0; i<s->nb_chapters; i++)
2575 if(s->chapters[i]->id == id)
2576 chapter = s->chapters[i];
2577
2578 if(!chapter){
2579 chapter= av_mallocz(sizeof(AVChapter));
2580 if(!chapter)
2581 return NULL;
2582 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2583 }
2584 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2585 av_free(chapter->title);
2586 #endif
2587 av_metadata_set2(&chapter->metadata, "title", title, 0);
2588 chapter->id = id;
2589 chapter->time_base= time_base;
2590 chapter->start = start;
2591 chapter->end = end;
2592
2593 return chapter;
2594 }
2595
2596 /************************************************************/
2597 /* output media file */
2598
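/* Rough muxing call order for this API (a sketch only; codec setup, error
 * handling and the "out.avi" name are placeholders):
 *
 *     AVFormatContext *s = avformat_alloc_context();
 *     s->oformat = av_guess_format(NULL, "out.avi", NULL);
 *     AVStream *st = av_new_stream(s, 0);
 *     // ... fill st->codec with the encoder parameters ...
 *     av_set_parameters(s, NULL);
 *     url_fopen(&s->pb, "out.avi", URL_WRONLY);
 *     av_write_header(s);
 *     // av_interleaved_write_frame(s, &pkt) for each encoded packet
 *     av_write_trailer(s);
 *     url_fclose(s->pb);
 */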
2599 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2600 {
2601 int ret;
2602
2603 if (s->oformat->priv_data_size > 0) {
2604 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2605 if (!s->priv_data)
2606 return AVERROR(ENOMEM);
2607 } else
2608 s->priv_data = NULL;
2609
2610 if (s->oformat->set_parameters) {
2611 ret = s->oformat->set_parameters(s, ap);
2612 if (ret < 0)
2613 return ret;
2614 }
2615 return 0;
2616 }
2617
2618 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2619 {
2620 const AVCodecTag *avctag;
2621 int n;
2622 enum CodecID id = CODEC_ID_NONE;
2623 unsigned int tag = 0;
2624
2625 /**
2626 * Check that tag + id is in the table
2627 * If neither is in the table -> OK
2628 * If tag is in the table with another id -> FAIL
2629 * If id is in the table with another tag -> FAIL unless strict < normal
2630 */
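// e.g. when muxing CODEC_ID_H264 into AVI: a codec_tag of "H264" is in the
// RIFF table with the same id -> accepted; a codec_tag of "XVID" maps to a
// different id in that table -> rejected (assuming the usual RIFF tag table).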
2631 for (n = 0; s->oformat->codec_tag[n]; n++) {
2632 avctag = s->oformat->codec_tag[n];
2633 while (avctag->id != CODEC_ID_NONE) {
2634 if (ff_toupper4(avctag->tag) == ff_toupper4(st->codec->codec_tag)) {
2635 id = avctag->id;
2636 if (id == st->codec->codec_id)
2637 return 1;
2638 }
2639 if (avctag->id == st->codec->codec_id)
2640 tag = avctag->tag;
2641 avctag++;
2642 }
2643 }
2644 if (id != CODEC_ID_NONE)
2645 return 0;
2646 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
2647 return 0;
2648 return 1;
2649 }
2650
2651 int av_write_header(AVFormatContext *s)
2652 {
2653 int ret, i;
2654 AVStream *st;
2655
2656 // some sanity checks
2657 if (s->nb_streams == 0) {
2658 av_log(s, AV_LOG_ERROR, "no streams\n");
2659 return AVERROR(EINVAL);
2660 }
2661
2662 for(i=0;i<s->nb_streams;i++) {
2663 st = s->streams[i];
2664
2665 switch (st->codec->codec_type) {
2666 case AVMEDIA_TYPE_AUDIO:
2667 if(st->codec->sample_rate<=0){
2668 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2669 return AVERROR(EINVAL);
2670 }
2671 if(!st->codec->block_align)
2672 st->codec->block_align = st->codec->channels *
2673 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2674 break;
2675 case AVMEDIA_TYPE_VIDEO:
2676 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2677 av_log(s, AV_LOG_ERROR, "time base not set\n");
2678 return AVERROR(EINVAL);
2679 }
2680 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2681 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2682 return AVERROR(EINVAL);
2683 }
2684 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2685 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2686 return AVERROR(EINVAL);
2687 }
2688 break;
2689 }
2690
2691 if(s->oformat->codec_tag){
2692 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2693 //the current rawvideo encoding system ends up setting the wrong codec_tag for AVI; we override it here
2694 st->codec->codec_tag= 0;
2695 }
2696 if(st->codec->codec_tag){
2697 if (!validate_codec_tag(s, st)) {
2698 char tagbuf[32];
2699 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2700 av_log(s, AV_LOG_ERROR,
2701 "Tag %s/0x%08x incompatible with output codec '%s'\n",
2702 tagbuf, st->codec->codec_tag, st->codec->codec->name);
2703 return AVERROR_INVALIDDATA;
2704 }
2705 }else
2706 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2707 }
2708
2709 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2710 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2711 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2712 }
2713
2714 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2715 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2716 if (!s->priv_data)
2717 return AVERROR(ENOMEM);
2718 }
2719
2720 #if LIBAVFORMAT_VERSION_MAJOR < 53
2721 ff_metadata_mux_compat(s);
2722 #endif
2723
2724 /* set muxer identification string */
2725 if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2726 AVMetadata *m;
2727 AVMetadataTag *t;
2728
2729 if (!(m = av_mallocz(sizeof(AVMetadata))))
2730 return AVERROR(ENOMEM);
2731 av_metadata_set2(&m, "encoder", LIBAVFORMAT_IDENT, 0);
2732 metadata_conv(&m, s->oformat->metadata_conv, NULL);
2733 if ((t = av_metadata_get(m, "", NULL, AV_METADATA_IGNORE_SUFFIX)))
2734 av_metadata_set2(&s->metadata, t->key, t->value, 0);
2735 av_metadata_free(&m);
2736 }
2737
2738 if(s->oformat->write_header){
2739 ret = s->oformat->write_header(s);
2740 if (ret < 0)
2741 return ret;
2742 }
2743
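/* The per-stream pts fraction initialized below advances in st->time_base
 * units without accumulating rounding error: e.g. with a 1/90000 time base
 * and 48 kHz audio, den = 1 * 48000, and each frame of N samples later adds
 * exactly 90000*N/48000 ticks, with the remainder carried in the fraction. */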
2744 /* init PTS generation */
2745 for(i=0;i<s->nb_streams;i++) {
2746 int64_t den = AV_NOPTS_VALUE;
2747 st = s->streams[i];
2748
2749 switch (st->codec->codec_type) {
2750 case AVMEDIA_TYPE_AUDIO:
2751 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2752 break;
2753 case AVMEDIA_TYPE_VIDEO:
2754 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2755 break;
2756 default:
2757 break;
2758 }
2759 if (den != AV_NOPTS_VALUE) {
2760 if (den <= 0)
2761 return AVERROR_INVALIDDATA;
2762 av_frac_init(&st->pts, 0, 0, den);
2763 }
2764 }
2765 return 0;
2766 }
2767
2768 //FIXME merge with compute_pkt_fields
2769 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
2770 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2771 int num, den, frame_size, i;
2772
2773 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2774
2775 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2776 return -1;*/
2777
2778 /* duration field */
2779 if (pkt->duration == 0) {
2780 compute_frame_duration(&num, &den, st, NULL, pkt);
2781 if (den && num) {
2782 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2783 }
2784 }
2785
2786 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2787 pkt->pts= pkt->dts;
2788
2789 //XXX/FIXME this is a temporary hack until all encoders output pts
2790 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2791 pkt->dts=
2792 // pkt->pts= st->cur_dts;
2793 pkt->pts= st->pts.val;
2794 }
2795
2796 //calculate dts from pts
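// st->pts_buffer holds the most recent (delay+1) pts values in sorted order;
// the smallest of them becomes the dts, so the dts lags the pts by the
// reorder delay.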
2797 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2798 st->pts_buffer[0]= pkt->pts;
2799 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2800 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
2801 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2802 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2803
2804 pkt->dts= st->pts_buffer[0];
2805 }
2806
2807 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2808 av_log(s, AV_LOG_ERROR,
2809 "st:%d error, non monotone timestamps %"PRId64" >= %"PRId64"\n",
2810 st->index, st->cur_dts, pkt->dts);
2811 return -1;
2812 }
2813 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2814 av_log(s, AV_LOG_ERROR, "st:%d error, pts < dts\n", st->index);
2815 return -1;
2816 }
2817
2818 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2819 st->cur_dts= pkt->dts;
2820 st->pts.val= pkt->dts;
2821
2822 /* update pts */
2823 switch (st->codec->codec_type) {
2824 case AVMEDIA_TYPE_AUDIO:
2825 frame_size = get_audio_frame_size(st->codec, pkt->size);
2826
2827 /* HACK/FIXME: we skip the initial zero-sized packets as they most
2828 likely correspond to the encoder delay, but it would be better if we
2829 had the real timestamps from the encoder */
2830 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2831 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2832 }
2833 break;
2834 case AVMEDIA_TYPE_VIDEO:
2835 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2836 break;
2837 default:
2838 break;
2839 }
2840 return 0;
2841 }
2842
2843 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2844 {
2845 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
2846
2847 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2848 return ret;
2849
2850 ret= s->oformat->write_packet(s, pkt);
2851 if(!ret)
2852 ret= url_ferror(s->pb);
2853 return ret;
2854 }
2855
2856 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2857 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2858 {
2859 AVPacketList **next_point, *this_pktl;
2860
2861 this_pktl = av_mallocz(sizeof(AVPacketList));
2862 this_pktl->pkt= *pkt;
2863 pkt->destruct= NULL; // do not free the original packet, only the copy
2864 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-allocated memory
2865
2866 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
2867 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
2868 }else
2869 next_point = &s->packet_buffer;
2870
2871 if(*next_point){
2872 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
2873 while(!compare(s, &(*next_point)->pkt, pkt)){
2874 next_point= &(*next_point)->next;
2875 }
2876 goto next_non_null;
2877 }else{
2878 next_point = &(s->packet_buffer_end->next);
2879 }
2880 }
2881 assert(!*next_point);
2882
2883 s->packet_buffer_end= this_pktl;
2884 next_non_null:
2885
2886 this_pktl->next= *next_point;
2887
2888 s->streams[pkt->stream_index]->last_in_packet_buffer=
2889 *next_point= this_pktl;
2890 }
2891
2892 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2893 {
2894 AVStream *st = s->streams[ pkt ->stream_index];
2895 AVStream *st2= s->streams[ next->stream_index];
2896 int64_t a= st2->time_base.num * (int64_t)st ->time_base.den;
2897 int64_t b= st ->time_base.num * (int64_t)st2->time_base.den;
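// pkt->dts * b / a converts pkt's dts from st's time base into st2's (next's)
// time base, e.g. dts 90000 in 1/90000 units becomes 1000 in 1/1000 units,
// so the comparison below happens on a common time scale.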
2898 return av_rescale_rnd(pkt->dts, b, a, AV_ROUND_DOWN) < next->dts;
2899 }
2900
2901 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2902 AVPacketList *pktl;
2903 int stream_count=0;
2904 int i;
2905
2906 if(pkt){
2907 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
2908 }
2909
2910 for(i=0; i < s->nb_streams; i++)
2911 stream_count+= !!s->streams[i]->last_in_packet_buffer;
2912
2913 if(stream_count && (s->nb_streams == stream_count || flush)){
2914 pktl= s->packet_buffer;
2915 *out= pktl->pkt;
2916
2917 s->packet_buffer= pktl->next;
2918 if(!s->packet_buffer)
2919 s->packet_buffer_end= NULL;
2920
2921 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
2922 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
2923 av_freep(&pktl);
2924 return 1;
2925 }else{
2926 av_init_packet(out);
2927 return 0;
2928 }
2929 }
2930
2931 /**
2932 * Interleave an AVPacket correctly so it can be muxed.
2933 * @param out the interleaved packet will be output here
2934 * @param in the input packet
2935 * @param flush 1 if no further packets are available as input and all
2936 * remaining packets should be output
2937 * @return 1 if a packet was output, 0 if no packet could be output,
2938 * < 0 if an error occurred
2939 */
2940 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2941 if(s->oformat->interleave_packet)
2942 return s->oformat->interleave_packet(s, out, in, flush);
2943 else
2944 return av_interleave_packet_per_dts(s, out, in, flush);
2945 }
2946
2947 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2948 AVStream *st= s->streams[ pkt->stream_index];
2949
2950 //FIXME/XXX/HACK drop zero sized packets
2951 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
2952 return 0;
2953
2954 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2955 if(compute_pkt_fields2(s, st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2956 return -1;
2957
2958 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2959 return -1;
2960
2961 for(;;){
2962 AVPacket opkt;
2963 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2964 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2965 return ret;
2966
2967 ret= s->oformat->write_packet(s, &opkt);
2968
2969 av_free_packet(&opkt);
2970 pkt= NULL;
2971
2972 if(ret<0)
2973 return ret;
2974 if(url_ferror(s->pb))
2975 return url_ferror(s->pb);
2976 }
2977 }
2978
2979 int av_write_trailer(AVFormatContext *s)
2980 {
2981 int ret, i;
2982
2983 for(;;){
2984 AVPacket pkt;
2985 ret= av_interleave_packet(s, &pkt, NULL, 1);
2986 if(ret<0) //FIXME cleanup needed for ret<0 ?
2987 goto fail;
2988 if(!ret)
2989 break;
2990
2991 ret= s->oformat->write_packet(s, &pkt);
2992
2993 av_free_packet(&pkt);
2994
2995 if(ret<0)
2996 goto fail;
2997 if(url_ferror(s->pb))
2998 goto fail;
2999 }
3000
3001 if(s->oformat->write_trailer)
3002 ret = s->oformat->write_trailer(s);
3003 fail:
3004 if(ret == 0)
3005 ret=url_ferror(s->pb);
3006 for(i=0;i<s->nb_streams;i++) {
3007 av_freep(&s->streams[i]->priv_data);
3008 av_freep(&s->streams[i]->index_entries);
3009 }
3010 av_freep(&s->priv_data);
3011 return ret;
3012 }
3013
3014 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3015 {
3016 int i, j;
3017 AVProgram *program=NULL;
3018 void *tmp;
3019
3020 if (idx >= ac->nb_streams) {
3021 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3022 return;
3023 }
3024
3025 for(i=0; i<ac->nb_programs; i++){
3026 if(ac->programs[i]->id != progid)
3027 continue;
3028 program = ac->programs[i];
3029 for(j=0; j<program->nb_stream_indexes; j++)
3030 if(program->stream_index[j] == idx)
3031 return;
3032
3033 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3034 if(!tmp)
3035 return;
3036 program->stream_index = tmp;
3037 program->stream_index[program->nb_stream_indexes++] = idx;
3038 return;
3039 }
3040 }
3041
3042 static void print_fps(double d, const char *postfix){
3043 uint64_t v= lrintf(d*100);
3044 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3045 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3046 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3047 }
3048
3049 static void dump_metadata(void *ctx, AVMetadata *m, const char *indent)
3050 {
3051 if(m && !(m->count == 1 && av_metadata_get(m, "language", NULL, 0))){
3052 AVMetadataTag *tag=NULL;
3053
3054 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3055 while((tag=av_metadata_get(m, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
3056 if(strcmp("language", tag->key))
3057 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
3058 }
3059 }
3060 }
3061
3062 /* "user interface" functions */
3063 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3064 {
3065 char buf[256];
3066 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3067 AVStream *st = ic->streams[i];
3068 int g = av_gcd(st->time_base.num, st->time_base.den);
3069 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
3070 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3071 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
3072 /* the PID is important information, so we display it */
3073 /* XXX: add a generic system */
3074 if (flags & AVFMT_SHOW_IDS)
3075 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3076 if (lang)
3077 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3078 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3079 av_log(NULL, AV_LOG_INFO, ": %s", buf);
3080 if (st->sample_aspect_ratio.num && // default
3081 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3082 AVRational display_aspect_ratio;
3083 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3084 st->codec->width*st->sample_aspect_ratio.num,
3085 st->codec->height*st->sample_aspect_ratio.den,
3086 1024*1024);
3087 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
3088 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3089 display_aspect_ratio.num, display_aspect_ratio.den);
3090 }
3091 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3092 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3093 print_fps(av_q2d(st->avg_frame_rate), "fps");
3094 if(st->r_frame_rate.den && st->r_frame_rate.num)
3095 print_fps(av_q2d(st->r_frame_rate), "tbr");
3096 if(st->time_base.den && st->time_base.num)
3097 print_fps(1/av_q2d(st->time_base), "tbn");
3098 if(st->codec->time_base.den && st->codec->time_base.num)
3099 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3100 }
3101 av_log(NULL, AV_LOG_INFO, "\n");
3102 dump_metadata(NULL, st->metadata, " ");
3103 }
3104
3105 void dump_format(AVFormatContext *ic,
3106 int index,
3107 const char *url,
3108 int is_output)