1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /* #define DEBUG */
23
24 #include "avformat.h"
25 #include "avio_internal.h"
26 #include "internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/dict.h"
30 #include "libavutil/pixdesc.h"
31 #include "metadata.h"
32 #include "id3v2.h"
33 #include "libavutil/avstring.h"
34 #include "libavutil/mathematics.h"
35 #include "riff.h"
36 #include "audiointerleave.h"
37 #include "url.h"
38 #include <sys/time.h>
39 #include <time.h>
40 #include <stdarg.h>
41 #if CONFIG_NETWORK
42 #include "network.h"
43 #endif
44
45 #undef NDEBUG
46 #include <assert.h>
47
48 /**
49 * @file
50 * various utility functions for use within Libav
51 */
52
53 unsigned avformat_version(void)
54 {
55 return LIBAVFORMAT_VERSION_INT;
56 }
57
58 const char *avformat_configuration(void)
59 {
60 return LIBAV_CONFIGURATION;
61 }
62
63 const char *avformat_license(void)
64 {
65 #define LICENSE_PREFIX "libavformat license: "
66 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
67 }
68
69 /* fraction handling */
70
71 /**
72 * f = val + (num / den) + 0.5.
73 *
74 * 'num' is normalized so that 0 <= num < den.
75 *
76 * @param f fractional number
77 * @param val integer value
78 * @param num must be >= 0
79 * @param den must be >= 1
80 */
81 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
82 {
83 num += (den >> 1);
84 if (num >= den) {
85 val += num / den;
86 num = num % den;
87 }
88 f->val = val;
89 f->num = num;
90 f->den = den;
91 }
92
93 /**
94 * Fractional addition to f: f = f + (incr / f->den).
95 *
96 * @param f fractional number
97 * @param incr increment, can be positive or negative
98 */
99 static void frac_add(AVFrac *f, int64_t incr)
100 {
101 int64_t num, den;
102
103 num = f->num + incr;
104 den = f->den;
105 if (num < 0) {
106 f->val += num / den;
107 num = num % den;
108 if (num < 0) {
109 num += den;
110 f->val--;
111 }
112 } else if (num >= den) {
113 f->val += num / den;
114 num = num % den;
115 }
116 f->num = num;
117 }
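/* Worked example of the fraction helpers above (values are illustrative):
 * with f->den = 3 and f holding val = 10, num = 0, frac_add(f, 7) first gives
 * num = 7, which normalizes to val = 12, num = 1, i.e. 10 + 7/3 = 12 + 1/3.
 * This lets timestamp bookkeeping advance by per-packet increments without
 * losing sub-unit precision. */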
118
119 /** head of registered input format linked list */
120 static AVInputFormat *first_iformat = NULL;
121 /** head of registered output format linked list */
122 static AVOutputFormat *first_oformat = NULL;
123
124 AVInputFormat *av_iformat_next(AVInputFormat *f)
125 {
126 if(f) return f->next;
127 else return first_iformat;
128 }
129
130 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
131 {
132 if(f) return f->next;
133 else return first_oformat;
134 }
135
136 void av_register_input_format(AVInputFormat *format)
137 {
138 AVInputFormat **p;
139 p = &first_iformat;
140 while (*p != NULL) p = &(*p)->next;
141 *p = format;
142 format->next = NULL;
143 }
144
145 void av_register_output_format(AVOutputFormat *format)
146 {
147 AVOutputFormat **p;
148 p = &first_oformat;
149 while (*p != NULL) p = &(*p)->next;
150 *p = format;
151 format->next = NULL;
152 }
153
154 int av_match_ext(const char *filename, const char *extensions)
155 {
156 const char *ext, *p;
157 char ext1[32], *q;
158
159 if(!filename)
160 return 0;
161
162 ext = strrchr(filename, '.');
163 if (ext) {
164 ext++;
165 p = extensions;
166 for(;;) {
167 q = ext1;
168 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
169 *q++ = *p++;
170 *q = '\0';
171 if (!av_strcasecmp(ext1, ext))
172 return 1;
173 if (*p == '\0')
174 break;
175 p++;
176 }
177 }
178 return 0;
179 }
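/* For instance, av_match_ext("clip.MKV", "mkv,webm") returns 1: the match is
 * case-insensitive and each comma-separated extension is tried in turn. */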
180
181 static int match_format(const char *name, const char *names)
182 {
183 const char *p;
184 int len, namelen;
185
186 if (!name || !names)
187 return 0;
188
189 namelen = strlen(name);
190 while ((p = strchr(names, ','))) {
191 len = FFMAX(p - names, namelen);
192 if (!av_strncasecmp(name, names, len))
193 return 1;
194 names = p+1;
195 }
196 return !av_strcasecmp(name, names);
197 }
198
199 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
200 const char *mime_type)
201 {
202 AVOutputFormat *fmt = NULL, *fmt_found;
203 int score_max, score;
204
205 /* specific test for image sequences */
206 #if CONFIG_IMAGE2_MUXER
207 if (!short_name && filename &&
208 av_filename_number_test(filename) &&
209 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
210 return av_guess_format("image2", NULL, NULL);
211 }
212 #endif
213 /* Find the proper file type. */
214 fmt_found = NULL;
215 score_max = 0;
216 while ((fmt = av_oformat_next(fmt))) {
217 score = 0;
218 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
219 score += 100;
220 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
221 score += 10;
222 if (filename && fmt->extensions &&
223 av_match_ext(filename, fmt->extensions)) {
224 score += 5;
225 }
226 if (score > score_max) {
227 score_max = score;
228 fmt_found = fmt;
229 }
230 }
231 return fmt_found;
232 }
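/* A minimal usage sketch (the file name is illustrative and the muxer must
 * have been registered, e.g. via av_register_all()):
 *
 *     AVOutputFormat *ofmt = av_guess_format(NULL, "out.mov", NULL);
 *     if (ofmt)
 *         av_log(NULL, AV_LOG_INFO, "guessed muxer: %s\n", ofmt->name);
 *
 * An exact short_name match scores highest, then the MIME type, then the
 * file extension, as implemented above. */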
233
234 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
235 const char *filename, const char *mime_type, enum AVMediaType type){
236 if(type == AVMEDIA_TYPE_VIDEO){
237 enum CodecID codec_id= CODEC_ID_NONE;
238
239 #if CONFIG_IMAGE2_MUXER
240 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
241 codec_id= ff_guess_image2_codec(filename);
242 }
243 #endif
244 if(codec_id == CODEC_ID_NONE)
245 codec_id= fmt->video_codec;
246 return codec_id;
247 }else if(type == AVMEDIA_TYPE_AUDIO)
248 return fmt->audio_codec;
249 else if (type == AVMEDIA_TYPE_SUBTITLE)
250 return fmt->subtitle_codec;
251 else
252 return CODEC_ID_NONE;
253 }
254
255 AVInputFormat *av_find_input_format(const char *short_name)
256 {
257 AVInputFormat *fmt = NULL;
258 while ((fmt = av_iformat_next(fmt))) {
259 if (match_format(short_name, fmt->name))
260 return fmt;
261 }
262 return NULL;
263 }
264
265
266 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
267 {
268 int ret= av_new_packet(pkt, size);
269
270 if(ret<0)
271 return ret;
272
273 pkt->pos= avio_tell(s);
274
275 ret= avio_read(s, pkt->data, size);
276 if(ret<=0)
277 av_free_packet(pkt);
278 else
279 av_shrink_packet(pkt, ret);
280
281 return ret;
282 }
283
284 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
285 {
286 int ret;
287 int old_size;
288 if (!pkt->size)
289 return av_get_packet(s, pkt, size);
290 old_size = pkt->size;
291 ret = av_grow_packet(pkt, size);
292 if (ret < 0)
293 return ret;
294 ret = avio_read(s, pkt->data + old_size, size);
295 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
296 return ret;
297 }
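/* Typical use is inside a demuxer's read_packet() callback; a sketch with a
 * hypothetical demuxer and a fixed chunk size of 1024 bytes:
 *
 *     static int mydemux_read_packet(AVFormatContext *s, AVPacket *pkt)
 *     {
 *         int ret = av_get_packet(s->pb, pkt, 1024);
 *         if (ret <= 0)                    // the packet is freed on error/EOF
 *             return ret < 0 ? ret : AVERROR_EOF;
 *         pkt->stream_index = 0;           // pkt->pos already holds the read offset
 *         return ret;
 *     }
 */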
298
299
300 int av_filename_number_test(const char *filename)
301 {
302 char buf[1024];
303 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
304 }
305
306 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
307 {
308 AVProbeData lpd = *pd;
309 AVInputFormat *fmt1 = NULL, *fmt;
310 int score, id3 = 0;
311
312 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
313 int id3len = ff_id3v2_tag_len(lpd.buf);
314 if (lpd.buf_size > id3len + 16) {
315 lpd.buf += id3len;
316 lpd.buf_size -= id3len;
317 }
318 id3 = 1;
319 }
320
321 fmt = NULL;
322 while ((fmt1 = av_iformat_next(fmt1))) {
323 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
324 continue;
325 score = 0;
326 if (fmt1->read_probe) {
327 score = fmt1->read_probe(&lpd);
328 } else if (fmt1->extensions) {
329 if (av_match_ext(lpd.filename, fmt1->extensions)) {
330 score = 50;
331 }
332 }
333 if (score > *score_max) {
334 *score_max = score;
335 fmt = fmt1;
336 }else if (score == *score_max)
337 fmt = NULL;
338 }
339
340 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */
341 if (!fmt && is_opened && *score_max < AVPROBE_SCORE_MAX/4) {
342 while ((fmt = av_iformat_next(fmt)))
343 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) {
344 *score_max = AVPROBE_SCORE_MAX/4;
345 break;
346 }
347 }
348
349 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4-1) {
350 while ((fmt = av_iformat_next(fmt)))
351 if (fmt->extensions && av_match_ext("mp3", fmt->extensions)) {
352 *score_max = AVPROBE_SCORE_MAX/4-1;
353 break;
354 }
355 }
356
357 return fmt;
358 }
359
360 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
361 int score=0;
362 return av_probe_input_format2(pd, is_opened, &score);
363 }
364
365 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
366 {
367 static const struct {
368 const char *name; enum CodecID id; enum AVMediaType type;
369 } fmt_id_type[] = {
370 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
371 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
372 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
373 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
374 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
375 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
376 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
377 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
378 { 0 }
379 };
380 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
381
382 if (fmt) {
383 int i;
384 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
385 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
386 for (i = 0; fmt_id_type[i].name; i++) {
387 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
388 st->codec->codec_id = fmt_id_type[i].id;
389 st->codec->codec_type = fmt_id_type[i].type;
390 break;
391 }
392 }
393 }
394 return !!fmt;
395 }
396
397 /************************************************************/
398 /* input media file */
399
400 #if FF_API_FORMAT_PARAMETERS
401 static AVDictionary *convert_format_parameters(AVFormatParameters *ap)
402 {
403 char buf[1024];
404 AVDictionary *opts = NULL;
405
406 if (!ap)
407 return NULL;
408
409 if (ap->time_base.num) {
410 snprintf(buf, sizeof(buf), "%d/%d", ap->time_base.den, ap->time_base.num);
411 av_dict_set(&opts, "framerate", buf, 0);
412 }
413 if (ap->sample_rate) {
414 snprintf(buf, sizeof(buf), "%d", ap->sample_rate);
415 av_dict_set(&opts, "sample_rate", buf, 0);
416 }
417 if (ap->channels) {
418 snprintf(buf, sizeof(buf), "%d", ap->channels);
419 av_dict_set(&opts, "channels", buf, 0);
420 }
421 if (ap->width || ap->height) {
422 snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height);
423 av_dict_set(&opts, "video_size", buf, 0);
424 }
425 if (ap->pix_fmt != PIX_FMT_NONE) {
426 av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0);
427 }
428 if (ap->channel) {
429 snprintf(buf, sizeof(buf), "%d", ap->channel);
430 av_dict_set(&opts, "channel", buf, 0);
431 }
432 if (ap->standard) {
433 av_dict_set(&opts, "standard", ap->standard, 0);
434 }
435 if (ap->mpeg2ts_compute_pcr) {
436 av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0);
437 }
438 if (ap->initial_pause) {
439 av_dict_set(&opts, "initial_pause", "1", 0);
440 }
441 return opts;
442 }
443
444 /**
445 * Open a media file from an IO stream. 'fmt' must be specified.
446 */
447 int av_open_input_stream(AVFormatContext **ic_ptr,
448 AVIOContext *pb, const char *filename,
449 AVInputFormat *fmt, AVFormatParameters *ap)
450 {
451 int err;
452 AVDictionary *opts;
453 AVFormatContext *ic;
454 AVFormatParameters default_ap;
455
456 if(!ap){
457 ap=&default_ap;
458 memset(ap, 0, sizeof(default_ap));
459 }
460 opts = convert_format_parameters(ap);
461
462 if(!ap->prealloced_context)
463 ic = avformat_alloc_context();
464 else
465 ic = *ic_ptr;
466 if (!ic) {
467 err = AVERROR(ENOMEM);
468 goto fail;
469 }
470 if (pb && fmt && fmt->flags & AVFMT_NOFILE)
471 av_log(ic, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
472 "will be ignored with AVFMT_NOFILE format.\n");
473 else
474 ic->pb = pb;
475
476 if ((err = avformat_open_input(&ic, filename, fmt, &opts)) < 0)
477 goto fail;
478 ic->pb = ic->pb ? ic->pb : pb; // don't leak custom pb if it wasn't set above
479
480 fail:
481 *ic_ptr = ic;
482 av_dict_free(&opts);
483 return err;
484 }
485 #endif
486
487 /** size of probe buffer, for guessing file type from file contents */
488 #define PROBE_BUF_MIN 2048
489 #define PROBE_BUF_MAX (1<<20)
490
491 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
492 const char *filename, void *logctx,
493 unsigned int offset, unsigned int max_probe_size)
494 {
495 AVProbeData pd = { filename ? filename : "", NULL, -offset };
496 unsigned char *buf = NULL;
497 int ret = 0, probe_size;
498
499 if (!max_probe_size) {
500 max_probe_size = PROBE_BUF_MAX;
501 } else if (max_probe_size > PROBE_BUF_MAX) {
502 max_probe_size = PROBE_BUF_MAX;
503 } else if (max_probe_size < PROBE_BUF_MIN) {
504 return AVERROR(EINVAL);
505 }
506
507 if (offset >= max_probe_size) {
508 return AVERROR(EINVAL);
509 }
510
511 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
512 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
513 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
514 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
515
516 if (probe_size < offset) {
517 continue;
518 }
519
520 /* read probe data */
521 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
522 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
523 /* fail if the error was not end of file; otherwise, lower the score */
524 if (ret != AVERROR_EOF) {
525 av_free(buf);
526 return ret;
527 }
528 score = 0;
529 ret = 0; /* error was end of file, nothing read */
530 }
531 pd.buf_size += ret;
532 pd.buf = &buf[offset];
533
534 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
535
536 /* guess file format */
537 *fmt = av_probe_input_format2(&pd, 1, &score);
538 if(*fmt){
539 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
540 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
541 }else
542 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
543 }
544 }
545
546 if (!*fmt) {
547 av_free(buf);
548 return AVERROR_INVALIDDATA;
549 }
550
551 /* rewind. reuse probe buffer to avoid seeking */
552 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
553 av_free(buf);
554
555 return ret;
556 }
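/* Sketch of probing an already opened AVIOContext before handing it to
 * avformat_open_input() (file name illustrative, error paths omitted):
 *
 *     AVIOContext *pb = NULL;
 *     AVInputFormat *fmt = NULL;
 *     if (avio_open(&pb, "input.dat", AVIO_FLAG_READ) >= 0 &&
 *         av_probe_input_buffer(pb, &fmt, "input.dat", NULL, 0, 0) >= 0)
 *         av_log(NULL, AV_LOG_INFO, "probed format: %s\n", fmt->name);
 *
 * Passing 0 as max_probe_size selects the default limit (PROBE_BUF_MAX); the
 * probe buffer is grown geometrically until a format scores high enough. */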
557
558 #if FF_API_FORMAT_PARAMETERS
559 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
560 AVInputFormat *fmt,
561 int buf_size,
562 AVFormatParameters *ap)
563 {
564 int err;
565 AVDictionary *opts = convert_format_parameters(ap);
566
567 if (!ap || !ap->prealloced_context)
568 *ic_ptr = NULL;
569
570 err = avformat_open_input(ic_ptr, filename, fmt, &opts);
571
572 av_dict_free(&opts);
573 return err;
574 }
575 #endif
576
577 /* open input file and probe the format if necessary */
578 static int init_input(AVFormatContext *s, const char *filename)
579 {
580 int ret;
581 AVProbeData pd = {filename, NULL, 0};
582
583 if (s->pb) {
584 s->flags |= AVFMT_FLAG_CUSTOM_IO;
585 if (!s->iformat)
586 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
587 else if (s->iformat->flags & AVFMT_NOFILE)
588 return AVERROR(EINVAL);
589 return 0;
590 }
591
592 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
593 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
594 return 0;
595
596 if ((ret = avio_open(&s->pb, filename, AVIO_FLAG_READ)) < 0)
597 return ret;
598 if (s->iformat)
599 return 0;
600 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
601 }
602
603 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
604 {
605 AVFormatContext *s = *ps;
606 int ret = 0;
607 AVFormatParameters ap = { { 0 } };
608 AVDictionary *tmp = NULL;
609
610 if (!s && !(s = avformat_alloc_context()))
611 return AVERROR(ENOMEM);
612 if (fmt)
613 s->iformat = fmt;
614
615 if (options)
616 av_dict_copy(&tmp, *options, 0);
617
618 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
619 goto fail;
620
621 if ((ret = init_input(s, filename)) < 0)
622 goto fail;
623
624 /* check filename in case an image number is expected */
625 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
626 if (!av_filename_number_test(filename)) {
627 ret = AVERROR(EINVAL);
628 goto fail;
629 }
630 }
631
632 s->duration = s->start_time = AV_NOPTS_VALUE;
633 av_strlcpy(s->filename, filename, sizeof(s->filename));
634
635 /* allocate private data */
636 if (s->iformat->priv_data_size > 0) {
637 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
638 ret = AVERROR(ENOMEM);
639 goto fail;
640 }
641 if (s->iformat->priv_class) {
642 *(const AVClass**)s->priv_data = s->iformat->priv_class;
643 av_opt_set_defaults(s->priv_data);
644 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
645 goto fail;
646 }
647 }
648
649 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
650 if (s->pb)
651 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC);
652
653 if (s->iformat->read_header)
654 if ((ret = s->iformat->read_header(s, &ap)) < 0)
655 goto fail;
656
657 if (s->pb && !s->data_offset)
658 s->data_offset = avio_tell(s->pb);
659
660 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
661
662 if (options) {
663 av_dict_free(options);
664 *options = tmp;
665 }
666 *ps = s;
667 return 0;
668
669 fail:
670 av_dict_free(&tmp);
671 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
672 avio_close(s->pb);
673 avformat_free_context(s);
674 *ps = NULL;
675 return ret;
676 }
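/* Caller-side sketch (file name, demuxer and option are illustrative; the
 * "framerate" key is only honoured by demuxers that expose such an option):
 *
 *     AVFormatContext *ic = NULL;
 *     AVDictionary *opts = NULL;
 *     int err;
 *
 *     av_dict_set(&opts, "framerate", "25", 0);
 *     err = avformat_open_input(&ic, "input.yuv",
 *                               av_find_input_format("rawvideo"), &opts);
 *     av_dict_free(&opts);   // entries still present here were not recognized
 *     if (err < 0)
 *         return err;
 *
 * On failure the context is freed and *ic is reset to NULL, so the caller must
 * not touch it afterwards. */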
677
678 /*******************************************************/
679
680 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
681 AVPacketList **plast_pktl){
682 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
683 if (!pktl)
684 return NULL;
685
686 if (*packet_buffer)
687 (*plast_pktl)->next = pktl;
688 else
689 *packet_buffer = pktl;
690
691 /* add the packet to the buffered packet list */
692 *plast_pktl = pktl;
693 pktl->pkt= *pkt;
694 return &pktl->pkt;
695 }
696
697 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
698 {
699 int ret, i;
700 AVStream *st;
701
702 for(;;){
703 AVPacketList *pktl = s->raw_packet_buffer;
704
705 if (pktl) {
706 *pkt = pktl->pkt;
707 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
708 !s->streams[pkt->stream_index]->probe_packets ||
709 s->raw_packet_buffer_remaining_size < pkt->size){
710 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
711 av_freep(&pd->buf);
712 pd->buf_size = 0;
713 s->raw_packet_buffer = pktl->next;
714 s->raw_packet_buffer_remaining_size += pkt->size;
715 av_free(pktl);
716 return 0;
717 }
718 }
719
720 av_init_packet(pkt);
721 ret= s->iformat->read_packet(s, pkt);
722 if (ret < 0) {
723 if (!pktl || ret == AVERROR(EAGAIN))
724 return ret;
725 for (i = 0; i < s->nb_streams; i++)
726 s->streams[i]->probe_packets = 0;
727 continue;
728 }
729
730 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
731 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
732 av_log(s, AV_LOG_WARNING,
733 "Dropped corrupted packet (stream = %d)\n",
734 pkt->stream_index);
735 continue;
736 }
737
738 st= s->streams[pkt->stream_index];
739
740 switch(st->codec->codec_type){
741 case AVMEDIA_TYPE_VIDEO:
742 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
743 break;
744 case AVMEDIA_TYPE_AUDIO:
745 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
746 break;
747 case AVMEDIA_TYPE_SUBTITLE:
748 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
749 break;
750 }
751
752 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
753 !st->probe_packets))
754 return ret;
755
756 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
757 s->raw_packet_buffer_remaining_size -= pkt->size;
758
759 if(st->codec->codec_id == CODEC_ID_PROBE){
760 AVProbeData *pd = &st->probe_data;
761 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
762 --st->probe_packets;
763
764 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
765 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
766 pd->buf_size += pkt->size;
767 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
768
769 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
770 //FIXME we don't reduce the score to 0 when running out of buffer space in bytes
771 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
772 if(st->codec->codec_id != CODEC_ID_PROBE){
773 pd->buf_size=0;
774 av_freep(&pd->buf);
775 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
776 }
777 }
778 }
779 }
780 }
781
782 /**********************************************************/
783
784 /**
785 * Get the number of samples of an audio frame. Return -1 on error.
786 */
787 static int get_audio_frame_size(AVCodecContext *enc, int size)
788 {
789 int frame_size;
790
791 if(enc->codec_id == CODEC_ID_VORBIS)
792 return -1;
793
794 if (enc->frame_size <= 1) {
795 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
796
797 if (bits_per_sample) {
798 if (enc->channels == 0)
799 return -1;
800 frame_size = (size << 3) / (bits_per_sample * enc->channels);
801 } else {
802 /* used for example by ADPCM codecs */
803 if (enc->bit_rate == 0)
804 return -1;
805 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
806 }
807 } else {
808 frame_size = enc->frame_size;
809 }
810 return frame_size;
811 }
812
813
814 /**
815 * Compute the frame duration as a fraction *pnum / *pden in stream time_base units; both are set to 0 if unavailable.
816 */
817 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
818 AVCodecParserContext *pc, AVPacket *pkt)
819 {
820 int frame_size;
821
822 *pnum = 0;
823 *pden = 0;
824 switch(st->codec->codec_type) {
825 case AVMEDIA_TYPE_VIDEO:
826 if(st->time_base.num*1000LL > st->time_base.den){
827 *pnum = st->time_base.num;
828 *pden = st->time_base.den;
829 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
830 *pnum = st->codec->time_base.num;
831 *pden = st->codec->time_base.den;
832 if (pc && pc->repeat_pict) {
833 *pnum = (*pnum) * (1 + pc->repeat_pict);
834 }
835 //If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet.
836 //Thus, if we have no parser in that case, leave the duration undefined.
837 if(st->codec->ticks_per_frame>1 && !pc){
838 *pnum = *pden = 0;
839 }
840 }
841 break;
842 case AVMEDIA_TYPE_AUDIO:
843 frame_size = get_audio_frame_size(st->codec, pkt->size);
844 if (frame_size <= 0 || st->codec->sample_rate <= 0)
845 break;
846 *pnum = frame_size;
847 *pden = st->codec->sample_rate;
848 break;
849 default:
850 break;
851 }
852 }
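/* Example of the audio branch above (numbers are illustrative): an AAC packet
 * of 1024 samples at 48 kHz yields *pnum = 1024, *pden = 48000. With a stream
 * time base of 1/90000, compute_pkt_fields() then derives a packet duration of
 * 1024 * 90000 / 48000 = 1920 time-base units. */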
853
854 static int is_intra_only(AVCodecContext *enc){
855 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
856 return 1;
857 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
858 switch(enc->codec_id){
859 case CODEC_ID_MJPEG:
860 case CODEC_ID_MJPEGB:
861 case CODEC_ID_LJPEG:
862 case CODEC_ID_PRORES:
863 case CODEC_ID_RAWVIDEO:
864 case CODEC_ID_DVVIDEO:
865 case CODEC_ID_HUFFYUV:
866 case CODEC_ID_FFVHUFF:
867 case CODEC_ID_ASV1:
868 case CODEC_ID_ASV2:
869 case CODEC_ID_VCR1:
870 case CODEC_ID_DNXHD:
871 case CODEC_ID_JPEG2000:
872 return 1;
873 default: break;
874 }
875 }
876 return 0;
877 }
878
879 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
880 int64_t dts, int64_t pts)
881 {
882 AVStream *st= s->streams[stream_index];
883 AVPacketList *pktl= s->packet_buffer;
884
885 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
886 return;
887
888 st->first_dts= dts - st->cur_dts;
889 st->cur_dts= dts;
890
891 for(; pktl; pktl= pktl->next){
892 if(pktl->pkt.stream_index != stream_index)
893 continue;
894 //FIXME think more about this check
895 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
896 pktl->pkt.pts += st->first_dts;
897
898 if(pktl->pkt.dts != AV_NOPTS_VALUE)
899 pktl->pkt.dts += st->first_dts;
900
901 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
902 st->start_time= pktl->pkt.pts;
903 }
904 if (st->start_time == AV_NOPTS_VALUE)
905 st->start_time = pts;
906 }
907
908 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
909 {
910 AVPacketList *pktl= s->packet_buffer;
911 int64_t cur_dts= 0;
912
913 if(st->first_dts != AV_NOPTS_VALUE){
914 cur_dts= st->first_dts;
915 for(; pktl; pktl= pktl->next){
916 if(pktl->pkt.stream_index == pkt->stream_index){
917 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
918 break;
919 cur_dts -= pkt->duration;
920 }
921 }
922 pktl= s->packet_buffer;
923 st->first_dts = cur_dts;
924 }else if(st->cur_dts)
925 return;
926
927 for(; pktl; pktl= pktl->next){
928 if(pktl->pkt.stream_index != pkt->stream_index)
929 continue;
930 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
931 && !pktl->pkt.duration){
932 pktl->pkt.dts= cur_dts;
933 if(!st->codec->has_b_frames)
934 pktl->pkt.pts= cur_dts;
935 cur_dts += pkt->duration;
936 pktl->pkt.duration= pkt->duration;
937 }else
938 break;
939 }
940 if(st->first_dts == AV_NOPTS_VALUE)
941 st->cur_dts= cur_dts;
942 }
943
944 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
945 AVCodecParserContext *pc, AVPacket *pkt)
946 {
947 int num, den, presentation_delayed, delay, i;
948 int64_t offset;
949
950 if (s->flags & AVFMT_FLAG_NOFILLIN)
951 return;
952
953 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
954 pkt->dts= AV_NOPTS_VALUE;
955
956 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
957 //FIXME Set low_delay = 0 when has_b_frames = 1
958 st->codec->has_b_frames = 1;
959
960 /* do we have a video B-frame ? */
961 delay= st->codec->has_b_frames;
962 presentation_delayed = 0;
963
964 // ignore delay caused by frame threading so that the mpeg2-without-dts
965 // warning will not trigger
966 if (delay && st->codec->active_thread_type&FF_THREAD_FRAME)
967 delay -= st->codec->thread_count-1;
968
969 /* XXX: need has_b_frame, but cannot get it if the codec is
970 not initialized */
971 if (delay &&
972 pc && pc->pict_type != AV_PICTURE_TYPE_B)
973 presentation_delayed = 1;
974
975 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
976 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
977 pkt->dts -= 1LL<<st->pts_wrap_bits;
978 }
979
980 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
981 // we take the conservative approach and discard both
982 // Note: if this is misbehaving for an H.264 file then possibly presentation_delayed is not set correctly.
983 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
984 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
985 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
986 }
987
988 if (pkt->duration == 0) {
989 compute_frame_duration(&num, &den, st, pc, pkt);
990 if (den && num) {
991 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
992
993 if(pkt->duration != 0 && s->packet_buffer)
994 update_initial_durations(s, st, pkt);
995 }
996 }
997
998 /* correct timestamps with byte offset if demuxers only have timestamps
999 on packet boundaries */
1000 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1001 /* this will estimate bitrate based on this frame's duration and size */
1002 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1003 if(pkt->pts != AV_NOPTS_VALUE)
1004 pkt->pts += offset;
1005 if(pkt->dts != AV_NOPTS_VALUE)
1006 pkt->dts += offset;
1007 }
1008
1009 if (pc && pc->dts_sync_point >= 0) {
1010 // we have synchronization info from the parser
1011 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1012 if (den > 0) {
1013 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1014 if (pkt->dts != AV_NOPTS_VALUE) {
1015 // got DTS from the stream, update reference timestamp
1016 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1017 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1018 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1019 // compute DTS based on reference timestamp
1020 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1021 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1022 }
1023 if (pc->dts_sync_point > 0)
1024 st->reference_dts = pkt->dts; // new reference
1025 }
1026 }
1027
1028 /* This may be redundant, but it should not hurt. */
1029 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1030 presentation_delayed = 1;
1031
1032 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
1033 /* interpolate PTS and DTS if they are not present */
1034 //We skip H264 currently because delay and has_b_frames are not reliably set
1035 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1036 if (presentation_delayed) {
1037 /* DTS = decompression timestamp */
1038 /* PTS = presentation timestamp */
1039 if (pkt->dts == AV_NOPTS_VALUE)
1040 pkt->dts = st->last_IP_pts;
1041 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1042 if (pkt->dts == AV_NOPTS_VALUE)
1043 pkt->dts = st->cur_dts;
1044
1045 /* this is tricky: the dts must be incremented by the duration
1046 of the frame we are displaying, i.e. the last I- or P-frame */
1047 if (st->last_IP_duration == 0)
1048 st->last_IP_duration = pkt->duration;
1049 if(pkt->dts != AV_NOPTS_VALUE)
1050 st->cur_dts = pkt->dts + st->last_IP_duration;
1051 st->last_IP_duration = pkt->duration;
1052 st->last_IP_pts= pkt->pts;
1053 /* cannot compute PTS if not present (we can compute it only
1054 by knowing the future) */
1055 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
1056 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
1057 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
1058 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1059 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
1060 pkt->pts += pkt->duration;
1061 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
1062 }
1063 }
1064
1065 /* presentation is not delayed : PTS and DTS are the same */
1066 if(pkt->pts == AV_NOPTS_VALUE)
1067 pkt->pts = pkt->dts;
1068 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1069 if(pkt->pts == AV_NOPTS_VALUE)
1070 pkt->pts = st->cur_dts;
1071 pkt->dts = pkt->pts;
1072 if(pkt->pts != AV_NOPTS_VALUE)
1073 st->cur_dts = pkt->pts + pkt->duration;
1074 }
1075 }
1076
1077 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1078 st->pts_buffer[0]= pkt->pts;
1079 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1080 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1081 if(pkt->dts == AV_NOPTS_VALUE)
1082 pkt->dts= st->pts_buffer[0];
1083 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
1084 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1085 }
1086 if(pkt->dts > st->cur_dts)
1087 st->cur_dts = pkt->dts;
1088 }
1089
1090 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1091
1092 /* update flags */
1093 if(is_intra_only(st->codec))
1094 pkt->flags |= AV_PKT_FLAG_KEY;
1095 else if (pc) {
1096 pkt->flags = 0;
1097 /* keyframe computation */
1098 if (pc->key_frame == 1)
1099 pkt->flags |= AV_PKT_FLAG_KEY;
1100 else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
1101 pkt->flags |= AV_PKT_FLAG_KEY;
1102 }
1103 if (pc)
1104 pkt->convergence_duration = pc->convergence_duration;
1105 }
1106
1107
1108 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1109 {
1110 AVStream *st;
1111 int len, ret, i;
1112
1113 av_init_packet(pkt);
1114
1115 for(;;) {
1116 /* select current input stream component */
1117 st = s->cur_st;
1118 if (st) {
1119 if (!st->need_parsing || !st->parser) {
1120 /* no parsing needed: we just output the packet as is */
1121 /* raw data support */
1122 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
1123 compute_pkt_fields(s, st, NULL, pkt);
1124 s->cur_st = NULL;
1125 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1126 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1127 ff_reduce_index(s, st->index);
1128 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1129 }
1130 break;
1131 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1132 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1133 st->cur_ptr, st->cur_len,
1134 st->cur_pkt.pts, st->cur_pkt.dts,
1135 st->cur_pkt.pos);
1136 st->cur_pkt.pts = AV_NOPTS_VALUE;
1137 st->cur_pkt.dts = AV_NOPTS_VALUE;
1138 /* increment read pointer */
1139 st->cur_ptr += len;
1140 st->cur_len -= len;
1141
1142 /* return packet if any */
1143 if (pkt->size) {
1144 got_packet:
1145 pkt->duration = 0;
1146 pkt->stream_index = st->index;
1147 pkt->pts = st->parser->pts;
1148 pkt->dts = st->parser->dts;
1149 pkt->pos = st->parser->pos;
1150 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
1151 s->cur_st = NULL;
1152 pkt->destruct= st->cur_pkt.destruct;
1153 st->cur_pkt.destruct= NULL;
1154 st->cur_pkt.data = NULL;
1155 assert(st->cur_len == 0);
1156 }else{
1157 pkt->destruct = NULL;
1158 }
1159 compute_pkt_fields(s, st, st->parser, pkt);
1160
1161 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1162 ff_reduce_index(s, st->index);
1163 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1164 0, 0, AVINDEX_KEYFRAME);
1165 }
1166
1167 break;
1168 }
1169 } else {
1170 /* free packet */
1171 av_free_packet(&st->cur_pkt);
1172 s->cur_st = NULL;
1173 }
1174 } else {
1175 AVPacket cur_pkt;
1176 /* read next packet */
1177 ret = av_read_packet(s, &cur_pkt);
1178 if (ret < 0) {
1179 if (ret == AVERROR(EAGAIN))
1180 return ret;
1181 /* return the last frames, if any */
1182 for(i = 0; i < s->nb_streams; i++) {
1183 st = s->streams[i];
1184 if (st->parser && st->need_parsing) {
1185 av_parser_parse2(st->parser, st->codec,
1186 &pkt->data, &pkt->size,
1187 NULL, 0,
1188 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1189 AV_NOPTS_VALUE);
1190 if (pkt->size)
1191 goto got_packet;
1192 }
1193 }
1194 /* no more packets: really terminate parsing */
1195 return ret;
1196 }
1197 st = s->streams[cur_pkt.stream_index];
1198 st->cur_pkt= cur_pkt;
1199
1200 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1201 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1202 st->cur_pkt.pts < st->cur_pkt.dts){
1203 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1204 st->cur_pkt.stream_index,
1205 st->cur_pkt.pts,
1206 st->cur_pkt.dts,
1207 st->cur_pkt.size);
1208 // av_free_packet(&st->cur_pkt);
1209 // return -1;
1210 }
1211
1212 if(s->debug & FF_FDEBUG_TS)
1213 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1214 st->cur_pkt.stream_index,
1215 st->cur_pkt.pts,
1216 st->cur_pkt.dts,
1217 st->cur_pkt.size,
1218 st->cur_pkt.duration,
1219 st->cur_pkt.flags);
1220
1221 s->cur_st = st;
1222 st->cur_ptr = st->cur_pkt.data;
1223 st->cur_len = st->cur_pkt.size;
1224 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1225 st->parser = av_parser_init(st->codec->codec_id);
1226 if (!st->parser) {
1227 /* no parser available: just output the raw packets */
1228 st->need_parsing = AVSTREAM_PARSE_NONE;
1229 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1230 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1231 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1232 st->parser->flags |= PARSER_FLAG_ONCE;
1233 }
1234 }
1235 }
1236 }
1237 if(s->debug & FF_FDEBUG_TS)
1238 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1239 pkt->stream_index,
1240 pkt->pts,
1241 pkt->dts,
1242 pkt->size,
1243 pkt->duration,
1244 pkt->flags);
1245
1246 return 0;
1247 }
1248
1249 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1250 {
1251 AVPacketList *pktl;
1252 int eof=0;
1253 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1254
1255 for(;;){
1256 pktl = s->packet_buffer;
1257 if (pktl) {
1258 AVPacket *next_pkt= &pktl->pkt;
1259
1260 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1261 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1262 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1263 if( pktl->pkt.stream_index == next_pkt->stream_index
1264 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1265 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1266 next_pkt->pts= pktl->pkt.dts;
1267 }
1268 pktl= pktl->next;
1269 }
1270 pktl = s->packet_buffer;
1271 }
1272
1273 if( next_pkt->pts != AV_NOPTS_VALUE
1274 || next_pkt->dts == AV_NOPTS_VALUE
1275 || !genpts || eof){
1276 /* read packet from packet buffer, if there is data */
1277 *pkt = *next_pkt;
1278 s->packet_buffer = pktl->next;
1279 av_free(pktl);
1280 return 0;
1281 }
1282 }
1283 if(genpts){
1284 int ret= read_frame_internal(s, pkt);
1285 if(ret<0){
1286 if(pktl && ret != AVERROR(EAGAIN)){
1287 eof=1;
1288 continue;
1289 }else
1290 return ret;
1291 }
1292
1293 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1294 &s->packet_buffer_end)) < 0)
1295 return AVERROR(ENOMEM);
1296 }else{
1297 assert(!s->packet_buffer);
1298 return read_frame_internal(s, pkt);
1299 }
1300 }
1301 }
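/* The canonical demuxing loop built on av_read_frame(), as a sketch (ic and
 * video_stream_index are assumed to have been set up by the caller):
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         if (pkt.stream_index == video_stream_index) {
 *             // pkt.pts/pkt.dts are in ic->streams[pkt.stream_index]->time_base
 *             // hand pkt.data / pkt.size to the decoder here
 *         }
 *         av_free_packet(&pkt);
 *     }
 *
 * With AVFMT_FLAG_GENPTS set, packets may be buffered internally until a pts
 * can be filled in, which is what the genpts path above implements. */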
1302
1303 /* XXX: empty the packet queues */
1304 static void flush_packet_queue(AVFormatContext *s)
1305 {
1306 AVPacketList *pktl;
1307
1308 for(;;) {
1309 pktl = s->packet_buffer;
1310 if (!pktl)
1311 break;
1312 s->packet_buffer = pktl->next;
1313 av_free_packet(&pktl->pkt);
1314 av_free(pktl);
1315 }
1316 while(s->raw_packet_buffer){
1317 pktl = s->raw_packet_buffer;
1318 s->raw_packet_buffer = pktl->next;
1319 av_free_packet(&pktl->pkt);
1320 av_free(pktl);
1321 }
1322 s->packet_buffer_end=
1323 s->raw_packet_buffer_end= NULL;
1324 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1325 }
1326
1327 /*******************************************************/
1328 /* seek support */
1329
1330 int av_find_default_stream_index(AVFormatContext *s)
1331 {
1332 int first_audio_index = -1;
1333 int i;
1334 AVStream *st;
1335
1336 if (s->nb_streams <= 0)
1337 return -1;
1338 for(i = 0; i < s->nb_streams; i++) {
1339 st = s->streams[i];
1340 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1341 return i;
1342 }
1343 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1344 first_audio_index = i;
1345 }
1346 return first_audio_index >= 0 ? first_audio_index : 0;
1347 }
1348
1349 /**
1350 * Flush the frame reader.
1351 */
1352 void ff_read_frame_flush(AVFormatContext *s)
1353 {
1354 AVStream *st;
1355 int i, j;
1356
1357 flush_packet_queue(s);
1358
1359 s->cur_st = NULL;
1360
1361 /* for each stream, reset read state */
1362 for(i = 0; i < s->nb_streams; i++) {
1363 st = s->streams[i];
1364
1365 if (st->parser) {
1366 av_parser_close(st->parser);
1367 st->parser = NULL;
1368 av_free_packet(&st->cur_pkt);
1369 }
1370 st->last_IP_pts = AV_NOPTS_VALUE;
1371 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1372 st->reference_dts = AV_NOPTS_VALUE;
1373 /* fail safe */
1374 st->cur_ptr = NULL;
1375 st->cur_len = 0;
1376
1377 st->probe_packets = MAX_PROBE_PACKETS;
1378
1379 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1380 st->pts_buffer[j]= AV_NOPTS_VALUE;
1381 }
1382 }
1383
1384 #if FF_API_SEEK_PUBLIC
1385 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1386 {
1387 return ff_update_cur_dts(s, ref_st, timestamp);
1388 }
1389 #endif
1390
1391 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1392 {
1393 int i;
1394
1395 for(i = 0; i < s->nb_streams; i++) {
1396 AVStream *st = s->streams[i];
1397
1398 st->cur_dts = av_rescale(timestamp,
1399 st->time_base.den * (int64_t)ref_st->time_base.num,
1400 st->time_base.num * (int64_t)ref_st->time_base.den);
1401 }
1402 }
1403
1404 void ff_reduce_index(AVFormatContext *s, int stream_index)
1405 {
1406 AVStream *st= s->streams[stream_index];
1407 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1408
1409 if((unsigned)st->nb_index_entries >= max_entries){
1410 int i;
1411 for(i=0; 2*i<st->nb_index_entries; i++)
1412 st->index_entries[i]= st->index_entries[2*i];
1413 st->nb_index_entries= i;
1414 }
1415 }
1416
1417 int ff_add_index_entry(AVIndexEntry **index_entries,
1418 int *nb_index_entries,
1419 unsigned int *index_entries_allocated_size,
1420 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1421 {
1422 AVIndexEntry *entries, *ie;
1423 int index;
1424
1425 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1426 return -1;
1427
1428 entries = av_fast_realloc(*index_entries,
1429 index_entries_allocated_size,
1430 (*nb_index_entries + 1) *
1431 sizeof(AVIndexEntry));
1432 if(!entries)
1433 return -1;
1434
1435 *index_entries= entries;
1436
1437 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1438
1439 if(index<0){
1440 index= (*nb_index_entries)++;
1441 ie= &entries[index];
1442 assert(index==0 || ie[-1].timestamp < timestamp);
1443 }else{
1444 ie= &entries[index];
1445 if(ie->timestamp != timestamp){
1446 if(ie->timestamp <= timestamp)
1447 return -1;
1448 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1449 (*nb_index_entries)++;
1450 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1451 distance= ie->min_distance;
1452 }
1453
1454 ie->pos = pos;
1455 ie->timestamp = timestamp;
1456 ie->min_distance= distance;
1457 ie->size= size;
1458 ie->flags = flags;
1459
1460 return index;
1461 }
1462
1463 int av_add_index_entry(AVStream *st,
1464 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1465 {
1466 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1467 &st->index_entries_allocated_size, pos,
1468 timestamp, size, distance, flags);
1469 }
1470
1471 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1472 int64_t wanted_timestamp, int flags)
1473 {
1474 int a, b, m;
1475 int64_t timestamp;
1476
1477 a = - 1;
1478 b = nb_entries;
1479
1480 //optimize appending index entries at the end
1481 if(b && entries[b-1].timestamp < wanted_timestamp)
1482 a= b-1;
1483
1484 while (b - a > 1) {
1485 m = (a + b) >> 1;
1486 timestamp = entries[m].timestamp;
1487 if(timestamp >= wanted_timestamp)
1488 b = m;
1489 if(timestamp <= wanted_timestamp)
1490 a = m;
1491 }
1492 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1493
1494 if(!(flags & AVSEEK_FLAG_ANY)){
1495 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1496 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1497 }
1498 }
1499
1500 if(m == nb_entries)
1501 return -1;
1502 return m;
1503 }
1504
1505 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1506 int flags)
1507 {
1508 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1509 wanted_timestamp, flags);
1510 }
1511
1512 #if FF_API_SEEK_PUBLIC
1513 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1514 return ff_seek_frame_binary(s, stream_index, target_ts, flags);
1515 }
1516 #endif
1517
1518 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1519 {
1520 AVInputFormat *avif= s->iformat;
1521 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1522 int64_t ts_min, ts_max, ts;
1523 int index;
1524 int64_t ret;
1525 AVStream *st;
1526
1527 if (stream_index < 0)
1528 return -1;
1529
1530 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1531
1532 ts_max=
1533 ts_min= AV_NOPTS_VALUE;
1534 pos_limit= -1; //gcc falsely says it may be uninitialized
1535
1536 st= s->streams[stream_index];
1537 if(st->index_entries){
1538 AVIndexEntry *e;
1539
1540 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1541 index= FFMAX(index, 0);
1542 e= &st->index_entries[index];
1543
1544 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1545 pos_min= e->pos;
1546 ts_min= e->timestamp;
1547 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1548 pos_min,ts_min);
1549 }else{
1550 assert(index==0);
1551 }
1552
1553 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1554 assert(index < st->nb_index_entries);
1555 if(index >= 0){
1556 e= &st->index_entries[index];
1557 assert(e->timestamp >= target_ts);
1558 pos_max= e->pos;
1559 ts_max= e->timestamp;
1560 pos_limit= pos_max - e->min_distance;
1561 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1562 pos_max,pos_limit, ts_max);
1563 }
1564 }
1565
1566 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1567 if(pos<0)
1568 return -1;
1569
1570 /* do the seek */
1571 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1572 return ret;
1573
1574 ff_update_cur_dts(s, st, ts);
1575
1576 return 0;
1577 }
1578
1579 #if FF_API_SEEK_PUBLIC
1580 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1581 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1582 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1583 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1584 {
1585 return ff_gen_search(s, stream_index, target_ts, pos_min, pos_max,
1586 pos_limit, ts_min, ts_max, flags, ts_ret,
1587 read_timestamp);
1588 }
1589 #endif
1590
1591 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1592 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1593 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1594 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1595 {
1596 int64_t pos, ts;
1597 int64_t start_pos, filesize;
1598 int no_change;
1599
1600 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1601
1602 if(ts_min == AV_NOPTS_VALUE){
1603 pos_min = s->data_offset;
1604 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1605 if (ts_min == AV_NOPTS_VALUE)
1606 return -1;
1607 }
1608
1609 if(ts_max == AV_NOPTS_VALUE){
1610 int step= 1024;
1611 filesize = avio_size(s->pb);
1612 pos_max = filesize - 1;
1613 do{
1614 pos_max -= step;
1615 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1616 step += step;
1617 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1618 if (ts_max == AV_NOPTS_VALUE)
1619 return -1;
1620
1621 for(;;){
1622 int64_t tmp_pos= pos_max + 1;
1623 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1624 if(tmp_ts == AV_NOPTS_VALUE)
1625 break;
1626 ts_max= tmp_ts;
1627 pos_max= tmp_pos;
1628 if(tmp_pos >= filesize)
1629 break;
1630 }
1631 pos_limit= pos_max;
1632 }
1633
1634 if(ts_min > ts_max){
1635 return -1;
1636 }else if(ts_min == ts_max){
1637 pos_limit= pos_min;
1638 }
1639
1640 no_change=0;
1641 while (pos_min < pos_limit) {
1642 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1643 pos_min, pos_max, ts_min, ts_max);
1644 assert(pos_limit <= pos_max);
1645
1646 if(no_change==0){
1647 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1648 // interpolate position (better than dichotomy)
1649 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1650 + pos_min - approximate_keyframe_distance;
1651 }else if(no_change==1){
1652 // bisection, if interpolation failed to change min or max pos last time
1653 pos = (pos_min + pos_limit)>>1;
1654 }else{
1655 /* linear search if bisection failed, can only happen if there
1656 are very few or no keyframes between min/max */
1657 pos=pos_min;
1658 }
1659 if(pos <= pos_min)
1660 pos= pos_min + 1;
1661 else if(pos > pos_limit)
1662 pos= pos_limit;
1663 start_pos= pos;
1664
1665 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1666 if(pos == pos_max)
1667 no_change++;
1668 else
1669 no_change=0;
1670 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1671 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1672 pos_limit, start_pos, no_change);
1673 if(ts == AV_NOPTS_VALUE){
1674 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1675 return -1;
1676 }
1677 assert(ts != AV_NOPTS_VALUE);
1678 if (target_ts <= ts) {
1679 pos_limit = start_pos - 1;
1680 pos_max = pos;
1681 ts_max = ts;
1682 }
1683 if (target_ts >= ts) {
1684 pos_min = pos;
1685 ts_min = ts;
1686 }
1687 }
1688
1689 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1690 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1691 pos_min = pos;
1692 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1693 pos_min++;
1694 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1695 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1696 pos, ts_min, target_ts, ts_max);
1697 *ts_ret= ts;
1698 return pos;
1699 }
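/* Worked example of the interpolation step above (illustrative numbers): with
 * ts_min = 0 at pos_min = 0, ts_max = 1000 at pos_max = 1000000 and
 * target_ts = 250, the first guess is
 *     pos = (250 - 0) * (1000000 - 0) / (1000 - 0) + 0 - (pos_max - pos_limit)
 * i.e. roughly 250000 minus the approximate keyframe distance; bisection and
 * finally linear search take over if a guess fails to narrow the interval. */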
1700
1701 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1702 int64_t pos_min, pos_max;
1703 #if 0
1704 AVStream *st;
1705
1706 if (stream_index < 0)
1707 return -1;
1708
1709 st= s->streams[stream_index];
1710 #endif
1711
1712 pos_min = s->data_offset;
1713 pos_max = avio_size(s->pb) - 1;
1714
1715 if (pos < pos_min) pos= pos_min;
1716 else if(pos > pos_max) pos= pos_max;
1717
1718 avio_seek(s->pb, pos, SEEK_SET);
1719
1720 #if 0
1721 av_update_cur_dts(s, st, ts);
1722 #endif
1723 return 0;
1724 }
1725
1726 static int seek_frame_generic(AVFormatContext *s,
1727 int stream_index, int64_t timestamp, int flags)
1728 {
1729 int index;
1730 int64_t ret;
1731 AVStream *st;
1732 AVIndexEntry *ie;
1733
1734 st = s->streams[stream_index];
1735
1736 index = av_index_search_timestamp(st, timestamp, flags);
1737
1738 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1739 return -1;
1740
1741 if(index < 0 || index==st->nb_index_entries-1){
1742 AVPacket pkt;
1743
1744 if(st->nb_index_entries){
1745 assert(st->index_entries);
1746 ie= &st->index_entries[st->nb_index_entries-1];
1747 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1748 return ret;
1749 ff_update_cur_dts(s, st, ie->timestamp);
1750 }else{
1751 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1752 return ret;
1753 }
1754 for (;;) {
1755 int read_status;
1756 do{
1757 read_status = av_read_frame(s, &pkt);
1758 } while (read_status == AVERROR(EAGAIN));
1759 if (read_status < 0)
1760 break;
1761 av_free_packet(&pkt);
1762 if(stream_index == pkt.stream_index){
1763 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1764 break;
1765 }
1766 }
1767 index = av_index_search_timestamp(st, timestamp, flags);
1768 }
1769 if (index < 0)
1770 return -1;
1771
1772 ff_read_frame_flush(s);
1773 if (s->iformat->read_seek){
1774 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1775 return 0;
1776 }
1777 ie = &st->index_entries[index];
1778 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1779 return ret;
1780 ff_update_cur_dts(s, st, ie->timestamp);
1781
1782 return 0;
1783 }
1784
1785 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1786 {
1787 int ret;
1788 AVStream *st;
1789
1790 if (flags & AVSEEK_FLAG_BYTE) {
1791 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1792 return -1;
1793 ff_read_frame_flush(s);
1794 return seek_frame_byte(s, stream_index, timestamp, flags);
1795 }
1796
1797 if(stream_index < 0){
1798 stream_index= av_find_default_stream_index(s);
1799 if(stream_index < 0)
1800 return -1;
1801
1802 st= s->streams[stream_index];
1803 /* timestamp for default must be expressed in AV_TIME_BASE units */
1804 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1805 }
1806
1807 /* first, we try the format specific seek */
1808 if (s->iformat->read_seek) {
1809 ff_read_frame_flush(s);
1810 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1811 } else
1812 ret = -1;
1813 if (ret >= 0) {
1814 return 0;
1815 }
1816
1817 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1818 ff_read_frame_flush(s);
1819 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1820 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1821 ff_read_frame_flush(s);
1822 return seek_frame_generic(s, stream_index, timestamp, flags);
1823 }
1824 else
1825 return -1;
1826 }
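/* Two common calling patterns, as a sketch (ic and st are assumed to be a
 * valid context and one of its streams):
 *
 *     // seek the default stream to ~5 seconds, in AV_TIME_BASE units
 *     av_seek_frame(ic, -1, 5 * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
 *
 *     // seek a specific stream; the timestamp is in that stream's time base
 *     av_seek_frame(ic, st->index,
 *                   av_rescale_q(5 * (int64_t)AV_TIME_BASE, AV_TIME_BASE_Q,
 *                                st->time_base),
 *                   AVSEEK_FLAG_BACKWARD);
 *
 * AVSEEK_FLAG_BACKWARD requests the nearest seek point at or before the
 * target timestamp. */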
1827
1828 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1829 {
1830 if(min_ts > ts || max_ts < ts)
1831 return -1;
1832
1833 if (s->iformat->read_seek2) {
1834 ff_read_frame_flush(s);
1835 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1836 }
1837
1838 if(s->iformat->read_timestamp){
1839 //try to seek via read_timestamp()
1840 }
1841
1842 //Fall back to the old API if the new one is not implemented but the old one is.
1843 //Note the old API has somewhat different semantics.
1844 if(s->iformat->read_seek || 1)
1845 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1846
1847 // try some generic seek like seek_frame_generic() but with new ts semantics
1848 }
1849
1850 /*******************************************************/
1851
1852 /**
1853 * Return TRUE if at least one stream has an accurate (non-AV_NOPTS_VALUE) duration.
1854 *
1855 * @return TRUE if the duration is known for at least one stream.
1856 */
1857 static int has_duration(AVFormatContext *ic)
1858 {
1859 int i;
1860 AVStream *st;
1861
1862 for(i = 0;i < ic->nb_streams; i++) {
1863 st = ic->streams[i];
1864 if (st->duration != AV_NOPTS_VALUE)
1865 return 1;
1866 }
1867 return 0;
1868 }
1869
1870 /**
1871 * Estimate the global stream timings from those of each component stream.
1872 *
1873 * Also computes the global bitrate if possible.
1874 */
1875 static void update_stream_timings(AVFormatContext *ic)
1876 {
1877 int64_t start_time, start_time1, end_time, end_time1;
1878 int64_t duration, duration1, filesize;
1879 int i;
1880 AVStream *st;
1881
1882 start_time = INT64_MAX;
1883 end_time = INT64_MIN;
1884 duration = INT64_MIN;
1885 for(i = 0;i < ic->nb_streams; i++) {
1886 st = ic->streams[i];
1887 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1888 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1889 start_time = FFMIN(start_time, start_time1);
1890 if (st->duration != AV_NOPTS_VALUE) {
1891 end_time1 = start_time1
1892 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1893 end_time = FFMAX(end_time, end_time1);
1894 }
1895 }
1896 if (st->duration != AV_NOPTS_VALUE) {
1897 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1898 duration = FFMAX(duration, duration1);
1899 }
1900 }
1901 if (start_time != INT64_MAX) {
1902 ic->start_time = start_time;
1903 if (end_time != INT64_MIN)
1904 duration = FFMAX(duration, end_time - start_time);
1905 }
1906 if (duration != INT64_MIN) {
1907 ic->duration = duration;
1908 if (ic->pb && (filesize = avio_size(ic->pb)) > 0) {
1909 /* compute the bitrate */
1910 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
1911 (double)ic->duration;
1912 }
1913 }
1914 }
1915
1916 static void fill_all_stream_timings(AVFormatContext *ic)
1917 {
1918 int i;
1919 AVStream *st;
1920
1921 update_stream_timings(ic);
1922 for(i = 0;i < ic->nb_streams; i++) {
1923 st = ic->streams[i];
1924 if (st->start_time == AV_NOPTS_VALUE) {
1925 if(ic->start_time != AV_NOPTS_VALUE)
1926 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1927 if(ic->duration != AV_NOPTS_VALUE)
1928 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1929 }
1930 }
1931 }
1932
1933 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1934 {
1935 int64_t filesize, duration;
1936 int bit_rate, i;
1937 AVStream *st;
1938
1939 /* if bit_rate is already set, we believe it */
1940 if (ic->bit_rate <= 0) {
1941 bit_rate = 0;
1942 for(i=0;i<ic->nb_streams;i++) {
1943 st = ic->streams[i];
1944 if (st->codec->bit_rate > 0)
1945 bit_rate += st->codec->bit_rate;
1946 }
1947 ic->bit_rate = bit_rate;
1948 }
1949
1950 /* if duration is already set, we believe it */
1951 if (ic->duration == AV_NOPTS_VALUE &&
1952 ic->bit_rate != 0) {
1953 filesize = ic->pb ? avio_size(ic->pb) : 0;
1954 if (filesize > 0) {
1955 for(i = 0; i < ic->nb_streams; i++) {
1956 st = ic->streams[i];
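/* duration in stream time_base units: (filesize * 8 / bit_rate) seconds
   scaled by time_base.den / time_base.num */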
1957 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1958 if (st->duration == AV_NOPTS_VALUE)
1959 st->duration = duration;
1960 }
1961 }
1962 }
1963 }
1964
1965 #define DURATION_MAX_READ_SIZE 250000
1966 #define DURATION_MAX_RETRY 3
1967
1968 /* only usable for MPEG-PS streams */
1969 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1970 {
1971 AVPacket pkt1, *pkt = &pkt1;
1972 AVStream *st;
1973 int read_size, i, ret;
1974 int64_t end_time;
1975 int64_t filesize, offset, duration;
1976 int retry=0;
1977
1978 ic->cur_st = NULL;
1979
1980 /* flush packet queue */
1981 flush_packet_queue(ic);
1982
1983 for (i=0; i<ic->nb_streams; i++) {
1984 st = ic->streams[i];
1985 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1986 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
1987
1988 if (st->parser) {
1989 av_parser_close(st->parser);
1990 st->parser= NULL;
1991 av_free_packet(&st->cur_pkt);
1992 }
1993 }
1994
1995 /* estimate the end time (duration) */
1996 /* XXX: may need to support wrapping */
1997 filesize = ic->pb ? avio_size(ic->pb) : 0;
1998 end_time = AV_NOPTS_VALUE;
1999 do{
2000 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2001 if (offset < 0)
2002 offset = 0;
2003
2004 avio_seek(ic->pb, offset, SEEK_SET);
2005 read_size = 0;
2006 for(;;) {
2007 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2008 break;
2009
2010 do {
2011 ret = av_read_packet(ic, pkt);
2012 } while(ret == AVERROR(EAGAIN));
2013 if (ret != 0)
2014 break;
2015 read_size += pkt->size;
2016 st = ic->streams[pkt->stream_index];
2017 if (pkt->pts != AV_NOPTS_VALUE &&
2018 (st->start_time != AV_NOPTS_VALUE ||
2019 st->first_dts != AV_NOPTS_VALUE)) {
2020 duration = end_time = pkt->pts;
2021 if (st->start_time != AV_NOPTS_VALUE)
2022 duration -= st->start_time;
2023 else
2024 duration -= st->first_dts;
2025 if (duration < 0)
2026 duration += 1LL<<st->pts_wrap_bits;
2027 if (duration > 0) {
2028 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2029 st->duration = duration;
2030 }
2031 }
2032 av_free_packet(pkt);
2033 }
2034 }while( end_time==AV_NOPTS_VALUE
2035 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2036 && ++retry <= DURATION_MAX_RETRY);
2037
2038 fill_all_stream_timings(ic);
2039
2040 avio_seek(ic->pb, old_offset, SEEK_SET);
2041 for (i=0; i<ic->nb_streams; i++) {
2042 st= ic->streams[i];
2043 st->cur_dts= st->first_dts;
2044 st->last_IP_pts = AV_NOPTS_VALUE;
2045 st->reference_dts = AV_NOPTS_VALUE;
2046 }
2047 }
2048
2049 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2050 {
2051 int64_t file_size;
2052
2053 /* get the file size, if possible */
2054 if (ic->iformat->flags & AVFMT_NOFILE) {
2055 file_size = 0;
2056 } else {
2057 file_size = avio_size(ic->pb);
2058 file_size = FFMAX(0, file_size);
2059 }
2060
2061 if ((!strcmp(ic->iformat->name, "mpeg") ||
2062 !strcmp(ic->iformat->name, "mpegts")) &&
2063 file_size && ic->pb->seekable) {
2064 /* get accurate estimate from the PTSes */
2065 estimate_timings_from_pts(ic, old_offset);
2066 } else if (has_duration(ic)) {
2067 /* at least one component has timings - we use them for all
2068 the components */
2069 fill_all_stream_timings(ic);
2070 } else {
2071 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2072 /* less precise: use bitrate info */
2073 estimate_timings_from_bit_rate(ic);
2074 }
2075 update_stream_timings(ic);
2076
2077 {
2078 int i;
2079 AVStream av_unused *st;
2080 for(i = 0;i < ic->nb_streams; i++) {
2081 st = ic->streams[i];
2082 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2083 (double) st->start_time / AV_TIME_BASE,
2084 (double) st->duration / AV_TIME_BASE);
2085 }
2086 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2087 (double) ic->start_time / AV_TIME_BASE,
2088 (double) ic->duration / AV_TIME_BASE,
2089 ic->bit_rate / 1000);
2090 }
2091 }
2092
2093 static int has_codec_parameters(AVCodecContext *avctx)
2094 {
2095 int val;
2096 switch (avctx->codec_type) {
2097 case AVMEDIA_TYPE_AUDIO:
2098 val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE;
2099 if (!avctx->frame_size &&
2100 (avctx->codec_id == CODEC_ID_VORBIS ||
2101 avctx->codec_id == CODEC_ID_AAC ||
2102 avctx->codec_id == CODEC_ID_MP1 ||
2103 avctx->codec_id == CODEC_ID_MP2 ||
2104 avctx->codec_id == CODEC_ID_MP3 ||
2105 avctx->codec_id == CODEC_ID_CELT))
2106 return 0;
2107 break;
2108 case AVMEDIA_TYPE_VIDEO:
2109 val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE;
2110 break;
2111 default:
2112 val = 1;
2113 break;
2114 }
2115 return avctx->codec_id != CODEC_ID_NONE && val != 0;
2116 }
2117
2118 static int has_decode_delay_been_guessed(AVStream *st)
2119 {
2120 return st->codec->codec_id != CODEC_ID_H264 ||
2121 st->info->nb_decoded_frames >= 6;
2122 }
2123
2124 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2125 {
2126 int16_t *samples;
2127 AVCodec *codec;
2128 int got_picture, data_size, ret=0;
2129 AVFrame picture;
2130
2131 if(!st->codec->codec){
2132 codec = avcodec_find_decoder(st->codec->codec_id);
2133 if (!codec)
2134 return -1;
2135 ret = avcodec_open2(st->codec, codec, options);
2136 if (ret < 0)
2137 return ret;
2138 }
2139
2140 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st) ||
2141 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF)) {
2142 switch(st->codec->codec_type) {
2143 case AVMEDIA_TYPE_VIDEO:
2144 avcodec_get_frame_defaults(&picture);
2145 ret = avcodec_decode_video2(st->codec, &picture,
2146 &got_picture, avpkt);
2147 if (got_picture)
2148 st->info->nb_decoded_frames++;
2149 break;
2150 case AVMEDIA_TYPE_AUDIO:
2151 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2152 samples = av_malloc(data_size);
2153 if (!samples)
2154 goto fail;
2155 ret = avcodec_decode_audio3(st->codec, samples,
2156 &data_size, avpkt);
2157 av_free(samples);
2158 break;
2159 default:
2160 break;
2161 }
2162 }
2163 fail:
2164 return ret;
2165 }
2166
2167 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2168 {
2169 while (tags->id != CODEC_ID_NONE) {
2170 if (tags->id == id)
2171 return tags->tag;
2172 tags++;
2173 }
2174 return 0;
2175 }
2176
2177 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2178 {
2179 int i;
2180 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2181 if(tag == tags[i].tag)
2182 return tags[i].id;
2183 }
2184 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2185 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2186 return tags[i].id;
2187 }
2188 return CODEC_ID_NONE;
2189 }
2190
2191 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2192 {
2193 int i;
2194 for(i=0; tags && tags[i]; i++){
2195 int tag= ff_codec_get_tag(tags[i], id);
2196 if(tag) return tag;
2197 }
2198 return 0;
2199 }
2200
2201 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2202 {
2203 int i;
2204 for(i=0; tags && tags[i]; i++){
2205 enum CodecID id= ff_codec_get_id(tags[i], tag);
2206 if(id!=CODEC_ID_NONE) return id;
2207 }
2208 return CODEC_ID_NONE;
2209 }
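
/* Illustrative sketch (not part of the original file): looking up a container
 * FourCC for a codec and mapping it back, using whatever tag tables the muxer
 * provides. The function name is made up for the example. */
#if 0
static void tag_lookup_example(AVFormatContext *s)
{
    /* FourCC for a codec id, or 0 if the muxer has no mapping for it */
    unsigned int tag = av_codec_get_tag(s->oformat->codec_tag, CODEC_ID_MPEG4);

    /* and back again: codec id for a FourCC, or CODEC_ID_NONE if unknown */
    enum CodecID id = av_codec_get_id(s->oformat->codec_tag, tag);

    av_log(s, AV_LOG_DEBUG, "tag 0x%08x -> codec id %d\n", tag, (int)id);
}
#endif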
2210
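/* Give every chapter with an unknown end time an end: the start of the next
 * chapter when one follows it, otherwise the end of the file; if nothing
 * usable is known the chapter's own start is used as a fallback. */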
2211 static void compute_chapters_end(AVFormatContext *s)
2212 {
2213 unsigned int i, j;
2214 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2215
2216 for (i = 0; i < s->nb_chapters; i++)
2217 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2218 AVChapter *ch = s->chapters[i];
2219 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2220 : INT64_MAX;
2221
2222 for (j = 0; j < s->nb_chapters; j++) {
2223 AVChapter *ch1 = s->chapters[j];
2224 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2225 if (j != i && next_start > ch->start && next_start < end)
2226 end = next_start;
2227 }
2228 ch->end = (end == INT64_MAX) ? ch->start : end;
2229 }
2230 }
2231
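/* Return the i-th candidate frame rate, scaled by 12*1001 so that
 * get_std_framerate(i)/(12*1001) is the rate in frames per second: indices
 * below 60*12 cover multiples of 1/12 fps up to 60 fps, the remaining entries
 * are the NTSC rates 24000/1001, 30000/1001, 60000/1001, 12000/1001 and
 * 15000/1001. */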
2232 static int get_std_framerate(int i){
2233 if(i<60*12) return i*1001;
2234 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2235 }
2236
2237 /*
2238  * Is the time base unreliable?
2239 * This is a heuristic to balance between quick acceptance of the values in
2240 * the headers vs. some extra checks.
2241 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2242 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2243 * And there are "variable" fps files this needs to detect as well.
2244 */
2245 static int tb_unreliable(AVCodecContext *c){
2246 if( c->time_base.den >= 101L*c->time_base.num
2247 || c->time_base.den < 5L*c->time_base.num
2248 /* || c->codec_tag == AV_RL32("DIVX")
2249 || c->codec_tag == AV_RL32("XVID")*/
2250 || c->codec_id == CODEC_ID_MPEG2VIDEO
2251 || c->codec_id == CODEC_ID_H264
2252 )
2253 return 1;
2254 return 0;
2255 }
2256
2257 #if FF_API_FORMAT_PARAMETERS
2258 int av_find_stream_info(AVFormatContext *ic)
2259 {
2260 return avformat_find_stream_info(ic, NULL);
2261 }
2262 #endif
2263
2264 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2265 {
2266 int i, count, ret, read_size, j;
2267 AVStream *st;
2268 AVPacket pkt1, *pkt;
2269 int64_t old_offset = avio_tell(ic->pb);
2270 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2271
2272 for(i=0;i<ic->nb_streams;i++) {
2273 AVCodec *codec;
2274 st = ic->streams[i];
2275
2276 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2277 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2278 /* if(!st->time_base.num)
2279 st->time_base= */
2280 if(!st->codec->time_base.num)
2281 st->codec->time_base= st->time_base;
2282 }
2283 //only for the split stuff
2284 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2285 st->parser = av_parser_init(st->codec->codec_id);
2286 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2287 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2288 }
2289 }
2290 assert(!st->codec->codec);
2291 codec = avcodec_find_decoder(st->codec->codec_id);
2292
2293 /* Ensure that subtitle_header is properly set. */
2294 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2295 && codec && !st->codec->codec)
2296 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2297
2298 //try to just open decoders, in case this is enough to get parameters
2299 if(!has_codec_parameters(st->codec)){
2300 if (codec && !st->codec->codec)
2301 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2302 }
2303 }
2304
2305 for (i=0; i<ic->nb_streams; i++) {
2306 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2307 }
2308
2309 count = 0;
2310 read_size = 0;
2311 for(;;) {
2312 if(url_interrupt_cb()){
2313 ret= AVERROR_EXIT;
2314 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2315 break;
2316 }
2317
2318 /* check if one codec still needs to be handled */
2319 for(i=0;i<ic->nb_streams;i++) {
2320 int fps_analyze_framecount = 20;
2321
2322 st = ic->streams[i];
2323 if (!has_codec_parameters(st->codec))
2324 break;
2325 /* if the timebase is coarse (like the usual millisecond precision
2326 of mkv), we need to analyze more frames to reliably arrive at
2327 the correct fps */
2328 if (av_q2d(st->time_base) > 0.0005)
2329 fps_analyze_framecount *= 2;
2330 if (ic->fps_probe_size >= 0)
2331 fps_analyze_framecount = ic->fps_probe_size;
2332 /* variable fps and no guess at the real fps */
2333 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2334 && st->info->duration_count < fps_analyze_framecount
2335 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2336 break;
2337 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2338 break;
2339 if(st->first_dts == AV_NOPTS_VALUE)
2340 break;
2341 }
2342 if (i == ic->nb_streams) {
2343 /* NOTE: if the format has no header, then we need to read
2344 some packets to get most of the streams, so we cannot
2345 stop here */
2346 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2347 /* if we found the info for all the codecs, we can stop */
2348 ret = count;
2349 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2350 break;
2351 }
2352 }
2353 /* we did not get all the codec info, but we read too much data */
2354 if (read_size >= ic->probesize) {
2355 ret = count;
2356 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2357 break;
2358 }
2359
2360 /* NOTE: a new stream can be added here if the file has no header
2361 (AVFMTCTX_NOHEADER) */
2362 ret = read_frame_internal(ic, &pkt1);
2363 if (ret == AVERROR(EAGAIN))
2364 continue;
2365
2366 if (ret < 0) {
2367 /* EOF or error */
2368 ret = -1; /* we could not get all the codec parameters before EOF */
2369 for(i=0;i<ic->nb_streams;i++) {
2370 st = ic->streams[i];
2371 if (!has_codec_parameters(st->codec)){
2372 char buf[256];
2373 avcodec_string(buf, sizeof(buf), st->codec, 0);
2374 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2375 } else {
2376 ret = 0;
2377 }
2378 }
2379 break;
2380 }
2381
2382 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2383 if ((ret = av_dup_packet(pkt)) < 0)
2384 goto find_stream_info_err;
2385
2386 read_size += pkt->size;
2387
2388 st = ic->streams[pkt->stream_index];
2389 if (st->codec_info_nb_frames>1) {
2390 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2391 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2392 break;
2393 }
2394 st->info->codec_info_duration += pkt->duration;
2395 }
2396 {
2397 int64_t last = st->info->last_dts;
2398
2399 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2400 int64_t duration= pkt->dts - last;
2401 double dur= duration * av_q2d(st->time_base);
2402
2403 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2404 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2405 if (st->info->duration_count < 2)
2406 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
2407 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2408 int framerate= get_std_framerate(i);
2409 int ticks= lrintf(dur*framerate/(1001*12));
2410 double error= dur - ticks*1001*12/(double)framerate;
2411 st->info->duration_error[i] += error*error;
2412 }
2413 st->info->duration_count++;
2414 // ignore the first few values; they might have some random jitter
2415 if (st->info->duration_count > 3)
2416 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2417 }
2418 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2419 st->info->last_dts = pkt->dts;
2420 }
2421 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2422 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2423 if(i){
2424 st->codec->extradata_size= i;
2425 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2426 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2427 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2428 }
2429 }
2430
2431 /* if still no information, we try to open the codec and to
2432 decompress the frame. We try to avoid that in most cases as
2433 it takes longer and uses more memory. For MPEG-4, we need to
2434 decompress for QuickTime.
2435
2436 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2437 least one frame of codec data; this makes sure the codec initializes
2438 the channel configuration and does not only trust the values from the container.
2439 */
2440 try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i] : NULL);
2441
2442 st->codec_info_nb_frames++;
2443 count++;
2444 }
2445
2446 // close codecs which were opened in try_decode_frame()
2447 for(i=0;i<ic->nb_streams;i++) {
2448 st = ic->streams[i];
2449 if(st->codec->codec)
2450 avcodec_close(st->codec);
2451 }
2452 for(i=0;i<ic->nb_streams;i++) {
2453 st = ic->streams[i];
2454 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2455 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2456 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2457 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2458 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2459 // the check for tb_unreliable() is not completely correct, since this is not about handling
2460 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2461 // ipmovie.c produces.
2462 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2463 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2464 if (st->info->duration_count && !st->r_frame_rate.num
2465 && tb_unreliable(st->codec) /*&&
2466 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2467 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2468 int num = 0;
2469 double best_error= 2*av_q2d(st->time_base);
2470 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2471
2472 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2473 double error = st->info->duration_error[j] * get_std_framerate(j);
2474 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2475 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2476 if(error < best_error){
2477 best_error= error;
2478 num = get_std_framerate(j);
2479 }
2480 }
2481 // do not increase frame rate by more than 1 % in order to match a standard rate.
2482 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2483 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2484 }
2485
2486 if (!st->r_frame_rate.num){
2487 if( st->codec->time_base.den * (int64_t)st->time_base.num
2488 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2489 st->r_frame_rate.num = st->codec->time_base.den;
2490 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2491 }else{
2492 st->r_frame_rate.num = st->time_base.den;
2493 st->r_frame_rate.den = st->time_base.num;
2494 }
2495 }
2496 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2497 if(!st->codec->bits_per_coded_sample)
2498 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2499 // set stream disposition based on audio service type
2500 switch (st->codec->audio_service_type) {
2501 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2502 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2503 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2504 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2505 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2506 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2507 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2508 st->disposition = AV_DISPOSITION_COMMENT; break;
2509 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2510 st->disposition = AV_DISPOSITION_KARAOKE; break;
2511 }
2512 }
2513 }
2514
2515 estimate_timings(ic, old_offset);
2516
2517 compute_chapters_end(ic);
2518
2519 #if 0
2520 /* correct DTS for B-frame streams with no timestamps */
2521 for(i=0;i<ic->nb_streams;i++) {
2522 st = ic->streams[i];
2523 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2524 if(b-frames){
2525 ppktl = &ic->packet_buffer;
2526 while(ppkt1){
2527 if(ppkt1->stream_index != i)
2528 continue;
2529 if(ppkt1->pkt->dts < 0)
2530 break;
2531 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2532 break;
2533 ppkt1->pkt->dts -= delta;
2534 ppkt1= ppkt1->next;
2535 }
2536 if(ppkt1)
2537 continue;
2538 st->cur_dts -= delta;
2539 }
2540 }
2541 }
2542 #endif
2543
2544 find_stream_info_err:
2545 for (i=0; i < ic->nb_streams; i++)
2546 av_freep(&ic->streams[i]->info);
2547 return ret;
2548 }
2549
2550 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2551 {
2552 int i, j;
2553
2554 for (i = 0; i < ic->nb_programs; i++)
2555 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2556 if (ic->programs[i]->stream_index[j] == s)
2557 return ic->programs[i];
2558 return NULL;
2559 }
2560
2561 int av_find_best_stream(AVFormatContext *ic,
2562 enum AVMediaType type,
2563 int wanted_stream_nb,
2564 int related_stream,
2565 AVCodec **decoder_ret,
2566 int flags)
2567 {
2568 int i, nb_streams = ic->nb_streams;
2569 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2570 unsigned *program = NULL;
2571 AVCodec *decoder = NULL, *best_decoder = NULL;
2572
2573 if (related_stream >= 0 && wanted_stream_nb < 0) {
2574 AVProgram *p = find_program_from_stream(ic, related_stream);
2575 if (p) {
2576 program = p->stream_index;
2577 nb_streams = p->nb_stream_indexes;
2578 }
2579 }
2580 for (i = 0; i < nb_streams; i++) {
2581 int real_stream_index = program ? program[i] : i;
2582 AVStream *st = ic->streams[real_stream_index];
2583 AVCodecContext *avctx = st->codec;
2584 if (avctx->codec_type != type)
2585 continue;
2586 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2587 continue;
2588 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2589 continue;
2590 if (decoder_ret) {
2591 decoder = avcodec_find_decoder(st->codec->codec_id);
2592 if (!decoder) {
2593 if (ret < 0)
2594 ret = AVERROR_DECODER_NOT_FOUND;
2595 continue;
2596 }
2597 }
2598 if (best_count >= st->codec_info_nb_frames)
2599 continue;
2600 best_count = st->codec_info_nb_frames;
2601 ret = real_stream_index;
2602 best_decoder = decoder;
2603 if (program && i == nb_streams - 1 && ret < 0) {
2604 program = NULL;
2605 nb_streams = ic->nb_streams;
2606 i = 0; /* no related stream found, try again with everything */
2607 }
2608 }
2609 if (decoder_ret)
2610 *decoder_ret = best_decoder;
2611 return ret;
2612 }
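
/* Illustrative sketch (not part of the original file): the usual probing
 * sequence, assuming avformat_open_input() from this lavf version; the file
 * name and the helper name are placeholders. */
#if 0
static int open_best_video_stream(AVFormatContext **ctx)
{
    AVCodec *dec = NULL;
    int ret, video_idx;

    if ((ret = avformat_open_input(ctx, "input.avi", NULL, NULL)) < 0)
        return ret;
    if ((ret = avformat_find_stream_info(*ctx, NULL)) < 0)
        return ret;

    video_idx = av_find_best_stream(*ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
    if (video_idx < 0)
        return video_idx; /* AVERROR_STREAM_NOT_FOUND or AVERROR_DECODER_NOT_FOUND */

    return avcodec_open2((*ctx)->streams[video_idx]->codec, dec, NULL);
}
#endif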
2613
2614 /*******************************************************/
2615
2616 int av_read_play(AVFormatContext *s)
2617 {
2618 if (s->iformat->read_play)
2619 return s->iformat->read_play(s);
2620 if (s->pb)
2621 return avio_pause(s->pb, 0);
2622 return AVERROR(ENOSYS);
2623 }
2624
2625 int av_read_pause(AVFormatContext *s)
2626 {
2627 if (s->iformat->read_pause)
2628 return s->iformat->read_pause(s);
2629 if (s->pb)
2630 return avio_pause(s->pb, 1);
2631 return AVERROR(ENOSYS);
2632 }
2633
2634 void av_close_input_stream(AVFormatContext *s)
2635 {
2636 flush_packet_queue(s);
2637 if (s->iformat->read_close)
2638 s->iformat->read_close(s);
2639 avformat_free_context(s);
2640 }
2641
2642 void avformat_free_context(AVFormatContext *s)
2643 {
2644 int i;
2645 AVStream *st;
2646
2647 av_opt_free(s);
2648 if (s->iformat && s->iformat->priv_class && s->priv_data)
2649 av_opt_free(s->priv_data);
2650
2651 for(i=0;i<s->nb_streams;i++) {
2652 /* free all data in a stream component */
2653 st = s->streams[i];
2654 if (st->parser) {
2655 av_parser_close(st->parser);
2656 av_free_packet(&st->cur_pkt);
2657 }
2658 av_dict_free(&st->metadata);
2659 av_free(st->index_entries);
2660 av_free(st->codec->extradata);
2661 av_free(st->codec->subtitle_header);
2662 av_free(st->codec);
2663 av_free(st->priv_data);
2664 av_free(st->info);
2665 av_free(st);
2666 }
2667 for(i=s->nb_programs-1; i>=0; i--) {
2668 av_dict_free(&s->programs[i]->metadata);
2669 av_freep(&s->programs[i]->stream_index);
2670 av_freep(&s->programs[i]);
2671 }
2672 av_freep(&s->programs);
2673 av_freep(&s->priv_data);
2674 while(s->nb_chapters--) {
2675 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2676 av_free(s->chapters[s->nb_chapters]);
2677 }
2678 av_freep(&s->chapters);
2679 av_dict_free(&s->metadata);
2680 av_freep(&s->streams);
2681 av_free(s);
2682 }
2683
2684 void av_close_input_file(AVFormatContext *s)
2685 {
2686 AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2687 NULL : s->pb;
2688 av_close_input_stream(s);
2689 if (pb)
2690 avio_close(pb);
2691 }
2692
2693 #if FF_API_NEW_STREAM
2694 AVStream *av_new_stream(AVFormatContext *s, int id)
2695 {
2696 AVStream *st = avformat_new_stream(s, NULL);
2697 if (st)
2698 st->id = id;
2699 return st;
2700 }
2701 #endif
2702
2703 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
2704 {
2705 AVStream *st;
2706 int i;
2707 AVStream **streams;
2708
2709 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2710 return NULL;
2711 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2712 if (!streams)
2713 return NULL;
2714 s->streams = streams;
2715
2716 st = av_mallocz(sizeof(AVStream));
2717 if (!st)
2718 return NULL;
2719 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2720 av_free(st);
2721 return NULL;
2722 }
2723
2724 st->codec = avcodec_alloc_context3(c);
2725 if (s->iformat) {
2726 /* no default bitrate if decoding */
2727 st->codec->bit_rate = 0;
2728 }
2729 st->index = s->nb_streams;
2730 st->start_time = AV_NOPTS_VALUE;
2731 st->duration = AV_NOPTS_VALUE;
2732 /* we set the current DTS to 0 so that formats without any timestamps
2733 but with durations get some timestamps; formats with some unknown
2734 timestamps have their first few packets buffered and the
2735 timestamps corrected before they are returned to the user */
2736 st->cur_dts = 0;
2737 st->first_dts = AV_NOPTS_VALUE;
2738 st->probe_packets = MAX_PROBE_PACKETS;
2739
2740 /* default pts setting is MPEG-like */
2741 av_set_pts_info(st, 33, 1, 90000);
2742 st->last_IP_pts = AV_NOPTS_VALUE;
2743 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2744 st->pts_buffer[i]= AV_NOPTS_VALUE;
2745 st->reference_dts = AV_NOPTS_VALUE;
2746
2747 st->sample_aspect_ratio = (AVRational){0,1};
2748
2749 s->streams[s->nb_streams++] = st;
2750 return st;
2751 }
2752
2753 AVProgram *av_new_program(AVFormatContext *ac, int id)
2754 {
2755 AVProgram *program=NULL;
2756 int i;
2757
2758 av_dlog(ac, "new_program: id=0x%04x\n", id);
2759
2760 for(i=0; i<ac->nb_programs; i++)
2761 if(ac->programs[i]->id == id)
2762 program = ac->programs[i];
2763
2764 if(!program){
2765 program = av_mallocz(sizeof(AVProgram));
2766 if (!program)
2767 return NULL;
2768 dynarray_add(&ac->programs, &ac->nb_programs, program);
2769 program->discard = AVDISCARD_NONE;
2770 }
2771 program->id = id;
2772
2773 return program;
2774 }
2775
2776 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2777 {
2778 AVChapter *chapter = NULL;
2779 int i;
2780
2781 for(i=0; i<s->nb_chapters; i++)
2782 if(s->chapters[i]->id == id)
2783 chapter = s->chapters[i];
2784
2785 if(!chapter){
2786 chapter= av_mallocz(sizeof(AVChapter));
2787 if(!chapter)
2788 return NULL;
2789 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2790 }
2791 av_dict_set(&chapter->metadata, "title", title, 0);
2792 chapter->id = id;
2793 chapter->time_base= time_base;
2794 chapter->start = start;
2795 chapter->end = end;
2796
2797 return chapter;
2798 }
2799
2800 /************************************************************/
2801 /* output media file */
2802
2803 #if FF_API_FORMAT_PARAMETERS
2804 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2805 {
2806 int ret;
2807
2808 if (s->oformat->priv_data_size > 0) {
2809 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2810 if (!s->priv_data)
2811 return AVERROR(ENOMEM);
2812 if (s->oformat->priv_class) {
2813 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2814 av_opt_set_defaults(s->priv_data);
2815 }
2816 } else
2817 s->priv_data = NULL;
2818
2819 if (s->oformat->set_parameters) {
2820 ret = s->oformat->set_parameters(s, ap);
2821 if (ret < 0)
2822 return ret;
2823 }
2824 return 0;
2825 }
2826 #endif
2827
2828 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2829 {
2830 const AVCodecTag *avctag;
2831 int n;
2832 enum CodecID id = CODEC_ID_NONE;
2833 unsigned int tag = 0;
2834
2835 /**
2836 * Check that tag + id is in the table
2837 * If neither is in the table -> OK
2838 * If tag is in the table with another id -> FAIL
2839 * If id is in the table with another tag -> FAIL unless strict < normal
2840 */
2841 for (n = 0; s->oformat->codec_tag[n]; n++) {
2842 avctag = s->oformat->codec_tag[n];
2843 while (avctag->id != CODEC_ID_NONE) {
2844 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
2845 id = avctag->id;
2846 if (id == st->codec->codec_id)
2847 return 1;
2848 }
2849 if (avctag->id == st->codec->codec_id)
2850 tag = avctag->tag;
2851 avctag++;
2852 }
2853 }
2854 if (id != CODEC_ID_NONE)
2855 return 0;
2856 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
2857 return 0;
2858 return 1;
2859 }
2860
2861 #if FF_API_FORMAT_PARAMETERS
2862 int av_write_header(AVFormatContext *s)
2863 {
2864 return avformat_write_header(s, NULL);
2865 }
2866 #endif
2867
2868 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
2869 {
2870 int ret = 0, i;
2871 AVStream *st;
2872 AVDictionary *tmp = NULL;
2873
2874 if (options)
2875 av_dict_copy(&tmp, *options, 0);
2876 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
2877 goto fail;
2878
2879 // some sanity checks
2880 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2881 av_log(s, AV_LOG_ERROR, "no streams\n");
2882 ret = AVERROR(EINVAL);
2883 goto fail;
2884 }
2885
2886 for(i=0;i<s->nb_streams;i++) {
2887 st = s->streams[i];
2888
2889 switch (st->codec->codec_type) {
2890 case AVMEDIA_TYPE_AUDIO:
2891 if(st->codec->sample_rate<=0){
2892 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2893 ret = AVERROR(EINVAL);
2894 goto fail;
2895 }
2896 if(!st->codec->block_align)
2897 st->codec->block_align = st->codec->channels *
2898 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2899 break;
2900 case AVMEDIA_TYPE_VIDEO:
2901 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2902 av_log(s, AV_LOG_ERROR, "time base not set\n");
2903 ret = AVERROR(EINVAL);
2904 goto fail;
2905 }
2906 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2907 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2908 ret = AVERROR(EINVAL);
2909 goto fail;
2910 }
2911 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2912 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2913 ret = AVERROR(EINVAL);
2914 goto fail;
2915 }
2916 break;
2917 }
2918
2919 if(s->oformat->codec_tag){
2920 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2921 // the current rawvideo encoding system ends up setting the wrong codec_tag for AVI, so we override it here
2922 st->codec->codec_tag= 0;
2923 }
2924 if(st->codec->codec_tag){
2925 if (!validate_codec_tag(s, st)) {
2926 char tagbuf[32];
2927 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2928 av_log(s, AV_LOG_ERROR,
2929 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2930 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2931 ret = AVERROR_INVALIDDATA;
2932 goto fail;
2933 }
2934 }else
2935 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2936 }
2937
2938 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2939 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2940 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2941 }
2942
2943 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2944 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2945 if (!s->priv_data) {
2946 ret = AVERROR(ENOMEM);
2947 goto fail;
2948 }
2949 if (s->oformat->priv_class) {
2950 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2951 av_opt_set_defaults(s->priv_data);
2952 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
2953 goto fail;
2954 }
2955 }
2956
2957 /* set muxer identification string */
2958 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2959 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2960 }
2961
2962 if(s->oformat->write_header){
2963 ret = s->oformat->write_header(s);
2964 if (ret < 0)
2965 goto fail;
2966 }
2967
2968 /* init PTS generation */
2969 for(i=0;i<s->nb_streams;i++) {
2970 int64_t den = AV_NOPTS_VALUE;
2971 st = s->streams[i];
2972
2973 switch (st->codec->codec_type) {
2974 case AVMEDIA_TYPE_AUDIO:
2975 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2976 break;
2977 case AVMEDIA_TYPE_VIDEO:
2978 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2979 break;
2980 default:
2981 break;
2982 }
2983 if (den != AV_NOPTS_VALUE) {
2984 if (den <= 0) {
2985 ret = AVERROR_INVALIDDATA;
2986 goto fail;
2987 }
2988 frac_init(&st->pts, 0, 0, den);
2989 }
2990 }
2991
2992 if (options) {
2993 av_dict_free(options);
2994 *options = tmp;
2995 }
2996 return 0;
2997 fail:
2998 av_dict_free(&tmp);
2999 return ret;
3000 }
3001
3002 //FIXME merge with compute_pkt_fields
3003 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3004 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3005 int num, den, frame_size, i;
3006
3007 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
3008 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
3009
3010 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
3011 return AVERROR(EINVAL);*/
3012
3013 /* duration field */
3014 if (pkt->duration == 0) {
3015 compute_frame_duration(&num, &den, st, NULL, pkt);
3016 if (den && num) {
3017 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
3018 }
3019 }
3020
3021 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3022 pkt->pts= pkt->dts;
3023
3024 //XXX/FIXME this is a temporary hack until all encoders output pts
3025 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3026 pkt->dts=
3027 // pkt->pts= st->cur_dts;
3028 pkt->pts= st->pts.val;
3029 }
3030
3031 //calculate dts from pts
3032 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3033 st->pts_buffer[0]= pkt->pts;
3034 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3035 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
3036 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3037 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3038
3039 pkt->dts= st->pts_buffer[0];
3040 }
3041
3042 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
3043 av_log(s, AV_LOG_ERROR,
3044 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
3045 st->index, st->cur_dts, pkt->dts);
3046 return AVERROR(EINVAL);
3047 }
3048 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3049 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
3050 return AVERROR(EINVAL);
3051 }
3052
3053 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
3054 st->cur_dts= pkt->dts;
3055 st->pts.val= pkt->dts;
3056
3057 /* update pts */
3058 switch (st->codec->codec_type) {
3059 case AVMEDIA_TYPE_AUDIO:
3060 frame_size = get_audio_frame_size(st->codec, pkt->size);
3061
3062 /* HACK/FIXME: we skip the initial zero-sized packets as they are most
3063 likely equal to the encoder delay, but it would be better if we
3064 had the real timestamps from the encoder */
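/* st->pts.den was initialized to time_base.num * sample_rate in
   avformat_write_header(), so adding time_base.den * frame_size advances
   st->pts by frame_size / sample_rate seconds (one audio frame) expressed
   in stream time_base units */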
3065 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3066 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3067 }
3068 break;
3069 case AVMEDIA_TYPE_VIDEO:
3070 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
3071 break;
3072 default:
3073 break;
3074 }
3075 return 0;
3076 }
3077
3078 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3079 {
3080 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3081
3082 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3083 return ret;
3084
3085 ret= s->oformat->write_packet(s, pkt);
3086
3087 if (ret >= 0)
3088 s->streams[pkt->stream_index]->nb_frames++;
3089 return ret;
3090 }
3091
3092 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3093 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3094 {
3095 AVPacketList **next_point, *this_pktl;
3096
3097 this_pktl = av_mallocz(sizeof(AVPacketList));
3098 this_pktl->pkt= *pkt;
3099 pkt->destruct= NULL; // do not free original but only the copy
3100 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-allocated memory
3101
3102 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3103 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
3104 }else
3105 next_point = &s->packet_buffer;
3106
3107 if(*next_point){
3108 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3109 while(!compare(s, &(*next_point)->pkt, pkt)){
3110 next_point= &(*next_point)->next;
3111 }
3112 goto next_non_null;
3113 }else{
3114 next_point = &(s->packet_buffer_end->next);
3115 }
3116 }
3117 assert(!*next_point);
3118
3119 s->packet_buffer_end= this_pktl;
3120 next_non_null:
3121
3122 this_pktl->next= *next_point;
3123
3124 s->streams[pkt->stream_index]->last_in_packet_buffer=
3125 *next_point= this_pktl;
3126 }
3127
3128 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3129 {
3130 AVStream *st = s->streams[ pkt ->stream_index];
3131 AVStream *st2= s->streams[ next->stream_index];
3132 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3133 st->time_base);
3134
3135 if (comp == 0)
3136 return pkt->stream_index < next->stream_index;
3137 return comp > 0;
3138 }
3139
3140 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
3141 AVPacketList *pktl;
3142 int stream_count=0;
3143 int i;
3144
3145 if(pkt){
3146 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3147 }
3148
3149 for(i=0; i < s->nb_streams; i++)
3150 stream_count+= !!s->streams[i]->last_in_packet_buffer;
3151
3152 if(stream_count && (s->nb_streams == stream_count || flush)){
3153 pktl= s->packet_buffer;
3154 *out= pktl->pkt;
3155
3156 s->packet_buffer= pktl->next;
3157 if(!s->packet_buffer)
3158 s->packet_buffer_end= NULL;
3159
3160 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3161 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3162 av_freep(&pktl);
3163 return 1;
3164 }else{
3165 av_init_packet(out);
3166 return 0;
3167 }
3168 }
3169
3170 /**
3171 * Interleave an AVPacket correctly so it can be muxed.
3172 * @param out the interleaved packet will be output here
3173 * @param in the input packet
3174 * @param flush 1 if no further packets are available as input and all
3175 * remaining packets should be output
3176 * @return 1 if a packet was output, 0 if no packet could be output,
3177 * < 0 if an error occurred
3178 */
3179 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3180 if(s->oformat->interleave_packet)
3181 return s->oformat->interleave_packet(s, out, in, flush);
3182 else
3183 return av_interleave_packet_per_dts(s, out, in, flush);
3184 }
3185
3186 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3187 AVStream *st= s->streams[ pkt->stream_index];
3188 int ret;
3189
3190 //FIXME/XXX/HACK drop zero sized packets
3191 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3192 return 0;
3193
3194 av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
3195 pkt->size, pkt->dts, pkt->pts);
3196 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3197 return ret;
3198
3199 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3200 return AVERROR(EINVAL);
3201
3202 for(;;){
3203 AVPacket opkt;
3204 int ret= interleave_packet(s, &opkt, pkt, 0);
3205 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3206 return ret;
3207
3208 ret= s->oformat->write_packet(s, &opkt);
3209 if (ret >= 0)
3210 s->streams[opkt.stream_index]->nb_frames++;
3211
3212 av_free_packet(&opkt);
3213 pkt= NULL;
3214
3215 if(ret<0)
3216 return ret;
3217 }
3218 }
3219
3220 int av_write_trailer(AVFormatContext *s)
3221 {
3222 int ret, i;
3223
3224 for(;;){
3225 AVPacket pkt;
3226 ret= interleave_packet(s, &pkt, NULL, 1);
3227 if(ret<0) //FIXME cleanup needed for ret<0 ?
3228 goto fail;
3229 if(!ret)
3230 break;
3231
3232 ret= s->oformat->write_packet(s, &pkt);
3233 if (ret >= 0)
3234 s->streams[pkt.stream_index]->nb_frames++;
3235
3236 av_free_packet(&pkt);
3237
3238 if(ret<0)
3239 goto fail;
3240 }
3241
3242 if(s->oformat->write_trailer)
3243 ret = s->oformat->write_trailer(s);
3244 fail:
3245 for(i=0;i<s->nb_streams;i++) {
3246 av_freep(&s->streams[i]->priv_data);
3247 av_freep(&s->streams[i]->index_entries);
3248 }
3249 if (s->iformat && s->iformat->priv_class)
3250 av_opt_free(s->priv_data);
3251 av_freep(&s->priv_data);
3252 return ret;
3253 }
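
/* Illustrative sketch (not part of the original file): the minimal muxing call
 * sequence built from the functions above. Creating the streams, setting up
 * their codec contexts and producing packets is the caller's job and is
 * elided; the helper name is made up for the example. */
#if 0
static int mux_one_packet(AVFormatContext *oc, AVPacket *pkt)
{
    int ret;

    /* once per file, after all streams were added with avformat_new_stream() */
    if ((ret = avformat_write_header(oc, NULL)) < 0)
        return ret;

    /* once per encoded packet; the muxer buffers and reorders by dts */
    if ((ret = av_interleaved_write_frame(oc, pkt)) < 0)
        return ret;

    /* flush the interleaving buffers and write the trailer */
    return av_write_trailer(oc);
}
#endif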
3254
3255 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3256 {
3257 int i, j;
3258 AVProgram *program=NULL;
3259 void *tmp;
3260
3261 if (idx >= ac->nb_streams) {
3262 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3263 return;
3264 }
3265
3266 for(i=0; i<ac->nb_programs; i++){
3267 if(ac->programs[i]->id != progid)
3268 continue;
3269 program = ac->programs[i];
3270 for(j=0; j<program->nb_stream_indexes; j++)
3271 if(program->stream_index[j] == idx)
3272 return;
3273
3274 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3275 if(!tmp)
3276 return;
3277 program->stream_index = tmp;
3278 program->stream_index[program->nb_stream_indexes++] = idx;
3279 return;
3280 }
3281 }
3282
3283 static void print_fps(double d, const char *postfix){
3284 uint64_t v= lrintf(d*100);
3285 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3286 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3287 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3288 }
3289
3290 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3291 {
3292 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3293 AVDictionaryEntry *tag=NULL;
3294
3295 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3296 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3297 if(strcmp("language", tag->key))
3298 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
3299 }
3300 }
3301 }
3302
3303 /* "user interface" functions */
3304 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3305 {
3306 char buf[256];
3307 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3308 AVStream *st = ic->streams[i];
3309 int g = av_gcd(st->time_base.num, st->time_base.den);
3310 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3311 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3312 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
3313 /* the PID is an important piece of information, so we display it */
3314 /* XXX: add a generic system */
3315 if (flags & AVFMT_SHOW_IDS)
3316 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3317 if (lang)
3318 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3319 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3320 av_log(NULL, AV_LOG_INFO, ": %s", buf);
3321 if (st->sample_aspect_ratio.num && // default
3322 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3323 AVRational display_aspect_ratio;
3324 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3325 st->codec->width*st->sample_aspect_ratio.num,
3326 st->codec->height*st->sample_aspect_ratio.den,
3327 1024*1024);
3328 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
3329 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3330 display_aspect_ratio.num, display_aspect_ratio.den);
3331 }
3332 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3333 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3334 print_fps(av_q2d(st->avg_frame_rate), "fps");
3335 if(st->r_frame_rate.den && st->r_frame_rate.num)
3336 print_fps(av_q2d(st->r_frame_rate), "tbr");
3337 if(st->time_base.den && st->time_base.num)
3338 print_fps(1/av_q2d(st->time_base), "tbn");
3339 if(st->codec->time_base.den && st->codec->time_base.num)
3340 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3341 }
3342 if (st->disposition & AV_DISPOSITION_DEFAULT)
3343 av_log(NULL, AV_LOG_INFO, " (default)");
3344 if (st->disposition & AV_DISPOSITION_DUB)
3345 av_log(NULL, AV_LOG_INFO, " (dub)");
3346 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3347 av_log(NULL, AV_LOG_INFO, " (original)");
3348 if (st->disposition & AV_DISPOSITION_COMMENT)
3349 av_log(NULL, AV_LOG_INFO, " (comment)");
3350 if (st->disposition & AV_DISPOSITION_LYRICS)
3351 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3352 if (st->disposition & AV_DISPOSITION_KARAOKE)
3353 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3354 if (st->disposition & AV_DISPOSITION_FORCED)
3355 av_log(NULL, AV_LOG_INFO, " (forced)");
3356 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3357 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3358 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3359 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3360 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3361 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3362 av_log(NULL, AV_LOG_INFO, "\n");
3363 dump_metadata(NULL, st->metadata, " ");
3364 }
3365
3366 #if FF_API_DUMP_FORMAT
3367 void dump_format(AVFormatContext *ic,
3368 int index,
3369 const char *url,
3370 int is_output)
3371 {
3372 av_dump_format(ic, index, url, is_output);
3373 }
3374 #endif
3375
3376 void av_dump_format(AVFormatContext *ic,
3377 int index,
3378 const char *url,
3379 int is_output)
3380 {
3381 int i;
3382 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3383 if (ic->nb_streams && !printed)
3384 return;
3385
3386 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3387 is_output ? "Output" : "Input",
3388 index,
3389 is_output ? ic->oformat->name : ic->iformat->name,
3390 is_output ? "to" : "from", url);
3391 dump_metadata(NULL, ic->metadata, " ");
3392 if (!is_output) {
3393 av_log(NULL, AV_LOG_INFO, " Duration: ");
3394 if (ic->duration != AV_NOPTS_VALUE) {
3395 int hours, mins, secs, us;
3396 secs = ic->duration / AV_TIME_BASE;
3397 us = ic->duration % AV_TIME_BASE;
3398 mins = secs / 60;
3399 secs %= 60;
3400 hours = mins / 60;
3401 mins %= 60;
3402 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3403 (100 * us) / AV_TIME_BASE);
3404 } else {
3405 av_log(NULL, AV_LOG_INFO, "N/A");
3406 }
3407 if (ic->start_time != AV_NOPTS_VALUE) {
3408 int secs, us;
3409 av_log(NULL, AV_LOG_INFO, ", start: ");
3410 secs = ic->start_time / AV_TIME_BASE;
3411 us = abs(ic->start_time % AV_TIME_BASE);
3412 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3413 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3414 }
3415 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3416 if (ic->bit_rate) {
3417 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3418 } else {
3419 av_log(NULL, AV_LOG_INFO, "N/A");
3420 }
3421 av_log(NULL, AV_LOG_INFO, "\n");
3422 }
3423 for (i = 0; i < ic->nb_chapters; i++) {
3424 AVChapter *ch = ic->chapters[i];
3425 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3426 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3427 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3428
3429 dump_metadata(NULL, ch->metadata, " ");
3430 }
3431 if(ic->nb_programs) {
3432 int j, k, total = 0;
3433 for(j=0; j<ic->nb_programs; j++) {
3434 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3435 "name", NULL, 0);
3436 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3437 name ? name->value : "");
3438 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3439 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3440 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3441 printed[ic->programs[j]->stream_index[k]] = 1;
3442 }
3443 total += ic->programs[j]->nb_stream_indexes;
3444 }
3445 if (total < ic->nb_streams)
3446 av_log(NULL, AV_LOG_INFO, " No Program\n");
3447 }
3448 for(i=0;i<ic->nb_streams;i++)
3449 if (!printed[i])
3450 dump_stream_format(ic, i, index, is_output);
3451
3452 av_free(printed);
3453 }
3454
3455 int64_t av_gettime(void)
3456 {
3457 struct timeval tv;
3458 gettimeofday(&tv,NULL);
3459 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
3460 }
3461
3462 uint64_t ff_ntp_time(void)
3463 {
3464 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3465 }
3466
3467 #if FF_API_PARSE_DATE
3468 #include "libavutil/parseutils.h"
3469
3470 int64_t parse_date(const char *timestr, int duration)
3471 {
3472 int64_t timeval;
3473 av_parse_time(&timeval, timestr, duration);
3474 return timeval;
3475 }
3476 #endif
3477
3478 #if FF_API_FIND_INFO_TAG
3479 #include "libavutil/parseutils.h"
3480
3481 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
3482 {
3483 return av_find_info_tag(arg, arg_size, tag1, info);
3484 }
3485 #endif
3486
3487 int av_get_frame_filename(char *buf, int buf_size,
3488 const char *path, int number)
3489 {
3490 const char *p;
3491 char *q, buf1[20], c;
3492 int nd, len, percentd_found;
3493
3494 q = buf;
3495 p = path;
3496 percentd_found = 0;
3497 for(;;) {
3498 c = *p++;
3499 if (c == '\0')
3500 break;
3501 if (c == '%') {
3502 do {
3503 nd = 0;
3504 while (isdigit(*p)) {
3505 nd = nd * 10 + *p++ - '0';
3506 }
3507 c = *p++;
3508 } while (isdigit(c));
3509
3510 switch(c) {
3511 case '%':
3512 goto addchar;
3513 case 'd':
3514 if (percentd_found)
3515 goto fail;
3516 percentd_found = 1;
3517 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3518 len = strlen(buf1);
3519 if ((q - buf + len) > buf_size - 1)
3520 goto fail;
3521 memcpy(q, buf1, len);
3522 q += len;
3523 break;
3524 default:
3525 goto fail;
3526 }
3527 } else {
3528 addchar:
3529 if ((q - buf) < buf_size - 1)
3530 *q++ = c;
3531 }
3532 }
3533 if (!percentd_found)
3534 goto fail;
3535 *q = '\0';
3536 return 0;
3537 fail:
3538 *q = '\0';
3539 return -1;
3540 }
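
/* Illustrative sketch (not part of the original file): expanding a frame
 * number into a printf-style image sequence name; the pattern and the frame
 * number are arbitrary example values. */
#if 0
static void frame_filename_example(void)
{
    char name[1024];

    /* produces "img007.png"; returns -1 if the pattern contains no %d
       (or more than one) or if the buffer is too small */
    if (av_get_frame_filename(name, sizeof(name), "img%03d.png", 7) == 0)
        av_log(NULL, AV_LOG_DEBUG, "next frame: %s\n", name);
}
#endif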
3541
3542 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3543 {
3544 int len, i, j, c;
3545 #undef fprintf
3546 #define PRINT(...) do { if (!f)