avformat: Interpret times in ff_iso8601_to_unix_time as UTC
libavformat/utils.c
1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /* #define DEBUG */
23
24 #include "avformat.h"
25 #include "avio_internal.h"
26 #include "internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/dict.h"
30 #include "libavutil/pixdesc.h"
31 #include "metadata.h"
32 #include "id3v2.h"
33 #include "libavutil/avstring.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/parseutils.h"
36 #include "riff.h"
37 #include "audiointerleave.h"
38 #include "url.h"
39 #include <sys/time.h>
40 #include <time.h>
41 #include <stdarg.h>
42 #if CONFIG_NETWORK
43 #include "network.h"
44 #endif
45
46 #undef NDEBUG
47 #include <assert.h>
48
49 /**
50 * @file
51 * various utility functions for use within Libav
52 */
53
54 unsigned avformat_version(void)
55 {
56 return LIBAVFORMAT_VERSION_INT;
57 }
58
59 const char *avformat_configuration(void)
60 {
61 return LIBAV_CONFIGURATION;
62 }
63
64 const char *avformat_license(void)
65 {
66 #define LICENSE_PREFIX "libavformat license: "
67 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
68 }
69
70 /* fraction handling */
71
72 /**
73 * f = val + (num / den) + 0.5.
74 *
75 * 'num' is normalized so that 0 <= num < den.
76 *
77 * @param f fractional number
78 * @param val integer value
79 * @param num must be >= 0
80 * @param den must be >= 1
81 */
82 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
83 {
84 num += (den >> 1);
85 if (num >= den) {
86 val += num / den;
87 num = num % den;
88 }
89 f->val = val;
90 f->num = num;
91 f->den = den;
92 }
93
94 /**
95 * Fractional addition to f: f = f + (incr / f->den).
96 *
97 * @param f fractional number
98 * @param incr increment, can be positive or negative
99 */
100 static void frac_add(AVFrac *f, int64_t incr)
101 {
102 int64_t num, den;
103
104 num = f->num + incr;
105 den = f->den;
106 if (num < 0) {
107 f->val += num / den;
108 num = num % den;
109 if (num < 0) {
110 num += den;
111 f->val--;
112 }
113 } else if (num >= den) {
114 f->val += num / den;
115 num = num % den;
116 }
117 f->num = num;
118 }
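
/*
 * Illustrative sketch (not part of the original source): a worked example of
 * how frac_init()/frac_add() keep an exact integer-plus-fraction value
 * (hypothetical numbers):
 *
 *     AVFrac f;
 *     frac_init(&f, 10, 3, 4); // 10 + 3/4 + 0.5 -> val=11, num=1, den=4 (11.25)
 *     frac_add(&f, 6);         // adds 6/4       -> val=12, num=3        (12.75)
 *
 * Accumulating timestamps this way avoids losing the fractional remainder to
 * integer truncation.
 */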
119
120 /** head of registered input format linked list */
121 static AVInputFormat *first_iformat = NULL;
122 /** head of registered output format linked list */
123 static AVOutputFormat *first_oformat = NULL;
124
125 AVInputFormat *av_iformat_next(AVInputFormat *f)
126 {
127 if(f) return f->next;
128 else return first_iformat;
129 }
130
131 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
132 {
133 if(f) return f->next;
134 else return first_oformat;
135 }
136
137 void av_register_input_format(AVInputFormat *format)
138 {
139 AVInputFormat **p;
140 p = &first_iformat;
141 while (*p != NULL) p = &(*p)->next;
142 *p = format;
143 format->next = NULL;
144 }
145
146 void av_register_output_format(AVOutputFormat *format)
147 {
148 AVOutputFormat **p;
149 p = &first_oformat;
150 while (*p != NULL) p = &(*p)->next;
151 *p = format;
152 format->next = NULL;
153 }
154
155 int av_match_ext(const char *filename, const char *extensions)
156 {
157 const char *ext, *p;
158 char ext1[32], *q;
159
160 if(!filename)
161 return 0;
162
163 ext = strrchr(filename, '.');
164 if (ext) {
165 ext++;
166 p = extensions;
167 for(;;) {
168 q = ext1;
169 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
170 *q++ = *p++;
171 *q = '\0';
172 if (!av_strcasecmp(ext1, ext))
173 return 1;
174 if (*p == '\0')
175 break;
176 p++;
177 }
178 }
179 return 0;
180 }
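
/*
 * Illustrative sketch (not part of the original source): av_match_ext()
 * compares the filename extension case-insensitively against a
 * comma-separated list, e.g.
 *
 *     av_match_ext("movie.MKV", "mkv,webm");  // returns 1
 *     av_match_ext("movie.avi", "mkv,webm");  // returns 0
 */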
181
182 static int match_format(const char *name, const char *names)
183 {
184 const char *p;
185 int len, namelen;
186
187 if (!name || !names)
188 return 0;
189
190 namelen = strlen(name);
191 while ((p = strchr(names, ','))) {
192 len = FFMAX(p - names, namelen);
193 if (!av_strncasecmp(name, names, len))
194 return 1;
195 names = p+1;
196 }
197 return !av_strcasecmp(name, names);
198 }
199
200 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
201 const char *mime_type)
202 {
203 AVOutputFormat *fmt = NULL, *fmt_found;
204 int score_max, score;
205
206 /* specific test for image sequences */
207 #if CONFIG_IMAGE2_MUXER
208 if (!short_name && filename &&
209 av_filename_number_test(filename) &&
210 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
211 return av_guess_format("image2", NULL, NULL);
212 }
213 #endif
214 /* Find the proper file type. */
215 fmt_found = NULL;
216 score_max = 0;
217 while ((fmt = av_oformat_next(fmt))) {
218 score = 0;
219 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
220 score += 100;
221 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
222 score += 10;
223 if (filename && fmt->extensions &&
224 av_match_ext(filename, fmt->extensions)) {
225 score += 5;
226 }
227 if (score > score_max) {
228 score_max = score;
229 fmt_found = fmt;
230 }
231 }
232 return fmt_found;
233 }
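
/*
 * Illustrative sketch (not part of the original source): a typical muxer
 * lookup. Any of the three hints may be NULL; the best-scoring format wins.
 *
 *     AVOutputFormat *ofmt = av_guess_format(NULL, "out.mkv", NULL); // by extension
 *     if (!ofmt)
 *         ofmt = av_guess_format("matroska", NULL, NULL);            // by short name
 */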
234
235 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
236 const char *filename, const char *mime_type, enum AVMediaType type){
237 if(type == AVMEDIA_TYPE_VIDEO){
238 enum CodecID codec_id= CODEC_ID_NONE;
239
240 #if CONFIG_IMAGE2_MUXER
241 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
242 codec_id= ff_guess_image2_codec(filename);
243 }
244 #endif
245 if(codec_id == CODEC_ID_NONE)
246 codec_id= fmt->video_codec;
247 return codec_id;
248 }else if(type == AVMEDIA_TYPE_AUDIO)
249 return fmt->audio_codec;
250 else if (type == AVMEDIA_TYPE_SUBTITLE)
251 return fmt->subtitle_codec;
252 else
253 return CODEC_ID_NONE;
254 }
255
256 AVInputFormat *av_find_input_format(const char *short_name)
257 {
258 AVInputFormat *fmt = NULL;
259 while ((fmt = av_iformat_next(fmt))) {
260 if (match_format(short_name, fmt->name))
261 return fmt;
262 }
263 return NULL;
264 }
265
266
267 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
268 {
269 int ret= av_new_packet(pkt, size);
270
271 if(ret<0)
272 return ret;
273
274 pkt->pos= avio_tell(s);
275
276 ret= avio_read(s, pkt->data, size);
277 if(ret<=0)
278 av_free_packet(pkt);
279 else
280 av_shrink_packet(pkt, ret);
281
282 return ret;
283 }
284
285 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
286 {
287 int ret;
288 int old_size;
289 if (!pkt->size)
290 return av_get_packet(s, pkt, size);
291 old_size = pkt->size;
292 ret = av_grow_packet(pkt, size);
293 if (ret < 0)
294 return ret;
295 ret = avio_read(s, pkt->data + old_size, size);
296 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
297 return ret;
298 }
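
/*
 * Illustrative sketch (not part of the original source): reading a fixed-size
 * chunk from an AVIOContext ('pb' and the 4096-byte size are assumed for the
 * example). On success pkt.size equals the number of bytes actually read; on
 * failure av_get_packet() has already freed the packet.
 *
 *     AVPacket pkt;
 *     int n = av_get_packet(pb, &pkt, 4096);
 *     if (n > 0) {
 *         // ... consume pkt.data / pkt.size ...
 *         av_free_packet(&pkt);
 *     }
 */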
299
300
301 int av_filename_number_test(const char *filename)
302 {
303 char buf[1024];
304 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
305 }
306
307 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
308 {
309 AVProbeData lpd = *pd;
310 AVInputFormat *fmt1 = NULL, *fmt;
311 int score, id3 = 0;
312
313 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
314 int id3len = ff_id3v2_tag_len(lpd.buf);
315 if (lpd.buf_size > id3len + 16) {
316 lpd.buf += id3len;
317 lpd.buf_size -= id3len;
318 }
319 id3 = 1;
320 }
321
322 fmt = NULL;
323 while ((fmt1 = av_iformat_next(fmt1))) {
324 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
325 continue;
326 score = 0;
327 if (fmt1->read_probe) {
328 score = fmt1->read_probe(&lpd);
329 } else if (fmt1->extensions) {
330 if (av_match_ext(lpd.filename, fmt1->extensions)) {
331 score = 50;
332 }
333 }
334 if (score > *score_max) {
335 *score_max = score;
336 fmt = fmt1;
337 }else if (score == *score_max)
338 fmt = NULL;
339 }
340
341 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */
342 if (!fmt && is_opened && *score_max < AVPROBE_SCORE_MAX/4) {
343 while ((fmt = av_iformat_next(fmt)))
344 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) {
345 *score_max = AVPROBE_SCORE_MAX/4;
346 break;
347 }
348 }
349
350 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4-1) {
351 while ((fmt = av_iformat_next(fmt)))
352 if (fmt->extensions && av_match_ext("mp3", fmt->extensions)) {
353 *score_max = AVPROBE_SCORE_MAX/4-1;
354 break;
355 }
356 }
357
358 return fmt;
359 }
360
361 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
362 int score=0;
363 return av_probe_input_format2(pd, is_opened, &score);
364 }
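
/*
 * Illustrative sketch (not part of the original source): probing a format
 * from an in-memory buffer ('data_len' is assumed to hold the number of valid
 * bytes). The buffer must provide AVPROBE_PADDING_SIZE zeroed bytes past
 * buf_size, since probe functions may read slightly beyond the end.
 *
 *     uint8_t buf[4096 + AVPROBE_PADDING_SIZE] = { 0 };
 *     // ... fill the first data_len bytes with the start of the file ...
 *     AVProbeData pd = { "clip.bin", buf, data_len };
 *     AVInputFormat *fmt = av_probe_input_format(&pd, 1);
 */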
365
366 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
367 {
368 static const struct {
369 const char *name; enum CodecID id; enum AVMediaType type;
370 } fmt_id_type[] = {
371 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
372 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
373 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
374 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
375 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
376 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
377 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
378 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
379 { 0 }
380 };
381 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
382
383 if (fmt) {
384 int i;
385 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
386 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
387 for (i = 0; fmt_id_type[i].name; i++) {
388 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
389 st->codec->codec_id = fmt_id_type[i].id;
390 st->codec->codec_type = fmt_id_type[i].type;
391 break;
392 }
393 }
394 }
395 return !!fmt;
396 }
397
398 /************************************************************/
399 /* input media file */
400
401 #if FF_API_FORMAT_PARAMETERS
402 static AVDictionary *convert_format_parameters(AVFormatParameters *ap)
403 {
404 char buf[1024];
405 AVDictionary *opts = NULL;
406
407 if (!ap)
408 return NULL;
409
410 if (ap->time_base.num) {
411 snprintf(buf, sizeof(buf), "%d/%d", ap->time_base.den, ap->time_base.num);
412 av_dict_set(&opts, "framerate", buf, 0);
413 }
414 if (ap->sample_rate) {
415 snprintf(buf, sizeof(buf), "%d", ap->sample_rate);
416 av_dict_set(&opts, "sample_rate", buf, 0);
417 }
418 if (ap->channels) {
419 snprintf(buf, sizeof(buf), "%d", ap->channels);
420 av_dict_set(&opts, "channels", buf, 0);
421 }
422 if (ap->width || ap->height) {
423 snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height);
424 av_dict_set(&opts, "video_size", buf, 0);
425 }
426 if (ap->pix_fmt != PIX_FMT_NONE) {
427 av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0);
428 }
429 if (ap->channel) {
430 snprintf(buf, sizeof(buf), "%d", ap->channel);
431 av_dict_set(&opts, "channel", buf, 0);
432 }
433 if (ap->standard) {
434 av_dict_set(&opts, "standard", ap->standard, 0);
435 }
436 if (ap->mpeg2ts_compute_pcr) {
437 av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0);
438 }
439 if (ap->initial_pause) {
440 av_dict_set(&opts, "initial_pause", "1", 0);
441 }
442 return opts;
443 }
444
445 /**
446 * Open a media file from an IO stream. 'fmt' must be specified.
447 */
448 int av_open_input_stream(AVFormatContext **ic_ptr,
449 AVIOContext *pb, const char *filename,
450 AVInputFormat *fmt, AVFormatParameters *ap)
451 {
452 int err;
453 AVDictionary *opts;
454 AVFormatContext *ic;
455 AVFormatParameters default_ap;
456
457 if(!ap){
458 ap=&default_ap;
459 memset(ap, 0, sizeof(default_ap));
460 }
461 opts = convert_format_parameters(ap);
462
463 if(!ap->prealloced_context)
464 ic = avformat_alloc_context();
465 else
466 ic = *ic_ptr;
467 if (!ic) {
468 err = AVERROR(ENOMEM);
469 goto fail;
470 }
471 if (pb && fmt && fmt->flags & AVFMT_NOFILE)
472 av_log(ic, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
473 "will be ignored with AVFMT_NOFILE format.\n");
474 else
475 ic->pb = pb;
476
477 if ((err = avformat_open_input(&ic, filename, fmt, &opts)) < 0)
478 goto fail;
479 ic->pb = ic->pb ? ic->pb : pb; // don't leak custom pb if it wasn't set above
480
481 fail:
482 *ic_ptr = ic;
483 av_dict_free(&opts);
484 return err;
485 }
486 #endif
487
488 /** size of probe buffer, for guessing file type from file contents */
489 #define PROBE_BUF_MIN 2048
490 #define PROBE_BUF_MAX (1<<20)
491
492 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
493 const char *filename, void *logctx,
494 unsigned int offset, unsigned int max_probe_size)
495 {
496 AVProbeData pd = { filename ? filename : "", NULL, -offset };
497 unsigned char *buf = NULL;
498 int ret = 0, probe_size;
499
500 if (!max_probe_size) {
501 max_probe_size = PROBE_BUF_MAX;
502 } else if (max_probe_size > PROBE_BUF_MAX) {
503 max_probe_size = PROBE_BUF_MAX;
504 } else if (max_probe_size < PROBE_BUF_MIN) {
505 return AVERROR(EINVAL);
506 }
507
508 if (offset >= max_probe_size) {
509 return AVERROR(EINVAL);
510 }
511
512 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
513 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
514 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
515 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
516
517 if (probe_size < offset) {
518 continue;
519 }
520
521 /* read probe data */
522 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
523 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
524 /* fail if error was not end of file, otherwise, lower score */
525 if (ret != AVERROR_EOF) {
526 av_free(buf);
527 return ret;
528 }
529 score = 0;
530 ret = 0; /* error was end of file, nothing read */
531 }
532 pd.buf_size += ret;
533 pd.buf = &buf[offset];
534
535 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
536
537 /* guess file format */
538 *fmt = av_probe_input_format2(&pd, 1, &score);
539 if(*fmt){
540 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
541 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
542 }else
543 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
544 }
545 }
546
547 if (!*fmt) {
548 av_free(buf);
549 return AVERROR_INVALIDDATA;
550 }
551
552 /* rewind. reuse probe buffer to avoid seeking */
553 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
554 av_free(buf);
555
556 return ret;
557 }
558
559 #if FF_API_FORMAT_PARAMETERS
560 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
561 AVInputFormat *fmt,
562 int buf_size,
563 AVFormatParameters *ap)
564 {
565 int err;
566 AVDictionary *opts = convert_format_parameters(ap);
567
568 if (!ap || !ap->prealloced_context)
569 *ic_ptr = NULL;
570
571 err = avformat_open_input(ic_ptr, filename, fmt, &opts);
572
573 av_dict_free(&opts);
574 return err;
575 }
576 #endif
577
578 /* open input file and probe the format if necessary */
579 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
580 {
581 int ret;
582 AVProbeData pd = {filename, NULL, 0};
583
584 if (s->pb) {
585 s->flags |= AVFMT_FLAG_CUSTOM_IO;
586 if (!s->iformat)
587 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
588 else if (s->iformat->flags & AVFMT_NOFILE)
589 return AVERROR(EINVAL);
590 return 0;
591 }
592
593 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
594 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
595 return 0;
596
597 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
598 &s->interrupt_callback, options)) < 0)
599 return ret;
600 if (s->iformat)
601 return 0;
602 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
603 }
604
605 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
606 {
607 AVFormatContext *s = *ps;
608 int ret = 0;
609 AVFormatParameters ap = { { 0 } };
610 AVDictionary *tmp = NULL;
611
612 if (!s && !(s = avformat_alloc_context()))
613 return AVERROR(ENOMEM);
614 if (fmt)
615 s->iformat = fmt;
616
617 if (options)
618 av_dict_copy(&tmp, *options, 0);
619
620 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
621 goto fail;
622
623 if ((ret = init_input(s, filename, &tmp)) < 0)
624 goto fail;
625
626 /* check filename in case an image number is expected */
627 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
628 if (!av_filename_number_test(filename)) {
629 ret = AVERROR(EINVAL);
630 goto fail;
631 }
632 }
633
634 s->duration = s->start_time = AV_NOPTS_VALUE;
635 av_strlcpy(s->filename, filename, sizeof(s->filename));
636
637 /* allocate private data */
638 if (s->iformat->priv_data_size > 0) {
639 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
640 ret = AVERROR(ENOMEM);
641 goto fail;
642 }
643 if (s->iformat->priv_class) {
644 *(const AVClass**)s->priv_data = s->iformat->priv_class;
645 av_opt_set_defaults(s->priv_data);
646 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
647 goto fail;
648 }
649 }
650
651 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
652 if (s->pb)
653 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC);
654
655 if (s->iformat->read_header)
656 if ((ret = s->iformat->read_header(s, &ap)) < 0)
657 goto fail;
658
659 if (s->pb && !s->data_offset)
660 s->data_offset = avio_tell(s->pb);
661
662 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
663
664 if (options) {
665 av_dict_free(options);
666 *options = tmp;
667 }
668 *ps = s;
669 return 0;
670
671 fail:
672 av_dict_free(&tmp);
673 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
674 avio_close(s->pb);
675 avformat_free_context(s);
676 *ps = NULL;
677 return ret;
678 }
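
/*
 * Illustrative sketch (not part of the original source): the typical open
 * sequence for this API generation, assuming avformat_close_input() is
 * available (older code used av_close_input_file()). On failure the context
 * is freed and *ic is set to NULL by avformat_open_input() itself.
 *
 *     AVFormatContext *ic = NULL;
 *     if (avformat_open_input(&ic, "input.mkv", NULL, NULL) < 0)
 *         return -1;
 *     // ... av_read_frame() loop ...
 *     avformat_close_input(&ic);
 */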
679
680 /*******************************************************/
681
682 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
683 AVPacketList **plast_pktl){
684 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
685 if (!pktl)
686 return NULL;
687
688 if (*packet_buffer)
689 (*plast_pktl)->next = pktl;
690 else
691 *packet_buffer = pktl;
692
693 /* add the packet to the buffered packet list */
694 *plast_pktl = pktl;
695 pktl->pkt= *pkt;
696 return &pktl->pkt;
697 }
698
699 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
700 {
701 int ret, i;
702 AVStream *st;
703
704 for(;;){
705 AVPacketList *pktl = s->raw_packet_buffer;
706
707 if (pktl) {
708 *pkt = pktl->pkt;
709 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
710 !s->streams[pkt->stream_index]->probe_packets ||
711 s->raw_packet_buffer_remaining_size < pkt->size){
712 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
713 av_freep(&pd->buf);
714 pd->buf_size = 0;
715 s->raw_packet_buffer = pktl->next;
716 s->raw_packet_buffer_remaining_size += pkt->size;
717 av_free(pktl);
718 return 0;
719 }
720 }
721
722 av_init_packet(pkt);
723 ret= s->iformat->read_packet(s, pkt);
724 if (ret < 0) {
725 if (!pktl || ret == AVERROR(EAGAIN))
726 return ret;
727 for (i = 0; i < s->nb_streams; i++)
728 s->streams[i]->probe_packets = 0;
729 continue;
730 }
731
732 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
733 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
734 av_log(s, AV_LOG_WARNING,
735 "Dropped corrupted packet (stream = %d)\n",
736 pkt->stream_index);
737 av_free_packet(pkt);
738 continue;
739 }
740
741 st= s->streams[pkt->stream_index];
742
743 switch(st->codec->codec_type){
744 case AVMEDIA_TYPE_VIDEO:
745 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
746 break;
747 case AVMEDIA_TYPE_AUDIO:
748 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
749 break;
750 case AVMEDIA_TYPE_SUBTITLE:
751 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
752 break;
753 }
754
755 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
756 !st->probe_packets))
757 return ret;
758
759 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
760 s->raw_packet_buffer_remaining_size -= pkt->size;
761
762 if(st->codec->codec_id == CODEC_ID_PROBE){
763 AVProbeData *pd = &st->probe_data;
764 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
765 --st->probe_packets;
766
767 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
768 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
769 pd->buf_size += pkt->size;
770 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
771
772 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
773 //FIXME we don't reduce the score to 0 when running out of buffer space in bytes
774 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
775 if(st->codec->codec_id != CODEC_ID_PROBE){
776 pd->buf_size=0;
777 av_freep(&pd->buf);
778 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
779 }
780 }
781 }
782 }
783 }
784
785 /**********************************************************/
786
787 /**
788 * Get the number of samples in an audio frame. Return -1 on error.
789 */
790 static int get_audio_frame_size(AVCodecContext *enc, int size)
791 {
792 int frame_size;
793
794 if(enc->codec_id == CODEC_ID_VORBIS)
795 return -1;
796
797 if (enc->frame_size <= 1) {
798 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
799
800 if (bits_per_sample) {
801 if (enc->channels == 0)
802 return -1;
803 frame_size = (size << 3) / (bits_per_sample * enc->channels);
804 } else {
805 /* used for example by ADPCM codecs */
806 if (enc->bit_rate == 0)
807 return -1;
808 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
809 }
810 } else {
811 frame_size = enc->frame_size;
812 }
813 return frame_size;
814 }
815
816
817 /**
818 * Compute the frame duration in seconds as a fraction *pnum / *pden. Both are set to 0 if it is not available.
819 */
820 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
821 AVCodecParserContext *pc, AVPacket *pkt)
822 {
823 int frame_size;
824
825 *pnum = 0;
826 *pden = 0;
827 switch(st->codec->codec_type) {
828 case AVMEDIA_TYPE_VIDEO:
829 if(st->time_base.num*1000LL > st->time_base.den){
830 *pnum = st->time_base.num;
831 *pden = st->time_base.den;
832 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
833 *pnum = st->codec->time_base.num;
834 *pden = st->codec->time_base.den;
835 if (pc && pc->repeat_pict) {
836 *pnum = (*pnum) * (1 + pc->repeat_pict);
837 }
838 //If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet.
839 //So if we have no parser in that case, leave the duration undefined.
840 if(st->codec->ticks_per_frame>1 && !pc){
841 *pnum = *pden = 0;
842 }
843 }
844 break;
845 case AVMEDIA_TYPE_AUDIO:
846 frame_size = get_audio_frame_size(st->codec, pkt->size);
847 if (frame_size <= 0 || st->codec->sample_rate <= 0)
848 break;
849 *pnum = frame_size;
850 *pden = st->codec->sample_rate;
851 break;
852 default:
853 break;
854 }
855 }
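
/*
 * Worked example (illustrative, assumed numbers): for 25 fps video with codec
 * time_base 1/25 and stream time_base 1/90000 this yields *pnum = 1,
 * *pden = 25, which compute_pkt_fields() converts into a packet duration of
 * 1 * 90000 / 25 = 3600 stream time_base ticks. For audio, a 1152-sample MP3
 * frame at 44100 Hz gives *pnum = 1152, *pden = 44100, i.e. roughly 26.1 ms.
 */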
856
857 static int is_intra_only(AVCodecContext *enc){
858 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
859 return 1;
860 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
861 switch(enc->codec_id){
862 case CODEC_ID_MJPEG:
863 case CODEC_ID_MJPEGB:
864 case CODEC_ID_LJPEG:
865 case CODEC_ID_PRORES:
866 case CODEC_ID_RAWVIDEO:
867 case CODEC_ID_DVVIDEO:
868 case CODEC_ID_HUFFYUV:
869 case CODEC_ID_FFVHUFF:
870 case CODEC_ID_ASV1:
871 case CODEC_ID_ASV2:
872 case CODEC_ID_VCR1:
873 case CODEC_ID_DNXHD:
874 case CODEC_ID_JPEG2000:
875 return 1;
876 default: break;
877 }
878 }
879 return 0;
880 }
881
882 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
883 int64_t dts, int64_t pts)
884 {
885 AVStream *st= s->streams[stream_index];
886 AVPacketList *pktl= s->packet_buffer;
887
888 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
889 return;
890
891 st->first_dts= dts - st->cur_dts;
892 st->cur_dts= dts;
893
894 for(; pktl; pktl= pktl->next){
895 if(pktl->pkt.stream_index != stream_index)
896 continue;
897 //FIXME think more about this check
898 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
899 pktl->pkt.pts += st->first_dts;
900
901 if(pktl->pkt.dts != AV_NOPTS_VALUE)
902 pktl->pkt.dts += st->first_dts;
903
904 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
905 st->start_time= pktl->pkt.pts;
906 }
907 if (st->start_time == AV_NOPTS_VALUE)
908 st->start_time = pts;
909 }
910
911 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
912 {
913 AVPacketList *pktl= s->packet_buffer;
914 int64_t cur_dts= 0;
915
916 if(st->first_dts != AV_NOPTS_VALUE){
917 cur_dts= st->first_dts;
918 for(; pktl; pktl= pktl->next){
919 if(pktl->pkt.stream_index == pkt->stream_index){
920 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
921 break;
922 cur_dts -= pkt->duration;
923 }
924 }
925 pktl= s->packet_buffer;
926 st->first_dts = cur_dts;
927 }else if(st->cur_dts)
928 return;
929
930 for(; pktl; pktl= pktl->next){
931 if(pktl->pkt.stream_index != pkt->stream_index)
932 continue;
933 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
934 && !pktl->pkt.duration){
935 pktl->pkt.dts= cur_dts;
936 if(!st->codec->has_b_frames)
937 pktl->pkt.pts= cur_dts;
938 cur_dts += pkt->duration;
939 pktl->pkt.duration= pkt->duration;
940 }else
941 break;
942 }
943 if(st->first_dts == AV_NOPTS_VALUE)
944 st->cur_dts= cur_dts;
945 }
946
947 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
948 AVCodecParserContext *pc, AVPacket *pkt)
949 {
950 int num, den, presentation_delayed, delay, i;
951 int64_t offset;
952
953 if (s->flags & AVFMT_FLAG_NOFILLIN)
954 return;
955
956 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
957 pkt->dts= AV_NOPTS_VALUE;
958
959 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
960 //FIXME Set low_delay = 0 when has_b_frames = 1
961 st->codec->has_b_frames = 1;
962
963 /* do we have a video B-frame ? */
964 delay= st->codec->has_b_frames;
965 presentation_delayed = 0;
966
967 // ignore delay caused by frame threading so that the mpeg2-without-dts
968 // warning will not trigger
969 if (delay && st->codec->active_thread_type&FF_THREAD_FRAME)
970 delay -= st->codec->thread_count-1;
971
972 /* XXX: need has_b_frames, but cannot get it if the codec is
973 not initialized */
974 if (delay &&
975 pc && pc->pict_type != AV_PICTURE_TYPE_B)
976 presentation_delayed = 1;
977
978 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
979 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
980 pkt->dts -= 1LL<<st->pts_wrap_bits;
981 }
982
983 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
984 // we take the conservative approach and discard both
985 // Note: if this misbehaves for an H.264 file then presentation_delayed is probably not set correctly.
986 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
987 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
988 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
989 }
990
991 if (pkt->duration == 0) {
992 compute_frame_duration(&num, &den, st, pc, pkt);
993 if (den && num) {
994 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
995
996 if(pkt->duration != 0 && s->packet_buffer)
997 update_initial_durations(s, st, pkt);
998 }
999 }
1000
1001 /* correct timestamps with byte offset if demuxers only have timestamps
1002 on packet boundaries */
1003 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1004 /* this will estimate bitrate based on this frame's duration and size */
1005 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1006 if(pkt->pts != AV_NOPTS_VALUE)
1007 pkt->pts += offset;
1008 if(pkt->dts != AV_NOPTS_VALUE)
1009 pkt->dts += offset;
1010 }
1011
1012 if (pc && pc->dts_sync_point >= 0) {
1013 // we have synchronization info from the parser
1014 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1015 if (den > 0) {
1016 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1017 if (pkt->dts != AV_NOPTS_VALUE) {
1018 // got DTS from the stream, update reference timestamp
1019 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1020 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1021 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1022 // compute DTS based on reference timestamp
1023 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1024 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1025 }
1026 if (pc->dts_sync_point > 0)
1027 st->reference_dts = pkt->dts; // new reference
1028 }
1029 }
1030
1031 /* This may be redundant, but it should not hurt. */
1032 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1033 presentation_delayed = 1;
1034
1035 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
1036 /* interpolate PTS and DTS if they are not present */
1037 //We skip H264 currently because delay and has_b_frames are not reliably set
1038 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1039 if (presentation_delayed) {
1040 /* DTS = decompression timestamp */
1041 /* PTS = presentation timestamp */
1042 if (pkt->dts == AV_NOPTS_VALUE)
1043 pkt->dts = st->last_IP_pts;
1044 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1045 if (pkt->dts == AV_NOPTS_VALUE)
1046 pkt->dts = st->cur_dts;
1047
1048 /* this is tricky: the dts must be incremented by the duration
1049 of the frame we are displaying, i.e. the last I- or P-frame */
1050 if (st->last_IP_duration == 0)
1051 st->last_IP_duration = pkt->duration;
1052 if(pkt->dts != AV_NOPTS_VALUE)
1053 st->cur_dts = pkt->dts + st->last_IP_duration;
1054 st->last_IP_duration = pkt->duration;
1055 st->last_IP_pts= pkt->pts;
1056 /* cannot compute PTS if not present (we can compute it only
1057 by knowing the future) */
1058 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
1059 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
1060 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
1061 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1062 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
1063 pkt->pts += pkt->duration;
1064 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
1065 }
1066 }
1067
1068 /* presentation is not delayed : PTS and DTS are the same */
1069 if(pkt->pts == AV_NOPTS_VALUE)
1070 pkt->pts = pkt->dts;
1071 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1072 if(pkt->pts == AV_NOPTS_VALUE)
1073 pkt->pts = st->cur_dts;
1074 pkt->dts = pkt->pts;
1075 if(pkt->pts != AV_NOPTS_VALUE)
1076 st->cur_dts = pkt->pts + pkt->duration;
1077 }
1078 }
1079
1080 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1081 st->pts_buffer[0]= pkt->pts;
1082 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1083 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1084 if(pkt->dts == AV_NOPTS_VALUE)
1085 pkt->dts= st->pts_buffer[0];
1086 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
1087 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1088 }
1089 if(pkt->dts > st->cur_dts)
1090 st->cur_dts = pkt->dts;
1091 }
1092
1093 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1094
1095 /* update flags */
1096 if(is_intra_only(st->codec))
1097 pkt->flags |= AV_PKT_FLAG_KEY;
1098 else if (pc) {
1099 pkt->flags = 0;
1100 /* keyframe computation */
1101 if (pc->key_frame == 1)
1102 pkt->flags |= AV_PKT_FLAG_KEY;
1103 else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
1104 pkt->flags |= AV_PKT_FLAG_KEY;
1105 }
1106 if (pc)
1107 pkt->convergence_duration = pc->convergence_duration;
1108 }
1109
1110
1111 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1112 {
1113 AVStream *st;
1114 int len, ret, i;
1115
1116 av_init_packet(pkt);
1117
1118 for(;;) {
1119 /* select current input stream component */
1120 st = s->cur_st;
1121 if (st) {
1122 if (!st->need_parsing || !st->parser) {
1123 /* no parsing needed: we just output the packet as is */
1124 /* raw data support */
1125 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
1126 compute_pkt_fields(s, st, NULL, pkt);
1127 s->cur_st = NULL;
1128 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1129 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1130 ff_reduce_index(s, st->index);
1131 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1132 }
1133 break;
1134 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1135 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1136 st->cur_ptr, st->cur_len,
1137 st->cur_pkt.pts, st->cur_pkt.dts,
1138 st->cur_pkt.pos);
1139 st->cur_pkt.pts = AV_NOPTS_VALUE;
1140 st->cur_pkt.dts = AV_NOPTS_VALUE;
1141 /* increment read pointer */
1142 st->cur_ptr += len;
1143 st->cur_len -= len;
1144
1145 /* return packet if any */
1146 if (pkt->size) {
1147 got_packet:
1148 pkt->duration = 0;
1149 pkt->stream_index = st->index;
1150 pkt->pts = st->parser->pts;
1151 pkt->dts = st->parser->dts;
1152 pkt->pos = st->parser->pos;
1153 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
1154 s->cur_st = NULL;
1155 pkt->destruct= st->cur_pkt.destruct;
1156 st->cur_pkt.destruct= NULL;
1157 st->cur_pkt.data = NULL;
1158 assert(st->cur_len == 0);
1159 }else{
1160 pkt->destruct = NULL;
1161 }
1162 compute_pkt_fields(s, st, st->parser, pkt);
1163
1164 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1165 ff_reduce_index(s, st->index);
1166 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1167 0, 0, AVINDEX_KEYFRAME);
1168 }
1169
1170 break;
1171 }
1172 } else {
1173 /* free packet */
1174 av_free_packet(&st->cur_pkt);
1175 s->cur_st = NULL;
1176 }
1177 } else {
1178 AVPacket cur_pkt;
1179 /* read next packet */
1180 ret = av_read_packet(s, &cur_pkt);
1181 if (ret < 0) {
1182 if (ret == AVERROR(EAGAIN))
1183 return ret;
1184 /* return the last frames, if any */
1185 for(i = 0; i < s->nb_streams; i++) {
1186 st = s->streams[i];
1187 if (st->parser && st->need_parsing) {
1188 av_parser_parse2(st->parser, st->codec,
1189 &pkt->data, &pkt->size,
1190 NULL, 0,
1191 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1192 AV_NOPTS_VALUE);
1193 if (pkt->size)
1194 goto got_packet;
1195 }
1196 }
1197 /* no more packets: really terminate parsing */
1198 return ret;
1199 }
1200 st = s->streams[cur_pkt.stream_index];
1201 st->cur_pkt= cur_pkt;
1202
1203 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1204 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1205 st->cur_pkt.pts < st->cur_pkt.dts){
1206 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1207 st->cur_pkt.stream_index,
1208 st->cur_pkt.pts,
1209 st->cur_pkt.dts,
1210 st->cur_pkt.size);
1211 // av_free_packet(&st->cur_pkt);
1212 // return -1;
1213 }
1214
1215 if(s->debug & FF_FDEBUG_TS)
1216 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1217 st->cur_pkt.stream_index,
1218 st->cur_pkt.pts,
1219 st->cur_pkt.dts,
1220 st->cur_pkt.size,
1221 st->cur_pkt.duration,
1222 st->cur_pkt.flags);
1223
1224 s->cur_st = st;
1225 st->cur_ptr = st->cur_pkt.data;
1226 st->cur_len = st->cur_pkt.size;
1227 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1228 st->parser = av_parser_init(st->codec->codec_id);
1229 if (!st->parser) {
1230 /* no parser available: just output the raw packets */
1231 st->need_parsing = AVSTREAM_PARSE_NONE;
1232 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1233 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1234 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1235 st->parser->flags |= PARSER_FLAG_ONCE;
1236 }
1237 }
1238 }
1239 }
1240 if(s->debug & FF_FDEBUG_TS)
1241 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1242 pkt->stream_index,
1243 pkt->pts,
1244 pkt->dts,
1245 pkt->size,
1246 pkt->duration,
1247 pkt->flags);
1248
1249 return 0;
1250 }
1251
1252 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1253 {
1254 AVPacketList *pktl;
1255 int eof=0;
1256 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1257
1258 for(;;){
1259 pktl = s->packet_buffer;
1260 if (pktl) {
1261 AVPacket *next_pkt= &pktl->pkt;
1262
1263 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1264 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1265 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1266 if( pktl->pkt.stream_index == next_pkt->stream_index
1267 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1268 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1269 next_pkt->pts= pktl->pkt.dts;
1270 }
1271 pktl= pktl->next;
1272 }
1273 pktl = s->packet_buffer;
1274 }
1275
1276 if( next_pkt->pts != AV_NOPTS_VALUE
1277 || next_pkt->dts == AV_NOPTS_VALUE
1278 || !genpts || eof){
1279 /* read packet from packet buffer, if there is data */
1280 *pkt = *next_pkt;
1281 s->packet_buffer = pktl->next;
1282 av_free(pktl);
1283 return 0;
1284 }
1285 }
1286 if(genpts){
1287 int ret= read_frame_internal(s, pkt);
1288 if(ret<0){
1289 if(pktl && ret != AVERROR(EAGAIN)){
1290 eof=1;
1291 continue;
1292 }else
1293 return ret;
1294 }
1295
1296 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1297 &s->packet_buffer_end)) < 0)
1298 return AVERROR(ENOMEM);
1299 }else{
1300 assert(!s->packet_buffer);
1301 return read_frame_internal(s, pkt);
1302 }
1303 }
1304 }
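
/*
 * Illustrative sketch (not part of the original source): the canonical
 * demuxing loop built on av_read_frame(). 'wanted_stream' is an assumed
 * caller-chosen stream index.
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         if (pkt.stream_index == wanted_stream) {
 *             // ... decode or remux pkt ...
 *         }
 *         av_free_packet(&pkt);
 *     }
 */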
1305
1306 /* XXX: suppress the packet queue */
1307 static void flush_packet_queue(AVFormatContext *s)
1308 {
1309 AVPacketList *pktl;
1310
1311 for(;;) {
1312 pktl = s->packet_buffer;
1313 if (!pktl)
1314 break;
1315 s->packet_buffer = pktl->next;
1316 av_free_packet(&pktl->pkt);
1317 av_free(pktl);
1318 }
1319 while(s->raw_packet_buffer){
1320 pktl = s->raw_packet_buffer;
1321 s->raw_packet_buffer = pktl->next;
1322 av_free_packet(&pktl->pkt);
1323 av_free(pktl);
1324 }
1325 s->packet_buffer_end=
1326 s->raw_packet_buffer_end= NULL;
1327 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1328 }
1329
1330 /*******************************************************/
1331 /* seek support */
1332
1333 int av_find_default_stream_index(AVFormatContext *s)
1334 {
1335 int first_audio_index = -1;
1336 int i;
1337 AVStream *st;
1338
1339 if (s->nb_streams <= 0)
1340 return -1;
1341 for(i = 0; i < s->nb_streams; i++) {
1342 st = s->streams[i];
1343 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1344 return i;
1345 }
1346 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1347 first_audio_index = i;
1348 }
1349 return first_audio_index >= 0 ? first_audio_index : 0;
1350 }
1351
1352 /**
1353 * Flush the frame reader.
1354 */
1355 void ff_read_frame_flush(AVFormatContext *s)
1356 {
1357 AVStream *st;
1358 int i, j;
1359
1360 flush_packet_queue(s);
1361
1362 s->cur_st = NULL;
1363
1364 /* for each stream, reset read state */
1365 for(i = 0; i < s->nb_streams; i++) {
1366 st = s->streams[i];
1367
1368 if (st->parser) {
1369 av_parser_close(st->parser);
1370 st->parser = NULL;
1371 av_free_packet(&st->cur_pkt);
1372 }
1373 st->last_IP_pts = AV_NOPTS_VALUE;
1374 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1375 st->reference_dts = AV_NOPTS_VALUE;
1376 /* fail safe */
1377 st->cur_ptr = NULL;
1378 st->cur_len = 0;
1379
1380 st->probe_packets = MAX_PROBE_PACKETS;
1381
1382 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1383 st->pts_buffer[j]= AV_NOPTS_VALUE;
1384 }
1385 }
1386
1387 #if FF_API_SEEK_PUBLIC
1388 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1389 {
1390 ff_update_cur_dts(s, ref_st, timestamp);
1391 }
1392 #endif
1393
1394 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1395 {
1396 int i;
1397
1398 for(i = 0; i < s->nb_streams; i++) {
1399 AVStream *st = s->streams[i];
1400
1401 st->cur_dts = av_rescale(timestamp,
1402 st->time_base.den * (int64_t)ref_st->time_base.num,
1403 st->time_base.num * (int64_t)ref_st->time_base.den);
1404 }
1405 }
1406
1407 void ff_reduce_index(AVFormatContext *s, int stream_index)
1408 {
1409 AVStream *st= s->streams[stream_index];
1410 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1411
1412 if((unsigned)st->nb_index_entries >= max_entries){
1413 int i;
1414 for(i=0; 2*i<st->nb_index_entries; i++)
1415 st->index_entries[i]= st->index_entries[2*i];
1416 st->nb_index_entries= i;
1417 }
1418 }
1419
1420 int ff_add_index_entry(AVIndexEntry **index_entries,
1421 int *nb_index_entries,
1422 unsigned int *index_entries_allocated_size,
1423 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1424 {
1425 AVIndexEntry *entries, *ie;
1426 int index;
1427
1428 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1429 return -1;
1430
1431 entries = av_fast_realloc(*index_entries,
1432 index_entries_allocated_size,
1433 (*nb_index_entries + 1) *
1434 sizeof(AVIndexEntry));
1435 if(!entries)
1436 return -1;
1437
1438 *index_entries= entries;
1439
1440 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1441
1442 if(index<0){
1443 index= (*nb_index_entries)++;
1444 ie= &entries[index];
1445 assert(index==0 || ie[-1].timestamp < timestamp);
1446 }else{
1447 ie= &entries[index];
1448 if(ie->timestamp != timestamp){
1449 if(ie->timestamp <= timestamp)
1450 return -1;
1451 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1452 (*nb_index_entries)++;
1453 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1454 distance= ie->min_distance;
1455 }
1456
1457 ie->pos = pos;
1458 ie->timestamp = timestamp;
1459 ie->min_distance= distance;
1460 ie->size= size;
1461 ie->flags = flags;
1462
1463 return index;
1464 }
1465
1466 int av_add_index_entry(AVStream *st,
1467 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1468 {
1469 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1470 &st->index_entries_allocated_size, pos,
1471 timestamp, size, distance, flags);
1472 }
1473
1474 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1475 int64_t wanted_timestamp, int flags)
1476 {
1477 int a, b, m;
1478 int64_t timestamp;
1479
1480 a = - 1;
1481 b = nb_entries;
1482
1483 //optimize appending index entries at the end
1484 if(b && entries[b-1].timestamp < wanted_timestamp)
1485 a= b-1;
1486
1487 while (b - a > 1) {
1488 m = (a + b) >> 1;
1489 timestamp = entries[m].timestamp;
1490 if(timestamp >= wanted_timestamp)
1491 b = m;
1492 if(timestamp <= wanted_timestamp)
1493 a = m;
1494 }
1495 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1496
1497 if(!(flags & AVSEEK_FLAG_ANY)){
1498 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1499 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1500 }
1501 }
1502
1503 if(m == nb_entries)
1504 return -1;
1505 return m;
1506 }
1507
1508 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1509 int flags)
1510 {
1511 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1512 wanted_timestamp, flags);
1513 }
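
/*
 * Worked example (illustrative): with index entry timestamps {0, 400, 800}
 * and wanted_timestamp = 500, the binary search above selects the entry at
 * 400 when AVSEEK_FLAG_BACKWARD is set and the entry at 800 otherwise.
 * Unless AVSEEK_FLAG_ANY is given, the result is then moved to the nearest
 * entry flagged AVINDEX_KEYFRAME in the chosen direction.
 */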
1514
1515 #if FF_API_SEEK_PUBLIC
1516 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1517 return ff_seek_frame_binary(s, stream_index, target_ts, flags);
1518 }
1519 #endif
1520
1521 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1522 {
1523 AVInputFormat *avif= s->iformat;
1524 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1525 int64_t ts_min, ts_max, ts;
1526 int index;
1527 int64_t ret;
1528 AVStream *st;
1529
1530 if (stream_index < 0)
1531 return -1;
1532
1533 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1534
1535 ts_max=
1536 ts_min= AV_NOPTS_VALUE;
1537 pos_limit= -1; //gcc falsely says it may be uninitialized
1538
1539 st= s->streams[stream_index];
1540 if(st->index_entries){
1541 AVIndexEntry *e;
1542
1543 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1544 index= FFMAX(index, 0);
1545 e= &st->index_entries[index];
1546
1547 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1548 pos_min= e->pos;
1549 ts_min= e->timestamp;
1550 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1551 pos_min,ts_min);
1552 }else{
1553 assert(index==0);
1554 }
1555
1556 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1557 assert(index < st->nb_index_entries);
1558 if(index >= 0){
1559 e= &st->index_entries[index];
1560 assert(e->timestamp >= target_ts);
1561 pos_max= e->pos;
1562 ts_max= e->timestamp;
1563 pos_limit= pos_max - e->min_distance;
1564 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1565 pos_max,pos_limit, ts_max);
1566 }
1567 }
1568
1569 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1570 if(pos<0)
1571 return -1;
1572
1573 /* do the seek */
1574 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1575 return ret;
1576
1577 ff_update_cur_dts(s, st, ts);
1578
1579 return 0;
1580 }
1581
1582 #if FF_API_SEEK_PUBLIC
1583 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1584 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1585 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1586 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1587 {
1588 return ff_gen_search(s, stream_index, target_ts, pos_min, pos_max,
1589 pos_limit, ts_min, ts_max, flags, ts_ret,
1590 read_timestamp);
1591 }
1592 #endif
1593
1594 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1595 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1596 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1597 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1598 {
1599 int64_t pos, ts;
1600 int64_t start_pos, filesize;
1601 int no_change;
1602
1603 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1604
1605 if(ts_min == AV_NOPTS_VALUE){
1606 pos_min = s->data_offset;
1607 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1608 if (ts_min == AV_NOPTS_VALUE)
1609 return -1;
1610 }
1611
1612 if(ts_max == AV_NOPTS_VALUE){
1613 int step= 1024;
1614 filesize = avio_size(s->pb);
1615 pos_max = filesize - 1;
1616 do{
1617 pos_max -= step;
1618 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1619 step += step;
1620 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1621 if (ts_max == AV_NOPTS_VALUE)
1622 return -1;
1623
1624 for(;;){
1625 int64_t tmp_pos= pos_max + 1;
1626 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1627 if(tmp_ts == AV_NOPTS_VALUE)
1628 break;
1629 ts_max= tmp_ts;
1630 pos_max= tmp_pos;
1631 if(tmp_pos >= filesize)
1632 break;
1633 }
1634 pos_limit= pos_max;
1635 }
1636
1637 if(ts_min > ts_max){
1638 return -1;
1639 }else if(ts_min == ts_max){
1640 pos_limit= pos_min;
1641 }
1642
1643 no_change=0;
1644 while (pos_min < pos_limit) {
1645 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1646 pos_min, pos_max, ts_min, ts_max);
1647 assert(pos_limit <= pos_max);
1648
1649 if(no_change==0){
1650 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1651 // interpolate position (better than dichotomy)
1652 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1653 + pos_min - approximate_keyframe_distance;
1654 }else if(no_change==1){
1655 // bisection, if interpolation failed to change min or max pos last time
1656 pos = (pos_min + pos_limit)>>1;
1657 }else{
1658 /* linear search if bisection failed, can only happen if there
1659 are very few or no keyframes between min/max */
1660 pos=pos_min;
1661 }
1662 if(pos <= pos_min)
1663 pos= pos_min + 1;
1664 else if(pos > pos_limit)
1665 pos= pos_limit;
1666 start_pos= pos;
1667
1668 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1669 if(pos == pos_max)
1670 no_change++;
1671 else
1672 no_change=0;
1673 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1674 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1675 pos_limit, start_pos, no_change);
1676 if(ts == AV_NOPTS_VALUE){
1677 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1678 return -1;
1679 }
1680 assert(ts != AV_NOPTS_VALUE);
1681 if (target_ts <= ts) {
1682 pos_limit = start_pos - 1;
1683 pos_max = pos;
1684 ts_max = ts;
1685 }
1686 if (target_ts >= ts) {
1687 pos_min = pos;
1688 ts_min = ts;
1689 }
1690 }
1691
1692 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1693 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1694 pos_min = pos;
1695 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1696 pos_min++;
1697 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1698 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1699 pos, ts_min, target_ts, ts_max);
1700 *ts_ret= ts;
1701 return pos;
1702 }
1703
1704 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1705 int64_t pos_min, pos_max;
1706 #if 0
1707 AVStream *st;
1708
1709 if (stream_index < 0)
1710 return -1;
1711
1712 st= s->streams[stream_index];
1713 #endif
1714
1715 pos_min = s->data_offset;
1716 pos_max = avio_size(s->pb) - 1;
1717
1718 if (pos < pos_min) pos= pos_min;
1719 else if(pos > pos_max) pos= pos_max;
1720
1721 avio_seek(s->pb, pos, SEEK_SET);
1722
1723 #if 0
1724 av_update_cur_dts(s, st, ts);
1725 #endif
1726 return 0;
1727 }
1728
1729 static int seek_frame_generic(AVFormatContext *s,
1730 int stream_index, int64_t timestamp, int flags)
1731 {
1732 int index;
1733 int64_t ret;
1734 AVStream *st;
1735 AVIndexEntry *ie;
1736
1737 st = s->streams[stream_index];
1738
1739 index = av_index_search_timestamp(st, timestamp, flags);
1740
1741 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1742 return -1;
1743
1744 if(index < 0 || index==st->nb_index_entries-1){
1745 AVPacket pkt;
1746
1747 if(st->nb_index_entries){
1748 assert(st->index_entries);
1749 ie= &st->index_entries[st->nb_index_entries-1];
1750 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1751 return ret;
1752 ff_update_cur_dts(s, st, ie->timestamp);
1753 }else{
1754 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1755 return ret;
1756 }
1757 for (;;) {
1758 int read_status;
1759 do{
1760 read_status = av_read_frame(s, &pkt);
1761 } while (read_status == AVERROR(EAGAIN));
1762 if (read_status < 0)
1763 break;
1764 av_free_packet(&pkt);
1765 if(stream_index == pkt.stream_index){
1766 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1767 break;
1768 }
1769 }
1770 index = av_index_search_timestamp(st, timestamp, flags);
1771 }
1772 if (index < 0)
1773 return -1;
1774
1775 ff_read_frame_flush(s);
1776 if (s->iformat->read_seek){
1777 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1778 return 0;
1779 }
1780 ie = &st->index_entries[index];
1781 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1782 return ret;
1783 ff_update_cur_dts(s, st, ie->timestamp);
1784
1785 return 0;
1786 }
1787
1788 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1789 {
1790 int ret;
1791 AVStream *st;
1792
1793 if (flags & AVSEEK_FLAG_BYTE) {
1794 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1795 return -1;
1796 ff_read_frame_flush(s);
1797 return seek_frame_byte(s, stream_index, timestamp, flags);
1798 }
1799
1800 if(stream_index < 0){
1801 stream_index= av_find_default_stream_index(s);
1802 if(stream_index < 0)
1803 return -1;
1804
1805 st= s->streams[stream_index];
1806 /* timestamp for default must be expressed in AV_TIME_BASE units */
1807 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1808 }
1809
1810 /* first, we try the format specific seek */
1811 if (s->iformat->read_seek) {
1812 ff_read_frame_flush(s);
1813 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1814 } else
1815 ret = -1;
1816 if (ret >= 0) {
1817 return 0;
1818 }
1819
1820 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1821 ff_read_frame_flush(s);
1822 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1823 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1824 ff_read_frame_flush(s);
1825 return seek_frame_generic(s, stream_index, timestamp, flags);
1826 }
1827 else
1828 return -1;
1829 }
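
/*
 * Illustrative sketch (not part of the original source): seeking to the
 * 10 second mark of stream 'idx' (an assumed caller-chosen index). The target
 * must be in that stream's time_base (or in AV_TIME_BASE units when
 * stream_index is -1); the caller should flush its decoders afterwards.
 *
 *     int64_t ts = av_rescale_q(10 * AV_TIME_BASE, AV_TIME_BASE_Q,
 *                               ic->streams[idx]->time_base);
 *     if (av_seek_frame(ic, idx, ts, AVSEEK_FLAG_BACKWARD) < 0) {
 *         // seek failed
 *     }
 */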
1830
1831 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1832 {
1833 if(min_ts > ts || max_ts < ts)
1834 return -1;
1835
1836 if (s->iformat->read_seek2) {
1837 ff_read_frame_flush(s);
1838 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1839 }
1840
1841 if(s->iformat->read_timestamp){
1842 //try to seek via read_timestamp()
1843 }
1844
1845 //Fall back to the old API if the new one is not implemented but the old one is.
1846 //Note the old API has somewhat different semantics.
1847 if(s->iformat->read_seek || 1)
1848 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1849
1850 // try some generic seek like seek_frame_generic() but with new ts semantics
1851 }
1852
1853 /*******************************************************/
1854
1855 /**
1856 * Return TRUE if any stream in the file has an accurate duration.
1857 *
1858 * @return TRUE if at least one stream has a known duration.
1859 */
1860 static int has_duration(AVFormatContext *ic)
1861 {
1862 int i;
1863 AVStream *st;
1864
1865 for(i = 0;i < ic->nb_streams; i++) {
1866 st = ic->streams[i];
1867 if (st->duration != AV_NOPTS_VALUE)
1868 return 1;
1869 }
1870 return 0;
1871 }
1872
1873 /**
1874 * Estimate the global stream timings from those of the individual streams.
1875 *
1876 * Also computes the global bitrate if possible.
1877 */
1878 static void update_stream_timings(AVFormatContext *ic)
1879 {
1880 int64_t start_time, start_time1, end_time, end_time1;
1881 int64_t duration, duration1, filesize;
1882 int i;
1883 AVStream *st;
1884
1885 start_time = INT64_MAX;
1886 end_time = INT64_MIN;
1887 duration = INT64_MIN;
1888 for(i = 0;i < ic->nb_streams; i++) {
1889 st = ic->streams[i];
1890 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1891 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1892 start_time = FFMIN(start_time, start_time1);
1893 if (st->duration != AV_NOPTS_VALUE) {
1894 end_time1 = start_time1
1895 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1896 end_time = FFMAX(end_time, end_time1);
1897 }
1898 }
1899 if (st->duration != AV_NOPTS_VALUE) {
1900 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1901 duration = FFMAX(duration, duration1);
1902 }
1903 }
1904 if (start_time != INT64_MAX) {
1905 ic->start_time = start_time;
1906 if (end_time != INT64_MIN)
1907 duration = FFMAX(duration, end_time - start_time);
1908 }
1909 if (duration != INT64_MIN) {
1910 ic->duration = duration;
1911 if (ic->pb && (filesize = avio_size(ic->pb)) > 0) {
1912 /* compute the bitrate */
1913 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
1914 (double)ic->duration;
1915 }
1916 }
1917 }
1918
1919 static void fill_all_stream_timings(AVFormatContext *ic)
1920 {
1921 int i;
1922 AVStream *st;
1923
1924 update_stream_timings(ic);
1925 for(i = 0;i < ic->nb_streams; i++) {
1926 st = ic->streams[i];
1927 if (st->start_time == AV_NOPTS_VALUE) {
1928 if(ic->start_time != AV_NOPTS_VALUE)
1929 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1930 if(ic->duration != AV_NOPTS_VALUE)
1931 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1932 }
1933 }
1934 }
1935
1936 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1937 {
1938 int64_t filesize, duration;
1939 int bit_rate, i;
1940 AVStream *st;
1941
1942 /* if bit_rate is already set, we believe it */
1943 if (ic->bit_rate <= 0) {
1944 bit_rate = 0;
1945 for(i=0;i<ic->nb_streams;i++) {
1946 st = ic->streams[i];
1947 if (st->codec->bit_rate > 0)
1948 bit_rate += st->codec->bit_rate;
1949 }
1950 ic->bit_rate = bit_rate;
1951 }
1952
1953 /* if duration is already set, we believe it */
1954 if (ic->duration == AV_NOPTS_VALUE &&
1955 ic->bit_rate != 0) {
1956 filesize = ic->pb ? avio_size(ic->pb) : 0;
1957 if (filesize > 0) {
1958 for(i = 0; i < ic->nb_streams; i++) {
1959 st = ic->streams[i];
1960 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1961 if (st->duration == AV_NOPTS_VALUE)
1962 st->duration = duration;
1963 }
1964 }
1965 }
1966 }
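/* Worked example for the av_rescale() in estimate_timings_from_bit_rate()
 * above (illustrative numbers only): a 1,000,000 byte file at a total
 * bit_rate of 128000 b/s lasts 8,000,000 bits / 128,000 b/s = 62.5 seconds;
 * for a stream with time_base 1/90000 that is
 * av_rescale(8*1000000, 90000, 128000*1) = 5,625,000 ticks. */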
1967
1968 #define DURATION_MAX_READ_SIZE 250000
1969 #define DURATION_MAX_RETRY 3
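/* estimate_timings_from_pts() below reads the last DURATION_MAX_READ_SIZE
 * bytes of the file looking for the final PTS; each retry doubles the
 * window (250 KB, 500 KB, 1 MB, 2 MB with the values above) until a
 * timestamp is found or DURATION_MAX_RETRY is exceeded. */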
1970
1971 /* only used for MPEG-PS and MPEG-TS streams (see estimate_timings()) */
1972 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1973 {
1974 AVPacket pkt1, *pkt = &pkt1;
1975 AVStream *st;
1976 int read_size, i, ret;
1977 int64_t end_time;
1978 int64_t filesize, offset, duration;
1979 int retry=0;
1980
1981 ic->cur_st = NULL;
1982
1983 /* flush packet queue */
1984 flush_packet_queue(ic);
1985
1986 for (i=0; i<ic->nb_streams; i++) {
1987 st = ic->streams[i];
1988 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1989 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
1990
1991 if (st->parser) {
1992 av_parser_close(st->parser);
1993 st->parser= NULL;
1994 av_free_packet(&st->cur_pkt);
1995 }
1996 }
1997
1998 /* estimate the end time (duration) */
1999 /* XXX: may need to support wrapping */
2000 filesize = ic->pb ? avio_size(ic->pb) : 0;
2001 end_time = AV_NOPTS_VALUE;
2002 do{
2003 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2004 if (offset < 0)
2005 offset = 0;
2006
2007 avio_seek(ic->pb, offset, SEEK_SET);
2008 read_size = 0;
2009 for(;;) {
2010 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2011 break;
2012
2013 do {
2014 ret = av_read_packet(ic, pkt);
2015 } while(ret == AVERROR(EAGAIN));
2016 if (ret != 0)
2017 break;
2018 read_size += pkt->size;
2019 st = ic->streams[pkt->stream_index];
2020 if (pkt->pts != AV_NOPTS_VALUE &&
2021 (st->start_time != AV_NOPTS_VALUE ||
2022 st->first_dts != AV_NOPTS_VALUE)) {
2023 duration = end_time = pkt->pts;
2024 if (st->start_time != AV_NOPTS_VALUE)
2025 duration -= st->start_time;
2026 else
2027 duration -= st->first_dts;
2028 if (duration < 0)
2029 duration += 1LL<<st->pts_wrap_bits;
2030 if (duration > 0) {
2031 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2032 st->duration = duration;
2033 }
2034 }
2035 av_free_packet(pkt);
2036 }
2037 }while( end_time==AV_NOPTS_VALUE
2038 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2039 && ++retry <= DURATION_MAX_RETRY);
2040
2041 fill_all_stream_timings(ic);
2042
2043 avio_seek(ic->pb, old_offset, SEEK_SET);
2044 for (i=0; i<ic->nb_streams; i++) {
2045 st= ic->streams[i];
2046 st->cur_dts= st->first_dts;
2047 st->last_IP_pts = AV_NOPTS_VALUE;
2048 st->reference_dts = AV_NOPTS_VALUE;
2049 }
2050 }
2051
2052 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2053 {
2054 int64_t file_size;
2055
2056 /* get the file size, if possible */
2057 if (ic->iformat->flags & AVFMT_NOFILE) {
2058 file_size = 0;
2059 } else {
2060 file_size = avio_size(ic->pb);
2061 file_size = FFMAX(0, file_size);
2062 }
2063
2064 if ((!strcmp(ic->iformat->name, "mpeg") ||
2065 !strcmp(ic->iformat->name, "mpegts")) &&
2066 file_size && ic->pb->seekable) {
2067 /* get accurate estimate from the PTSes */
2068 estimate_timings_from_pts(ic, old_offset);
2069 } else if (has_duration(ic)) {
2070 /* at least one component has timings - we use them for all
2071 the components */
2072 fill_all_stream_timings(ic);
2073 } else {
2074 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2075 /* less precise: use bitrate info */
2076 estimate_timings_from_bit_rate(ic);
2077 }
2078 update_stream_timings(ic);
2079
2080 {
2081 int i;
2082 AVStream av_unused *st;
2083 for(i = 0;i < ic->nb_streams; i++) {
2084 st = ic->streams[i];
2085 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2086 (double) st->start_time / AV_TIME_BASE,
2087 (double) st->duration / AV_TIME_BASE);
2088 }
2089 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2090 (double) ic->start_time / AV_TIME_BASE,
2091 (double) ic->duration / AV_TIME_BASE,
2092 ic->bit_rate / 1000);
2093 }
2094 }
2095
2096 static int has_codec_parameters(AVCodecContext *avctx)
2097 {
2098 int val;
2099 switch (avctx->codec_type) {
2100 case AVMEDIA_TYPE_AUDIO:
2101 val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE;
2102 if (!avctx->frame_size &&
2103 (avctx->codec_id == CODEC_ID_VORBIS ||
2104 avctx->codec_id == CODEC_ID_AAC ||
2105 avctx->codec_id == CODEC_ID_MP1 ||
2106 avctx->codec_id == CODEC_ID_MP2 ||
2107 avctx->codec_id == CODEC_ID_MP3 ||
2108 avctx->codec_id == CODEC_ID_CELT))
2109 return 0;
2110 break;
2111 case AVMEDIA_TYPE_VIDEO:
2112 val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE;
2113 break;
2114 default:
2115 val = 1;
2116 break;
2117 }
2118 return avctx->codec_id != CODEC_ID_NONE && val != 0;
2119 }
2120
2121 static int has_decode_delay_been_guessed(AVStream *st)
2122 {
2123 return st->codec->codec_id != CODEC_ID_H264 ||
2124 st->info->nb_decoded_frames >= 6;
2125 }
2126
2127 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2128 {
2129 int16_t *samples;
2130 AVCodec *codec;
2131 int got_picture, data_size, ret=0;
2132 AVFrame picture;
2133
2134 if(!st->codec->codec){
2135 codec = avcodec_find_decoder(st->codec->codec_id);
2136 if (!codec)
2137 return -1;
2138 ret = avcodec_open2(st->codec, codec, options);
2139 if (ret < 0)
2140 return ret;
2141 }
2142
2143 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st) ||
2144 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF)) {
2145 switch(st->codec->codec_type) {
2146 case AVMEDIA_TYPE_VIDEO:
2147 avcodec_get_frame_defaults(&picture);
2148 ret = avcodec_decode_video2(st->codec, &picture,
2149 &got_picture, avpkt);
2150 if (got_picture)
2151 st->info->nb_decoded_frames++;
2152 break;
2153 case AVMEDIA_TYPE_AUDIO:
2154 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2155 samples = av_malloc(data_size);
2156             if (!samples)
2157                 { ret = AVERROR(ENOMEM); goto fail; }
2158 ret = avcodec_decode_audio3(st->codec, samples,
2159 &data_size, avpkt);
2160 av_free(samples);
2161 break;
2162 default:
2163 break;
2164 }
2165 }
2166 fail:
2167 return ret;
2168 }
2169
2170 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2171 {
2172 while (tags->id != CODEC_ID_NONE) {
2173 if (tags->id == id)
2174 return tags->tag;
2175 tags++;
2176 }
2177 return 0;
2178 }
2179
2180 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2181 {
2182 int i;
2183 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2184 if(tag == tags[i].tag)
2185 return tags[i].id;
2186 }
2187 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2188 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2189 return tags[i].id;
2190 }
2191 return CODEC_ID_NONE;
2192 }
2193
2194 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2195 {
2196 int i;
2197 for(i=0; tags && tags[i]; i++){
2198 int tag= ff_codec_get_tag(tags[i], id);
2199 if(tag) return tag;
2200 }
2201 return 0;
2202 }
2203
2204 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2205 {
2206 int i;
2207 for(i=0; tags && tags[i]; i++){
2208 enum CodecID id= ff_codec_get_id(tags[i], tag);
2209 if(id!=CODEC_ID_NONE) return id;
2210 }
2211 return CODEC_ID_NONE;
2212 }
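/* A minimal sketch of how these lookup helpers are used. The table below is
 * hypothetical; real (de)muxers pass tables such as the RIFF ones from riff.h.
 *
 *     static const AVCodecTag my_tags[] = {
 *         { CODEC_ID_H264, MKTAG('H','2','6','4') },
 *         { CODEC_ID_NONE, 0 },
 *     };
 *     const AVCodecTag *const tag_lists[] = { my_tags, NULL };
 *     unsigned int tag = av_codec_get_tag(tag_lists, CODEC_ID_H264); // 'H264'
 *     enum CodecID  id = av_codec_get_id(tag_lists, tag);            // CODEC_ID_H264
 */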
2213
2214 static void compute_chapters_end(AVFormatContext *s)
2215 {
2216 unsigned int i, j;
2217 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2218
2219 for (i = 0; i < s->nb_chapters; i++)
2220 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2221 AVChapter *ch = s->chapters[i];
2222 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2223 : INT64_MAX;
2224
2225 for (j = 0; j < s->nb_chapters; j++) {
2226 AVChapter *ch1 = s->chapters[j];
2227 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2228 if (j != i && next_start > ch->start && next_start < end)
2229 end = next_start;
2230 }
2231 ch->end = (end == INT64_MAX) ? ch->start : end;
2232 }
2233 }
2234
2235 static int get_std_framerate(int i){
2236 if(i<60*12) return i*1001;
2237 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2238 }
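/* get_std_framerate() above returns candidate frame rates scaled by 12*1001:
 * for i < 60*12 it encodes i/12 fps (steps of 1/12 up to just under 60 fps),
 * and the last five entries encode the NTSC-style rates 24000/1001,
 * 30000/1001, 60000/1001, 12000/1001 and 15000/1001. The frame rate guessing
 * code in avformat_find_stream_info() divides by 1001*12 to get back to
 * frames per second. */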
2239
2240 /*
2241  * Is the time base unreliable?
2242 * This is a heuristic to balance between quick acceptance of the values in
2243 * the headers vs. some extra checks.
2244 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2245 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2246 * And there are "variable" fps files this needs to detect as well.
2247 */
2248 static int tb_unreliable(AVCodecContext *c){
2249 if( c->time_base.den >= 101L*c->time_base.num
2250 || c->time_base.den < 5L*c->time_base.num
2251 /* || c->codec_tag == AV_RL32("DIVX")
2252 || c->codec_tag == AV_RL32("XVID")*/
2253 || c->codec_id == CODEC_ID_MPEG2VIDEO
2254 || c->codec_id == CODEC_ID_H264
2255 )
2256 return 1;
2257 return 0;
2258 }
2259
2260 #if FF_API_FORMAT_PARAMETERS
2261 int av_find_stream_info(AVFormatContext *ic)
2262 {
2263 return avformat_find_stream_info(ic, NULL);
2264 }
2265 #endif
2266
2267 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2268 {
2269 int i, count, ret, read_size, j;
2270 AVStream *st;
2271 AVPacket pkt1, *pkt;
2272 int64_t old_offset = avio_tell(ic->pb);
2273 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2274
2275 for(i=0;i<ic->nb_streams;i++) {
2276 AVCodec *codec;
2277 st = ic->streams[i];
2278
2279 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2280 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2281 /* if(!st->time_base.num)
2282 st->time_base= */
2283 if(!st->codec->time_base.num)
2284 st->codec->time_base= st->time_base;
2285 }
2286         //only needed for the extradata split further below
2287 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2288 st->parser = av_parser_init(st->codec->codec_id);
2289 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2290 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2291 }
2292 }
2293 assert(!st->codec->codec);
2294 codec = avcodec_find_decoder(st->codec->codec_id);
2295
2296 /* Ensure that subtitle_header is properly set. */
2297 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2298 && codec && !st->codec->codec)
2299 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2300
2301 //try to just open decoders, in case this is enough to get parameters
2302 if(!has_codec_parameters(st->codec)){
2303 if (codec && !st->codec->codec)
2304 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2305 }
2306 }
2307
2308 for (i=0; i<ic->nb_streams; i++) {
2309 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2310 }
2311
2312 count = 0;
2313 read_size = 0;
2314 for(;;) {
2315 if (ff_check_interrupt(&ic->interrupt_callback)){
2316 ret= AVERROR_EXIT;
2317 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2318 break;
2319 }
2320
2321 /* check if one codec still needs to be handled */
2322 for(i=0;i<ic->nb_streams;i++) {
2323 int fps_analyze_framecount = 20;
2324
2325 st = ic->streams[i];
2326 if (!has_codec_parameters(st->codec))
2327 break;
2328 /* if the timebase is coarse (like the usual millisecond precision
2329 of mkv), we need to analyze more frames to reliably arrive at
2330 the correct fps */
2331 if (av_q2d(st->time_base) > 0.0005)
2332 fps_analyze_framecount *= 2;
2333 if (ic->fps_probe_size >= 0)
2334 fps_analyze_framecount = ic->fps_probe_size;
2335 /* variable fps and no guess at the real fps */
2336 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2337 && st->info->duration_count < fps_analyze_framecount
2338 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2339 break;
2340 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2341 break;
2342 if(st->first_dts == AV_NOPTS_VALUE)
2343 break;
2344 }
2345 if (i == ic->nb_streams) {
2346 /* NOTE: if the format has no header, then we need to read
2347 some packets to get most of the streams, so we cannot
2348 stop here */
2349 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2350 /* if we found the info for all the codecs, we can stop */
2351 ret = count;
2352 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2353 break;
2354 }
2355 }
2356 /* we did not get all the codec info, but we read too much data */
2357 if (read_size >= ic->probesize) {
2358 ret = count;
2359 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2360 break;
2361 }
2362
2363         /* NOTE: a new stream can be added here if the file has no header
2364            (AVFMTCTX_NOHEADER) */
2365 ret = read_frame_internal(ic, &pkt1);
2366 if (ret == AVERROR(EAGAIN))
2367 continue;
2368
2369 if (ret < 0) {
2370 /* EOF or error */
2371             ret = -1; /* we could not get all the codec parameters before EOF */
2372 for(i=0;i<ic->nb_streams;i++) {
2373 st = ic->streams[i];
2374 if (!has_codec_parameters(st->codec)){
2375 char buf[256];
2376 avcodec_string(buf, sizeof(buf), st->codec, 0);
2377 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2378 } else {
2379 ret = 0;
2380 }
2381 }
2382 break;
2383 }
2384
2385 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2386 if ((ret = av_dup_packet(pkt)) < 0)
2387 goto find_stream_info_err;
2388
2389 read_size += pkt->size;
2390
2391 st = ic->streams[pkt->stream_index];
2392 if (st->codec_info_nb_frames>1) {
2393 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2394 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2395 break;
2396 }
2397 st->info->codec_info_duration += pkt->duration;
2398 }
2399 {
2400 int64_t last = st->info->last_dts;
2401
2402 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2403 int64_t duration= pkt->dts - last;
2404 double dur= duration * av_q2d(st->time_base);
2405
2406 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2407 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2408 if (st->info->duration_count < 2)
2409 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
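                // For each standard frame rate candidate, round the observed inter-packet
                // duration to a whole number of frame periods and accumulate the squared
                // rounding error; the candidate with the smallest accumulated error is used
                // further below when guessing r_frame_rate.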
2410 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2411 int framerate= get_std_framerate(i);
2412 int ticks= lrintf(dur*framerate/(1001*12));
2413 double error = dur - (double)ticks*1001*12 / framerate;
2414 st->info->duration_error[i] += error*error;
2415 }
2416 st->info->duration_count++;
2417                 // ignore the first 4 values; they might have some random jitter
2418 if (st->info->duration_count > 3)
2419 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2420 }
2421 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2422 st->info->last_dts = pkt->dts;
2423 }
2424 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2425 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2426 if(i){
2427 st->codec->extradata_size= i;
2428 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2429 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2430 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2431 }
2432 }
2433
2434         /* If we still have no information, we try to open the codec and to
2435            decode one frame. We try to avoid that in most cases as
2436            it takes longer and uses more memory. For MPEG-4, we need to
2437            decode for QuickTime.
2438
2439            If CODEC_CAP_CHANNEL_CONF is set, this will force decoding of at
2440            least one frame of codec data; this makes sure the codec initializes
2441            the channel configuration and does not only trust the values from the container.
2442         */
2443 try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i] : NULL);
2444
2445 st->codec_info_nb_frames++;
2446 count++;
2447 }
2448
2449 // close codecs which were opened in try_decode_frame()
2450 for(i=0;i<ic->nb_streams;i++) {
2451 st = ic->streams[i];
2452 if(st->codec->codec)
2453 avcodec_close(st->codec);
2454 }
2455 for(i=0;i<ic->nb_streams;i++) {
2456 st = ic->streams[i];
2457 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2458 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2459 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2460 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2461 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2462 // the check for tb_unreliable() is not completely correct, since this is not about handling
2463             // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2464 // ipmovie.c produces.
2465 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2466 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2467 if (st->info->duration_count && !st->r_frame_rate.num
2468 && tb_unreliable(st->codec) /*&&
2469 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2470 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2471 int num = 0;
2472 double best_error= 2*av_q2d(st->time_base);
2473 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2474
2475 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2476 double error = st->info->duration_error[j] * get_std_framerate(j);
2477 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2478 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2479 if(error < best_error){
2480 best_error= error;
2481 num = get_std_framerate(j);
2482 }
2483 }
2484 // do not increase frame rate by more than 1 % in order to match a standard rate.
2485 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2486 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2487 }
2488
2489 if (!st->r_frame_rate.num){
2490 if( st->codec->time_base.den * (int64_t)st->time_base.num
2491 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2492 st->r_frame_rate.num = st->codec->time_base.den;
2493 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2494 }else{
2495 st->r_frame_rate.num = st->time_base.den;
2496 st->r_frame_rate.den = st->time_base.num;
2497 }
2498 }
2499 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2500 if(!st->codec->bits_per_coded_sample)
2501 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2502 // set stream disposition based on audio service type
2503 switch (st->codec->audio_service_type) {
2504 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2505 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2506 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2507 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2508 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2509 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2510 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2511 st->disposition = AV_DISPOSITION_COMMENT; break;
2512 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2513 st->disposition = AV_DISPOSITION_KARAOKE; break;
2514 }
2515 }
2516 }
2517
2518 estimate_timings(ic, old_offset);
2519
2520 compute_chapters_end(ic);
2521
2522 #if 0
2523 /* correct DTS for B-frame streams with no timestamps */
2524 for(i=0;i<ic->nb_streams;i++) {
2525 st = ic->streams[i];
2526 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2527 if(b-frames){
2528 ppktl = &ic->packet_buffer;
2529 while(ppkt1){
2530 if(ppkt1->stream_index != i)
2531 continue;
2532 if(ppkt1->pkt->dts < 0)
2533 break;
2534 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2535 break;
2536 ppkt1->pkt->dts -= delta;
2537 ppkt1= ppkt1->next;
2538 }
2539 if(ppkt1)
2540 continue;
2541 st->cur_dts -= delta;
2542 }
2543 }
2544 }
2545 #endif
2546
2547 find_stream_info_err:
2548 for (i=0; i < ic->nb_streams; i++)
2549 av_freep(&ic->streams[i]->info);
2550 return ret;
2551 }
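/* A minimal usage sketch (not part of this file), assuming a file name in
 * `filename`; error handling is reduced to early returns:
 *
 *     AVFormatContext *ic = NULL;
 *     if (avformat_open_input(&ic, filename, NULL, NULL) < 0)
 *         return -1;
 *     if (avformat_find_stream_info(ic, NULL) < 0) {
 *         av_close_input_file(ic);
 *         return -1;
 *     }
 *     av_dump_format(ic, 0, filename, 0);
 *     av_close_input_file(ic);
 */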
2552
2553 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2554 {
2555 int i, j;
2556
2557 for (i = 0; i < ic->nb_programs; i++)
2558 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2559 if (ic->programs[i]->stream_index[j] == s)
2560 return ic->programs[i];
2561 return NULL;
2562 }
2563
2564 int av_find_best_stream(AVFormatContext *ic,
2565 enum AVMediaType type,
2566 int wanted_stream_nb,
2567 int related_stream,
2568 AVCodec **decoder_ret,
2569 int flags)
2570 {
2571 int i, nb_streams = ic->nb_streams;
2572 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2573 unsigned *program = NULL;
2574 AVCodec *decoder = NULL, *best_decoder = NULL;
2575
2576 if (related_stream >= 0 && wanted_stream_nb < 0) {
2577 AVProgram *p = find_program_from_stream(ic, related_stream);
2578 if (p) {
2579 program = p->stream_index;
2580 nb_streams = p->nb_stream_indexes;
2581 }
2582 }
2583 for (i = 0; i < nb_streams; i++) {
2584 int real_stream_index = program ? program[i] : i;
2585 AVStream *st = ic->streams[real_stream_index];
2586 AVCodecContext *avctx = st->codec;
2587 if (avctx->codec_type != type)
2588 continue;
2589 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2590 continue;
2591 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2592 continue;
2593 if (decoder_ret) {
2594 decoder = avcodec_find_decoder(st->codec->codec_id);
2595 if (!decoder) {
2596 if (ret < 0)
2597 ret = AVERROR_DECODER_NOT_FOUND;
2598 continue;
2599 }
2600 }
2601 if (best_count >= st->codec_info_nb_frames)
2602 continue;
2603 best_count = st->codec_info_nb_frames;
2604 ret = real_stream_index;
2605 best_decoder = decoder;
2606 if (program && i == nb_streams - 1 && ret < 0) {
2607 program = NULL;
2608 nb_streams = ic->nb_streams;
2609 i = 0; /* no related stream found, try again with everything */
2610 }
2611 }
2612 if (decoder_ret)
2613 *decoder_ret = best_decoder;
2614 return ret;
2615 }
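/* A minimal usage sketch: pick the "best" video stream together with a
 * decoder for it, assuming an already opened and probed AVFormatContext *ic:
 *
 *     AVCodec *dec = NULL;
 *     int vidx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (vidx < 0) {
 *         // AVERROR_STREAM_NOT_FOUND or AVERROR_DECODER_NOT_FOUND
 *     } else if (avcodec_open2(ic->streams[vidx]->codec, dec, NULL) < 0) {
 *         // failed to open the decoder
 *     }
 */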
2616
2617 /*******************************************************/
2618
2619 int av_read_play(AVFormatContext *s)
2620 {
2621 if (s->iformat->read_play)
2622 return s->iformat->read_play(s);
2623 if (s->pb)
2624 return avio_pause(s->pb, 0);
2625 return AVERROR(ENOSYS);
2626 }
2627
2628 int av_read_pause(AVFormatContext *s)
2629 {
2630 if (s->iformat->read_pause)
2631 return s->iformat->read_pause(s);
2632 if (s->pb)
2633 return avio_pause(s->pb, 1);
2634 return AVERROR(ENOSYS);
2635 }
2636
2637 void av_close_input_stream(AVFormatContext *s)
2638 {
2639 flush_packet_queue(s);
2640 if (s->iformat->read_close)
2641 s->iformat->read_close(s);
2642 avformat_free_context(s);
2643 }
2644
2645 void avformat_free_context(AVFormatContext *s)
2646 {
2647 int i;
2648 AVStream *st;
2649
2650 av_opt_free(s);
2651 if (s->iformat && s->iformat->priv_class && s->priv_data)
2652 av_opt_free(s->priv_data);
2653
2654 for(i=0;i<s->nb_streams;i++) {
2655 /* free all data in a stream component */
2656 st = s->streams[i];
2657 if (st->parser) {
2658 av_parser_close(st->parser);
2659 av_free_packet(&st->cur_pkt);
2660 }
2661 av_dict_free(&st->metadata);
2662 av_free(st->index_entries);
2663 av_free(st->codec->extradata);
2664 av_free(st->codec->subtitle_header);
2665 av_free(st->codec);
2666 av_free(st->priv_data);
2667 av_free(st->info);
2668 av_free(st);
2669 }
2670 for(i=s->nb_programs-1; i>=0; i--) {
2671 av_dict_free(&s->programs[i]->metadata);
2672 av_freep(&s->programs[i]->stream_index);
2673 av_freep(&s->programs[i]);
2674 }
2675 av_freep(&s->programs);
2676 av_freep(&s->priv_data);
2677 while(s->nb_chapters--) {
2678 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2679 av_free(s->chapters[s->nb_chapters]);
2680 }
2681 av_freep(&s->chapters);
2682 av_dict_free(&s->metadata);
2683 av_freep(&s->streams);
2684 av_free(s);
2685 }
2686
2687 void av_close_input_file(AVFormatContext *s)
2688 {
2689 AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2690 NULL : s->pb;
2691 av_close_input_stream(s);
2692 if (pb)
2693 avio_close(pb);
2694 }
2695
2696 #if FF_API_NEW_STREAM
2697 AVStream *av_new_stream(AVFormatContext *s, int id)
2698 {
2699 AVStream *st = avformat_new_stream(s, NULL);
2700 if (st)
2701 st->id = id;
2702 return st;
2703 }
2704 #endif
2705
2706 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
2707 {
2708 AVStream *st;
2709 int i;
2710 AVStream **streams;
2711
2712 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2713 return NULL;
2714 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2715 if (!streams)
2716 return NULL;
2717 s->streams = streams;
2718
2719 st = av_mallocz(sizeof(AVStream));
2720 if (!st)
2721 return NULL;
2722 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2723 av_free(st);
2724 return NULL;
2725 }
2726
2727 st->codec = avcodec_alloc_context3(c);
2728 if (s->iformat) {
2729 /* no default bitrate if decoding */
2730 st->codec->bit_rate = 0;
2731 }
2732 st->index = s->nb_streams;
2733 st->start_time = AV_NOPTS_VALUE;
2734 st->duration = AV_NOPTS_VALUE;
2735     /* we set the current DTS to 0 so that formats without any timestamps
2736        but with durations get some timestamps; formats with some unknown
2737        timestamps have their first few packets buffered and the
2738        timestamps corrected before they are returned to the user */
2739 st->cur_dts = 0;
2740 st->first_dts = AV_NOPTS_VALUE;
2741 st->probe_packets = MAX_PROBE_PACKETS;
2742
2743 /* default pts setting is MPEG-like */
2744 av_set_pts_info(st, 33, 1, 90000);
2745 st->last_IP_pts = AV_NOPTS_VALUE;
2746 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2747 st->pts_buffer[i]= AV_NOPTS_VALUE;
2748 st->reference_dts = AV_NOPTS_VALUE;
2749
2750 st->sample_aspect_ratio = (AVRational){0,1};
2751
2752 s->streams[s->nb_streams++] = st;
2753 return st;
2754 }
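/* A minimal muxing-side sketch (the output context `oc` and the codec choice
 * are assumptions for illustration): create one audio stream and fill in the
 * parameters a muxer typically needs before avformat_write_header().
 *
 *     AVCodec  *enc = avcodec_find_encoder(CODEC_ID_MP2);
 *     AVStream *st  = avformat_new_stream(oc, enc);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->codec->sample_rate = 44100;
 *     st->codec->channels    = 2;
 *     st->codec->bit_rate    = 128000;
 */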
2755
2756 AVProgram *av_new_program(AVFormatContext *ac, int id)
2757 {
2758 AVProgram *program=NULL;
2759 int i;
2760
2761 av_dlog(ac, "new_program: id=0x%04x\n", id);
2762
2763 for(i=0; i<ac->nb_programs; i++)
2764 if(ac->programs[i]->id == id)
2765 program = ac->programs[i];
2766
2767 if(!program){
2768 program = av_mallocz(sizeof(AVProgram));
2769 if (!program)
2770 return NULL;
2771 dynarray_add(&ac->programs, &ac->nb_programs, program);
2772 program->discard = AVDISCARD_NONE;
2773 }
2774 program->id = id;
2775
2776 return program;
2777 }
2778
2779 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2780 {
2781 AVChapter *chapter = NULL;
2782 int i;
2783
2784 for(i=0; i<s->nb_chapters; i++)
2785 if(s->chapters[i]->id == id)
2786 chapter = s->chapters[i];
2787
2788 if(!chapter){
2789 chapter= av_mallocz(sizeof(AVChapter));
2790 if(!chapter)
2791 return NULL;
2792 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2793 }
2794 av_dict_set(&chapter->metadata, "title", title, 0);
2795 chapter->id = id;
2796 chapter->time_base= time_base;
2797 chapter->start = start;
2798 chapter->end = end;
2799
2800 return chapter;
2801 }
2802
2803 /************************************************************/
2804 /* output media file */
2805
2806 #if FF_API_FORMAT_PARAMETERS
2807 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2808 {
2809 int ret;
2810
2811 if (s->oformat->priv_data_size > 0) {
2812 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2813 if (!s->priv_data)
2814 return AVERROR(ENOMEM);
2815 if (s->oformat->priv_class) {
2816 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2817 av_opt_set_defaults(s->priv_data);
2818 }
2819 } else
2820 s->priv_data = NULL;
2821
2822 if (s->oformat->set_parameters) {
2823 ret = s->oformat->set_parameters(s, ap);
2824 if (ret < 0)
2825 return ret;
2826 }
2827 return 0;
2828 }
2829 #endif
2830
2831 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2832 {
2833 const AVCodecTag *avctag;
2834 int n;
2835 enum CodecID id = CODEC_ID_NONE;
2836 unsigned int tag = 0;
2837
2838 /**
2839 * Check that tag + id is in the table
2840 * If neither is in the table -> OK
2841 * If tag is in the table with another id -> FAIL
2842 * If id is in the table with another tag -> FAIL unless strict < normal
2843 */
2844 for (n = 0; s->oformat->codec_tag[n]; n++) {
2845 avctag = s->oformat->codec_tag[n];
2846 while (avctag->id != CODEC_ID_NONE) {
2847 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
2848 id = avctag->id;
2849 if (id == st->codec->codec_id)
2850 return 1;
2851 }
2852 if (avctag->id == st->codec->codec_id)
2853 tag = avctag->tag;
2854 avctag++;
2855 }
2856 }
2857 if (id != CODEC_ID_NONE)
2858 return 0;
2859 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
2860 return 0;
2861 return 1;
2862 }
2863
2864 #if FF_API_FORMAT_PARAMETERS
2865 int av_write_header(AVFormatContext *s)
2866 {
2867 return avformat_write_header(s, NULL);
2868 }
2869 #endif
2870
2871 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
2872 {
2873 int ret = 0, i;
2874 AVStream *st;
2875 AVDictionary *tmp = NULL;
2876
2877 if (options)
2878 av_dict_copy(&tmp, *options, 0);
2879 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
2880 goto fail;
2881
2882 // some sanity checks
2883 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2884 av_log(s, AV_LOG_ERROR, "no streams\n");
2885 ret = AVERROR(EINVAL);
2886 goto fail;
2887 }
2888
2889 for(i=0;i<s->nb_streams;i++) {
2890 st = s->streams[i];
2891
2892 switch (st->codec->codec_type) {
2893 case AVMEDIA_TYPE_AUDIO:
2894 if(st->codec->sample_rate<=0){
2895 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2896 ret = AVERROR(EINVAL);
2897 goto fail;
2898 }
2899 if(!st->codec->block_align)
2900 st->codec->block_align = st->codec->channels *
2901 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2902 break;
2903 case AVMEDIA_TYPE_VIDEO:
2904 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2905 av_log(s, AV_LOG_ERROR, "time base not set\n");
2906 ret = AVERROR(EINVAL);
2907 goto fail;
2908 }
2909 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2910 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2911 ret = AVERROR(EINVAL);
2912 goto fail;
2913 }
2914 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2915 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2916 ret = AVERROR(EINVAL);
2917 goto fail;
2918 }
2919 break;
2920 }
2921
2922 if(s->oformat->codec_tag){
2923 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2924             //the current rawvideo encoding system ends up setting the wrong codec_tag for AVI; we override it here
2925 st->codec->codec_tag= 0;
2926 }
2927 if(st->codec->codec_tag){
2928 if (!validate_codec_tag(s, st)) {
2929 char tagbuf[32];
2930 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2931 av_log(s, AV_LOG_ERROR,
2932 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2933 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2934 ret = AVERROR_INVALIDDATA;
2935 goto fail;
2936 }
2937 }else
2938 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2939 }
2940
2941 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2942 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2943 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2944 }
2945
2946 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2947 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2948 if (!s->priv_data) {
2949 ret = AVERROR(ENOMEM);
2950 goto fail;
2951 }
2952 if (s->oformat->priv_class) {
2953 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2954 av_opt_set_defaults(s->priv_data);
2955 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
2956 goto fail;
2957 }
2958 }
2959
2960 /* set muxer identification string */
2961 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2962 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2963 }
2964
2965 if(s->oformat->write_header){
2966 ret = s->oformat->write_header(s);
2967 if (ret < 0)
2968 goto fail;
2969 }
2970
2971 /* init PTS generation */
2972 for(i=0;i<s->nb_streams;i++) {
2973 int64_t den = AV_NOPTS_VALUE;
2974 st = s->streams[i];
2975
2976 switch (st->codec->codec_type) {
2977 case AVMEDIA_TYPE_AUDIO:
2978 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2979 break;
2980 case AVMEDIA_TYPE_VIDEO:
2981 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2982 break;
2983 default:
2984 break;
2985 }
2986 if (den != AV_NOPTS_VALUE) {
2987 if (den <= 0) {
2988 ret = AVERROR_INVALIDDATA;
2989 goto fail;
2990 }
2991 frac_init(&st->pts, 0, 0, den);
2992 }
2993 }
2994
2995 if (options) {
2996 av_dict_free(options);
2997 *options = tmp;
2998 }
2999 return 0;
3000 fail:
3001 av_dict_free(&tmp);
3002 return ret;
3003 }
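/* A minimal sketch of the muxing call sequence this function belongs to
 * (the output context `oc` and the packet source are assumptions; options
 * and error handling are omitted):
 *
 *     if (avformat_write_header(oc, NULL) < 0)
 *         return -1;
 *     while (get_next_packet(&pkt) == 0)          // get_next_packet() is hypothetical
 *         av_interleaved_write_frame(oc, &pkt);
 *     av_write_trailer(oc);
 */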
3004
3005 //FIXME merge with compute_pkt_fields
3006 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3007 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3008 int num, den, frame_size, i;
3009
3010 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
3011 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
3012
3013 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
3014 return AVERROR(EINVAL);*/
3015
3016 /* duration field */
3017 if (pkt->duration == 0) {
3018 compute_frame_duration(&num, &den, st, NULL, pkt);
3019 if (den && num) {
3020 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
3021 }
3022 }
3023
3024 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3025 pkt->pts= pkt->dts;
3026
3027 //XXX/FIXME this is a temporary hack until all encoders output pts
3028 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3029 pkt->dts=
3030 // pkt->pts= st->cur_dts;
3031 pkt->pts= st->pts.val;
3032 }
3033
3034 //calculate dts from pts
3035 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3036 st->pts_buffer[0]= pkt->pts;
3037 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3038 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
3039 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3040 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3041
3042 pkt->dts= st->pts_buffer[0];
3043 }
3044
3045 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
3046 av_log(s, AV_LOG_ERROR,
3047 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
3048 st->index, st->cur_dts, pkt->dts);
3049 return AVERROR(EINVAL);
3050 }
3051 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3052 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
3053 return AVERROR(EINVAL);
3054 }
3055
3056 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
3057 st->cur_dts= pkt->dts;
3058 st->pts.val= pkt->dts;
3059
3060 /* update pts */
3061 switch (st->codec->codec_type) {
3062 case AVMEDIA_TYPE_AUDIO:
3063 frame_size = get_audio_frame_size(st->codec, pkt->size);
3064
3065         /* HACK/FIXME: we skip the initial zero-sized packets as they most
3066            likely correspond to the encoder delay, but it would be better if we
3067            had the real timestamps from the encoder */
3068 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3069 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3070 }
3071 break;
3072 case AVMEDIA_TYPE_VIDEO:
3073 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
3074 break;
3075 default:
3076 break;
3077 }
3078 return 0;
3079 }
3080
3081 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3082 {
3083 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3084
3085 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3086 return ret;
3087
3088 ret= s->oformat->write_packet(s, pkt);
3089
3090 if (ret >= 0)
3091 s->streams[pkt->stream_index]->nb_frames++;
3092 return ret;
3093 }
3094
3095 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3096 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3097 {
3098 AVPacketList **next_point, *this_pktl;
3099
3100 this_pktl = av_mallocz(sizeof(AVPacketList));
3101 this_pktl->pkt= *pkt;