[libav.git] / libavformat / utils.c
1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /* #define DEBUG */
23
24 #include "avformat.h"
25 #include "avio_internal.h"
26 #include "internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/dict.h"
30 #include "libavutil/pixdesc.h"
31 #include "metadata.h"
32 #include "id3v2.h"
33 #include "libavutil/avstring.h"
34 #include "libavutil/mathematics.h"
35 #include "riff.h"
36 #include "audiointerleave.h"
37 #include "url.h"
38 #include <sys/time.h>
39 #include <time.h>
40 #include <stdarg.h>
41 #if CONFIG_NETWORK
42 #include "network.h"
43 #endif
44
45 #undef NDEBUG
46 #include <assert.h>
47
48 /**
49 * @file
50 * various utility functions for use within Libav
51 */
52
53 unsigned avformat_version(void)
54 {
55 return LIBAVFORMAT_VERSION_INT;
56 }
57
58 const char *avformat_configuration(void)
59 {
60 return LIBAV_CONFIGURATION;
61 }
62
63 const char *avformat_license(void)
64 {
65 #define LICENSE_PREFIX "libavformat license: "
66 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
67 }
68
69 /* fraction handling */
70
71 /**
72 * f = val + (num / den) + 0.5.
73 *
74 * 'num' is normalized so that 0 <= num < den.
75 *
76 * @param f fractional number
77 * @param val integer value
78 * @param num must be >= 0
79 * @param den must be >= 1
80 */
81 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
82 {
83 num += (den >> 1);
84 if (num >= den) {
85 val += num / den;
86 num = num % den;
87 }
88 f->val = val;
89 f->num = num;
90 f->den = den;
91 }
92
93 /**
94 * Fractional addition to f: f = f + (incr / f->den).
95 *
96 * @param f fractional number
97 * @param incr increment, can be positive or negative
98 */
99 static void frac_add(AVFrac *f, int64_t incr)
100 {
101 int64_t num, den;
102
103 num = f->num + incr;
104 den = f->den;
105 if (num < 0) {
106 f->val += num / den;
107 num = num % den;
108 if (num < 0) {
109 num += den;
110 f->val--;
111 }
112 } else if (num >= den) {
113 f->val += num / den;
114 num = num % den;
115 }
116 f->num = num;
117 }
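/* Illustrative worked trace (not part of the original source), showing how the
 * AVFrac helpers above track a value of the form val + num/den.  The numbers
 * used here are hypothetical:
 *
 *     AVFrac f;
 *     frac_init(&f, 10, 0, 3);   // f = {val=10, num=1, den=3}  (num gets the +den/2 rounding bias)
 *     frac_add(&f, 4);           // num -> 5 >= den, so f = {val=11, num=2, den=3}
 *
 * i.e. adding 4/3 advances the integer part from 10 to 11 with 2/3 left over. */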
118
119 /** head of registered input format linked list */
120 static AVInputFormat *first_iformat = NULL;
121 /** head of registered output format linked list */
122 static AVOutputFormat *first_oformat = NULL;
123
124 AVInputFormat *av_iformat_next(AVInputFormat *f)
125 {
126 if(f) return f->next;
127 else return first_iformat;
128 }
129
130 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
131 {
132 if(f) return f->next;
133 else return first_oformat;
134 }
135
136 void av_register_input_format(AVInputFormat *format)
137 {
138 AVInputFormat **p;
139 p = &first_iformat;
140 while (*p != NULL) p = &(*p)->next;
141 *p = format;
142 format->next = NULL;
143 }
144
145 void av_register_output_format(AVOutputFormat *format)
146 {
147 AVOutputFormat **p;
148 p = &first_oformat;
149 while (*p != NULL) p = &(*p)->next;
150 *p = format;
151 format->next = NULL;
152 }
153
154 int av_match_ext(const char *filename, const char *extensions)
155 {
156 const char *ext, *p;
157 char ext1[32], *q;
158
159 if(!filename)
160 return 0;
161
162 ext = strrchr(filename, '.');
163 if (ext) {
164 ext++;
165 p = extensions;
166 for(;;) {
167 q = ext1;
168 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
169 *q++ = *p++;
170 *q = '\0';
171 if (!av_strcasecmp(ext1, ext))
172 return 1;
173 if (*p == '\0')
174 break;
175 p++;
176 }
177 }
178 return 0;
179 }
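/* Illustrative usage sketch (not part of the original source); the filenames
 * and extension list are hypothetical.  The match is done on the text after
 * the last '.' and is case-insensitive:
 *
 *     av_match_ext("clip.MKV", "mkv,webm");   // returns 1
 *     av_match_ext("clip.avi", "mkv,webm");   // returns 0
 */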
180
181 static int match_format(const char *name, const char *names)
182 {
183 const char *p;
184 int len, namelen;
185
186 if (!name || !names)
187 return 0;
188
189 namelen = strlen(name);
190 while ((p = strchr(names, ','))) {
191 len = FFMAX(p - names, namelen);
192 if (!av_strncasecmp(name, names, len))
193 return 1;
194 names = p+1;
195 }
196 return !av_strcasecmp(name, names);
197 }
198
199 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
200 const char *mime_type)
201 {
202 AVOutputFormat *fmt = NULL, *fmt_found;
203 int score_max, score;
204
205 /* specific test for image sequences */
206 #if CONFIG_IMAGE2_MUXER
207 if (!short_name && filename &&
208 av_filename_number_test(filename) &&
209 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
210 return av_guess_format("image2", NULL, NULL);
211 }
212 #endif
213 /* Find the proper file type. */
214 fmt_found = NULL;
215 score_max = 0;
216 while ((fmt = av_oformat_next(fmt))) {
217 score = 0;
218 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
219 score += 100;
220 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
221 score += 10;
222 if (filename && fmt->extensions &&
223 av_match_ext(filename, fmt->extensions)) {
224 score += 5;
225 }
226 if (score > score_max) {
227 score_max = score;
228 fmt_found = fmt;
229 }
230 }
231 return fmt_found;
232 }
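/* Illustrative usage sketch (not part of the original source): picking a muxer
 * from a hypothetical output filename.  This assumes the (de)muxers have been
 * registered beforehand, e.g. with av_register_all():
 *
 *     AVOutputFormat *ofmt = av_guess_format(NULL, "out.mkv", NULL);
 *     if (!ofmt)
 *         ofmt = av_guess_format("matroska", NULL, NULL);   // fall back to an explicit short name
 */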
233
234 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
235 const char *filename, const char *mime_type, enum AVMediaType type){
236 if(type == AVMEDIA_TYPE_VIDEO){
237 enum CodecID codec_id= CODEC_ID_NONE;
238
239 #if CONFIG_IMAGE2_MUXER
240 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
241 codec_id= ff_guess_image2_codec(filename);
242 }
243 #endif
244 if(codec_id == CODEC_ID_NONE)
245 codec_id= fmt->video_codec;
246 return codec_id;
247 }else if(type == AVMEDIA_TYPE_AUDIO)
248 return fmt->audio_codec;
249 else if (type == AVMEDIA_TYPE_SUBTITLE)
250 return fmt->subtitle_codec;
251 else
252 return CODEC_ID_NONE;
253 }
254
255 AVInputFormat *av_find_input_format(const char *short_name)
256 {
257 AVInputFormat *fmt = NULL;
258 while ((fmt = av_iformat_next(fmt))) {
259 if (match_format(short_name, fmt->name))
260 return fmt;
261 }
262 return NULL;
263 }
264
265
266 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
267 {
268 int ret= av_new_packet(pkt, size);
269
270 if(ret<0)
271 return ret;
272
273 pkt->pos= avio_tell(s);
274
275 ret= avio_read(s, pkt->data, size);
276 if(ret<=0)
277 av_free_packet(pkt);
278 else
279 av_shrink_packet(pkt, ret);
280
281 return ret;
282 }
283
284 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
285 {
286 int ret;
287 int old_size;
288 if (!pkt->size)
289 return av_get_packet(s, pkt, size);
290 old_size = pkt->size;
291 ret = av_grow_packet(pkt, size);
292 if (ret < 0)
293 return ret;
294 ret = avio_read(s, pkt->data + old_size, size);
295 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
296 return ret;
297 }
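/* Illustrative usage sketch (not part of the original source): reading a fixed
 * amount of data from an already opened AVIOContext 'pb' (hypothetical) into a
 * packet.  av_get_packet() allocates the packet buffer itself:
 *
 *     AVPacket pkt;
 *     int n = av_get_packet(pb, &pkt, 4096);   // n = bytes actually read, <= 4096
 *     if (n > 0) {
 *         // ... consume pkt.data / pkt.size ...
 *         av_free_packet(&pkt);
 *     }
 */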
298
299
300 int av_filename_number_test(const char *filename)
301 {
302 char buf[1024];
303 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
304 }
305
306 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
307 {
308 AVProbeData lpd = *pd;
309 AVInputFormat *fmt1 = NULL, *fmt;
310 int score, id3 = 0;
311
312 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
313 int id3len = ff_id3v2_tag_len(lpd.buf);
314 if (lpd.buf_size > id3len + 16) {
315 lpd.buf += id3len;
316 lpd.buf_size -= id3len;
317 }
318 id3 = 1;
319 }
320
321 fmt = NULL;
322 while ((fmt1 = av_iformat_next(fmt1))) {
323 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
324 continue;
325 score = 0;
326 if (fmt1->read_probe) {
327 score = fmt1->read_probe(&lpd);
328 } else if (fmt1->extensions) {
329 if (av_match_ext(lpd.filename, fmt1->extensions)) {
330 score = 50;
331 }
332 }
333 if (score > *score_max) {
334 *score_max = score;
335 fmt = fmt1;
336 }else if (score == *score_max)
337 fmt = NULL;
338 }
339
340 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */
341 if (!fmt && is_opened && *score_max < AVPROBE_SCORE_MAX/4) {
342 while ((fmt = av_iformat_next(fmt)))
343 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) {
344 *score_max = AVPROBE_SCORE_MAX/4;
345 break;
346 }
347 }
348
349 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4-1) {
350 while ((fmt = av_iformat_next(fmt)))
351 if (fmt->extensions && av_match_ext("mp3", fmt->extensions)) {
352 *score_max = AVPROBE_SCORE_MAX/4-1;
353 break;
354 }
355 }
356
357 return fmt;
358 }
359
360 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
361 int score=0;
362 return av_probe_input_format2(pd, is_opened, &score);
363 }
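/* Illustrative usage sketch (not part of the original source): probing a
 * caller-supplied buffer.  'buf', 'buf_size' and the filename are hypothetical;
 * the buffer is expected to be followed by AVPROBE_PADDING_SIZE zero bytes:
 *
 *     AVProbeData pd = { "capture.bin", buf, buf_size };
 *     int score = 0;
 *     AVInputFormat *fmt = av_probe_input_format2(&pd, 1, &score);
 *     // fmt is NULL if no demuxer scored above the initial 'score'
 */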
364
365 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
366 {
367 static const struct {
368 const char *name; enum CodecID id; enum AVMediaType type;
369 } fmt_id_type[] = {
370 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
371 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
372 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
373 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
374 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
375 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
376 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
377 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
378 { 0 }
379 };
380 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
381
382 if (fmt) {
383 int i;
384 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
385 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
386 for (i = 0; fmt_id_type[i].name; i++) {
387 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
388 st->codec->codec_id = fmt_id_type[i].id;
389 st->codec->codec_type = fmt_id_type[i].type;
390 break;
391 }
392 }
393 }
394 return !!fmt;
395 }
396
397 /************************************************************/
398 /* input media file */
399
400 #if FF_API_FORMAT_PARAMETERS
401 static AVDictionary *convert_format_parameters(AVFormatParameters *ap)
402 {
403 char buf[1024];
404 AVDictionary *opts = NULL;
405
406 if (!ap)
407 return NULL;
408
409 if (ap->time_base.num) {
410 snprintf(buf, sizeof(buf), "%d/%d", ap->time_base.den, ap->time_base.num);
411 av_dict_set(&opts, "framerate", buf, 0);
412 }
413 if (ap->sample_rate) {
414 snprintf(buf, sizeof(buf), "%d", ap->sample_rate);
415 av_dict_set(&opts, "sample_rate", buf, 0);
416 }
417 if (ap->channels) {
418 snprintf(buf, sizeof(buf), "%d", ap->channels);
419 av_dict_set(&opts, "channels", buf, 0);
420 }
421 if (ap->width || ap->height) {
422 snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height);
423 av_dict_set(&opts, "video_size", buf, 0);
424 }
425 if (ap->pix_fmt != PIX_FMT_NONE) {
426 av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0);
427 }
428 if (ap->channel) {
429 snprintf(buf, sizeof(buf), "%d", ap->channel);
430 av_dict_set(&opts, "channel", buf, 0);
431 }
432 if (ap->standard) {
433 av_dict_set(&opts, "standard", ap->standard, 0);
434 }
435 if (ap->mpeg2ts_compute_pcr) {
436 av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0);
437 }
438 if (ap->initial_pause) {
439 av_dict_set(&opts, "initial_pause", "1", 0);
440 }
441 return opts;
442 }
443
444 /**
445 * Open a media file from an IO stream. 'fmt' must be specified.
446 */
447 int av_open_input_stream(AVFormatContext **ic_ptr,
448 AVIOContext *pb, const char *filename,
449 AVInputFormat *fmt, AVFormatParameters *ap)
450 {
451 int err;
452 AVDictionary *opts;
453 AVFormatContext *ic;
454 AVFormatParameters default_ap;
455
456 if(!ap){
457 ap=&default_ap;
458 memset(ap, 0, sizeof(default_ap));
459 }
460 opts = convert_format_parameters(ap);
461
462 if(!ap->prealloced_context)
463 ic = avformat_alloc_context();
464 else
465 ic = *ic_ptr;
466 if (!ic) {
467 err = AVERROR(ENOMEM);
468 goto fail;
469 }
470 if (pb && fmt && fmt->flags & AVFMT_NOFILE)
471 av_log(ic, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
472 "will be ignored with AVFMT_NOFILE format.\n");
473 else
474 ic->pb = pb;
475
476 if ((err = avformat_open_input(&ic, filename, fmt, &opts)) < 0)
477 goto fail;
478 ic->pb = ic->pb ? ic->pb : pb; // don't leak custom pb if it wasn't set above
479
480 fail:
481 *ic_ptr = ic;
482 av_dict_free(&opts);
483 return err;
484 }
485 #endif
486
487 /** size of probe buffer, for guessing file type from file contents */
488 #define PROBE_BUF_MIN 2048
489 #define PROBE_BUF_MAX (1<<20)
490
491 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
492 const char *filename, void *logctx,
493 unsigned int offset, unsigned int max_probe_size)
494 {
495 AVProbeData pd = { filename ? filename : "", NULL, -offset };
496 unsigned char *buf = NULL;
497 int ret = 0, probe_size;
498
499 if (!max_probe_size) {
500 max_probe_size = PROBE_BUF_MAX;
501 } else if (max_probe_size > PROBE_BUF_MAX) {
502 max_probe_size = PROBE_BUF_MAX;
503 } else if (max_probe_size < PROBE_BUF_MIN) {
504 return AVERROR(EINVAL);
505 }
506
507 if (offset >= max_probe_size) {
508 return AVERROR(EINVAL);
509 }
510
511 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
512 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
513 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
514 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
515
516 if (probe_size < offset) {
517 continue;
518 }
519
520 /* read probe data */
521 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
522 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
523 /* if the error was not end-of-file, fail; otherwise lower the score */
524 if (ret != AVERROR_EOF) {
525 av_free(buf);
526 return ret;
527 }
528 score = 0;
529 ret = 0; /* error was end of file, nothing read */
530 }
531 pd.buf_size += ret;
532 pd.buf = &buf[offset];
533
534 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
535
536 /* guess file format */
537 *fmt = av_probe_input_format2(&pd, 1, &score);
538 if(*fmt){
539 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
540 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
541 }else
542 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
543 }
544 }
545
546 if (!*fmt) {
547 av_free(buf);
548 return AVERROR_INVALIDDATA;
549 }
550
551 /* rewind. reuse probe buffer to avoid seeking */
552 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
553 av_free(buf);
554
555 return ret;
556 }
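/* Illustrative usage sketch (not part of the original source): probing an
 * already opened AVIOContext 'pb' (hypothetical) before opening it as an
 * input.  Passing 0 for offset and max_probe_size selects the defaults
 * described above:
 *
 *     AVInputFormat *fmt = NULL;
 *     if (av_probe_input_buffer(pb, &fmt, "pipe:", NULL, 0, 0) >= 0) {
 *         // fmt now points at the detected demuxer and pb has been rewound
 *     }
 */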
557
558 #if FF_API_FORMAT_PARAMETERS
559 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
560 AVInputFormat *fmt,
561 int buf_size,
562 AVFormatParameters *ap)
563 {
564 int err;
565 AVDictionary *opts = convert_format_parameters(ap);
566
567 if (!ap || !ap->prealloced_context)
568 *ic_ptr = NULL;
569
570 err = avformat_open_input(ic_ptr, filename, fmt, &opts);
571
572 av_dict_free(&opts);
573 return err;
574 }
575 #endif
576
577 /* open input file and probe the format if necessary */
578 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
579 {
580 int ret;
581 AVProbeData pd = {filename, NULL, 0};
582
583 if (s->pb) {
584 s->flags |= AVFMT_FLAG_CUSTOM_IO;
585 if (!s->iformat)
586 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
587 else if (s->iformat->flags & AVFMT_NOFILE)
588 return AVERROR(EINVAL);
589 return 0;
590 }
591
592 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
593 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
594 return 0;
595
596 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
597 &s->interrupt_callback, options)) < 0)
598 return ret;
599 if (s->iformat)
600 return 0;
601 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
602 }
603
604 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
605 {
606 AVFormatContext *s = *ps;
607 int ret = 0;
608 AVFormatParameters ap = { { 0 } };
609 AVDictionary *tmp = NULL;
610
611 if (!s && !(s = avformat_alloc_context()))
612 return AVERROR(ENOMEM);
613 if (fmt)
614 s->iformat = fmt;
615
616 if (options)
617 av_dict_copy(&tmp, *options, 0);
618
619 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
620 goto fail;
621
622 if ((ret = init_input(s, filename, &tmp)) < 0)
623 goto fail;
624
625 /* check filename in case an image number is expected */
626 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
627 if (!av_filename_number_test(filename)) {
628 ret = AVERROR(EINVAL);
629 goto fail;
630 }
631 }
632
633 s->duration = s->start_time = AV_NOPTS_VALUE;
634 av_strlcpy(s->filename, filename, sizeof(s->filename));
635
636 /* allocate private data */
637 if (s->iformat->priv_data_size > 0) {
638 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
639 ret = AVERROR(ENOMEM);
640 goto fail;
641 }
642 if (s->iformat->priv_class) {
643 *(const AVClass**)s->priv_data = s->iformat->priv_class;
644 av_opt_set_defaults(s->priv_data);
645 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
646 goto fail;
647 }
648 }
649
650 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
651 if (s->pb)
652 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC);
653
654 if (s->iformat->read_header)
655 if ((ret = s->iformat->read_header(s, &ap)) < 0)
656 goto fail;
657
658 if (s->pb && !s->data_offset)
659 s->data_offset = avio_tell(s->pb);
660
661 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
662
663 if (options) {
664 av_dict_free(options);
665 *options = tmp;
666 }
667 *ps = s;
668 return 0;
669
670 fail:
671 av_dict_free(&tmp);
672 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
673 avio_close(s->pb);
674 avformat_free_context(s);
675 *ps = NULL;
676 return ret;
677 }
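/* Illustrative usage sketch (not part of the original source): opening an input
 * with an options dictionary.  The filename and the "video_size" option are
 * hypothetical; options not consumed by the demuxer are returned in the
 * dictionary:
 *
 *     AVFormatContext *ic = NULL;
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "video_size", "640x480", 0);
 *     if (avformat_open_input(&ic, "input.avi", NULL, &opts) >= 0) {
 *         AVDictionaryEntry *e = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX);
 *         if (e)
 *             av_log(ic, AV_LOG_WARNING, "Option %s was not recognized by the demuxer.\n", e->key);
 *     }
 *     av_dict_free(&opts);
 */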
678
679 /*******************************************************/
680
681 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
682 AVPacketList **plast_pktl){
683 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
684 if (!pktl)
685 return NULL;
686
687 if (*packet_buffer)
688 (*plast_pktl)->next = pktl;
689 else
690 *packet_buffer = pktl;
691
692 /* add the packet to the buffered packet list */
693 *plast_pktl = pktl;
694 pktl->pkt= *pkt;
695 return &pktl->pkt;
696 }
697
698 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
699 {
700 int ret, i;
701 AVStream *st;
702
703 for(;;){
704 AVPacketList *pktl = s->raw_packet_buffer;
705
706 if (pktl) {
707 *pkt = pktl->pkt;
708 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
709 !s->streams[pkt->stream_index]->probe_packets ||
710 s->raw_packet_buffer_remaining_size < pkt->size){
711 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
712 av_freep(&pd->buf);
713 pd->buf_size = 0;
714 s->raw_packet_buffer = pktl->next;
715 s->raw_packet_buffer_remaining_size += pkt->size;
716 av_free(pktl);
717 return 0;
718 }
719 }
720
721 av_init_packet(pkt);
722 ret= s->iformat->read_packet(s, pkt);
723 if (ret < 0) {
724 if (!pktl || ret == AVERROR(EAGAIN))
725 return ret;
726 for (i = 0; i < s->nb_streams; i++)
727 s->streams[i]->probe_packets = 0;
728 continue;
729 }
730
731 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
732 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
733 av_log(s, AV_LOG_WARNING,
734 "Dropped corrupted packet (stream = %d)\n",
735 pkt->stream_index);
736 av_free_packet(pkt);
737 continue;
738 }
739
740 st= s->streams[pkt->stream_index];
741
742 switch(st->codec->codec_type){
743 case AVMEDIA_TYPE_VIDEO:
744 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
745 break;
746 case AVMEDIA_TYPE_AUDIO:
747 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
748 break;
749 case AVMEDIA_TYPE_SUBTITLE:
750 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
751 break;
752 }
753
754 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
755 !st->probe_packets))
756 return ret;
757
758 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
759 s->raw_packet_buffer_remaining_size -= pkt->size;
760
761 if(st->codec->codec_id == CODEC_ID_PROBE){
762 AVProbeData *pd = &st->probe_data;
763 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
764 --st->probe_packets;
765
766 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
767 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
768 pd->buf_size += pkt->size;
769 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
770
771 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
772 //FIXME we don't reduce the score to 0 when we run out of buffer space in bytes
773 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
774 if(st->codec->codec_id != CODEC_ID_PROBE){
775 pd->buf_size=0;
776 av_freep(&pd->buf);
777 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
778 }
779 }
780 }
781 }
782 }
783
784 /**********************************************************/
785
786 /**
787 * Get the number of samples of an audio frame. Return -1 on error.
788 */
789 static int get_audio_frame_size(AVCodecContext *enc, int size)
790 {
791 int frame_size;
792
793 if(enc->codec_id == CODEC_ID_VORBIS)
794 return -1;
795
796 if (enc->frame_size <= 1) {
797 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
798
799 if (bits_per_sample) {
800 if (enc->channels == 0)
801 return -1;
802 frame_size = (size << 3) / (bits_per_sample * enc->channels);
803 } else {
804 /* used for example by ADPCM codecs */
805 if (enc->bit_rate == 0)
806 return -1;
807 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
808 }
809 } else {
810 frame_size = enc->frame_size;
811 }
812 return frame_size;
813 }
814
815
816 /**
817 * Compute the frame duration in seconds as the fraction *pnum / *pden; both are set to 0 if unavailable.
818 */
819 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
820 AVCodecParserContext *pc, AVPacket *pkt)
821 {
822 int frame_size;
823
824 *pnum = 0;
825 *pden = 0;
826 switch(st->codec->codec_type) {
827 case AVMEDIA_TYPE_VIDEO:
828 if(st->time_base.num*1000LL > st->time_base.den){
829 *pnum = st->time_base.num;
830 *pden = st->time_base.den;
831 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
832 *pnum = st->codec->time_base.num;
833 *pden = st->codec->time_base.den;
834 if (pc && pc->repeat_pict) {
835 *pnum = (*pnum) * (1 + pc->repeat_pict);
836 }
837 //If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet,
838 //so if we have no parser in that case, leave the duration undefined.
839 if(st->codec->ticks_per_frame>1 && !pc){
840 *pnum = *pden = 0;
841 }
842 }
843 break;
844 case AVMEDIA_TYPE_AUDIO:
845 frame_size = get_audio_frame_size(st->codec, pkt->size);
846 if (frame_size <= 0 || st->codec->sample_rate <= 0)
847 break;
848 *pnum = frame_size;
849 *pden = st->codec->sample_rate;
850 break;
851 default:
852 break;
853 }
854 }
855
856 static int is_intra_only(AVCodecContext *enc){
857 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
858 return 1;
859 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
860 switch(enc->codec_id){
861 case CODEC_ID_MJPEG:
862 case CODEC_ID_MJPEGB:
863 case CODEC_ID_LJPEG:
864 case CODEC_ID_PRORES:
865 case CODEC_ID_RAWVIDEO:
866 case CODEC_ID_DVVIDEO:
867 case CODEC_ID_HUFFYUV:
868 case CODEC_ID_FFVHUFF:
869 case CODEC_ID_ASV1:
870 case CODEC_ID_ASV2:
871 case CODEC_ID_VCR1:
872 case CODEC_ID_DNXHD:
873 case CODEC_ID_JPEG2000:
874 return 1;
875 default: break;
876 }
877 }
878 return 0;
879 }
880
881 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
882 int64_t dts, int64_t pts)
883 {
884 AVStream *st= s->streams[stream_index];
885 AVPacketList *pktl= s->packet_buffer;
886
887 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
888 return;
889
890 st->first_dts= dts - st->cur_dts;
891 st->cur_dts= dts;
892
893 for(; pktl; pktl= pktl->next){
894 if(pktl->pkt.stream_index != stream_index)
895 continue;
896 //FIXME think more about this check
897 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
898 pktl->pkt.pts += st->first_dts;
899
900 if(pktl->pkt.dts != AV_NOPTS_VALUE)
901 pktl->pkt.dts += st->first_dts;
902
903 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
904 st->start_time= pktl->pkt.pts;
905 }
906 if (st->start_time == AV_NOPTS_VALUE)
907 st->start_time = pts;
908 }
909
910 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
911 {
912 AVPacketList *pktl= s->packet_buffer;
913 int64_t cur_dts= 0;
914
915 if(st->first_dts != AV_NOPTS_VALUE){
916 cur_dts= st->first_dts;
917 for(; pktl; pktl= pktl->next){
918 if(pktl->pkt.stream_index == pkt->stream_index){
919 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
920 break;
921 cur_dts -= pkt->duration;
922 }
923 }
924 pktl= s->packet_buffer;
925 st->first_dts = cur_dts;
926 }else if(st->cur_dts)
927 return;
928
929 for(; pktl; pktl= pktl->next){
930 if(pktl->pkt.stream_index != pkt->stream_index)
931 continue;
932 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
933 && !pktl->pkt.duration){
934 pktl->pkt.dts= cur_dts;
935 if(!st->codec->has_b_frames)
936 pktl->pkt.pts= cur_dts;
937 cur_dts += pkt->duration;
938 pktl->pkt.duration= pkt->duration;
939 }else
940 break;
941 }
942 if(st->first_dts == AV_NOPTS_VALUE)
943 st->cur_dts= cur_dts;
944 }
945
946 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
947 AVCodecParserContext *pc, AVPacket *pkt)
948 {
949 int num, den, presentation_delayed, delay, i;
950 int64_t offset;
951
952 if (s->flags & AVFMT_FLAG_NOFILLIN)
953 return;
954
955 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
956 pkt->dts= AV_NOPTS_VALUE;
957
958 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
959 //FIXME Set low_delay = 0 when has_b_frames = 1
960 st->codec->has_b_frames = 1;
961
962 /* do we have a video B-frame ? */
963 delay= st->codec->has_b_frames;
964 presentation_delayed = 0;
965
966 // ignore delay caused by frame threading so that the mpeg2-without-dts
967 // warning will not trigger
968 if (delay && st->codec->active_thread_type&FF_THREAD_FRAME)
969 delay -= st->codec->thread_count-1;
970
971 /* XXX: need has_b_frames, but cannot get it if the codec is
972 not initialized */
973 if (delay &&
974 pc && pc->pict_type != AV_PICTURE_TYPE_B)
975 presentation_delayed = 1;
976
977 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
978 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
979 pkt->dts -= 1LL<<st->pts_wrap_bits;
980 }
981
982 // some MPEG-2 in MPEG-PS lacks dts (issue171 / input_file.mpg),
983 // so we take the conservative approach and discard both.
984 // Note: if this is misbehaving for an H.264 file then presentation_delayed is possibly not set correctly.
985 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
986 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
987 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
988 }
989
990 if (pkt->duration == 0) {
991 compute_frame_duration(&num, &den, st, pc, pkt);
992 if (den && num) {
993 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
994
995 if(pkt->duration != 0 && s->packet_buffer)
996 update_initial_durations(s, st, pkt);
997 }
998 }
999
1000 /* correct timestamps with byte offset if demuxers only have timestamps
1001 on packet boundaries */
1002 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1003 /* this will estimate bitrate based on this frame's duration and size */
1004 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1005 if(pkt->pts != AV_NOPTS_VALUE)
1006 pkt->pts += offset;
1007 if(pkt->dts != AV_NOPTS_VALUE)
1008 pkt->dts += offset;
1009 }
1010
1011 if (pc && pc->dts_sync_point >= 0) {
1012 // we have synchronization info from the parser
1013 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1014 if (den > 0) {
1015 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1016 if (pkt->dts != AV_NOPTS_VALUE) {
1017 // got DTS from the stream, update reference timestamp
1018 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1019 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1020 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1021 // compute DTS based on reference timestamp
1022 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1023 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1024 }
1025 if (pc->dts_sync_point > 0)
1026 st->reference_dts = pkt->dts; // new reference
1027 }
1028 }
1029
1030 /* This may be redundant, but it should not hurt. */
1031 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1032 presentation_delayed = 1;
1033
1034 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
1035 /* interpolate PTS and DTS if they are not present */
1036 //We skip H264 currently because delay and has_b_frames are not reliably set
1037 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1038 if (presentation_delayed) {
1039 /* DTS = decompression timestamp */
1040 /* PTS = presentation timestamp */
1041 if (pkt->dts == AV_NOPTS_VALUE)
1042 pkt->dts = st->last_IP_pts;
1043 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1044 if (pkt->dts == AV_NOPTS_VALUE)
1045 pkt->dts = st->cur_dts;
1046
1047 /* this is tricky: the dts must be incremented by the duration
1048 of the frame we are displaying, i.e. the last I- or P-frame */
1049 if (st->last_IP_duration == 0)
1050 st->last_IP_duration = pkt->duration;
1051 if(pkt->dts != AV_NOPTS_VALUE)
1052 st->cur_dts = pkt->dts + st->last_IP_duration;
1053 st->last_IP_duration = pkt->duration;
1054 st->last_IP_pts= pkt->pts;
1055 /* cannot compute PTS if not present (we can compute it only
1056 by knowing the future) */
1057 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
1058 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
1059 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
1060 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1061 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
1062 pkt->pts += pkt->duration;
1063 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
1064 }
1065 }
1066
1067 /* presentation is not delayed: PTS and DTS are the same */
1068 if(pkt->pts == AV_NOPTS_VALUE)
1069 pkt->pts = pkt->dts;
1070 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1071 if(pkt->pts == AV_NOPTS_VALUE)
1072 pkt->pts = st->cur_dts;
1073 pkt->dts = pkt->pts;
1074 if(pkt->pts != AV_NOPTS_VALUE)
1075 st->cur_dts = pkt->pts + pkt->duration;
1076 }
1077 }
1078
1079 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1080 st->pts_buffer[0]= pkt->pts;
1081 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1082 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1083 if(pkt->dts == AV_NOPTS_VALUE)
1084 pkt->dts= st->pts_buffer[0];
1085 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
1086 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1087 }
1088 if(pkt->dts > st->cur_dts)
1089 st->cur_dts = pkt->dts;
1090 }
1091
1092 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1093
1094 /* update flags */
1095 if(is_intra_only(st->codec))
1096 pkt->flags |= AV_PKT_FLAG_KEY;
1097 else if (pc) {
1098 pkt->flags = 0;
1099 /* keyframe computation */
1100 if (pc->key_frame == 1)
1101 pkt->flags |= AV_PKT_FLAG_KEY;
1102 else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
1103 pkt->flags |= AV_PKT_FLAG_KEY;
1104 }
1105 if (pc)
1106 pkt->convergence_duration = pc->convergence_duration;
1107 }
1108
1109
1110 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1111 {
1112 AVStream *st;
1113 int len, ret, i;
1114
1115 av_init_packet(pkt);
1116
1117 for(;;) {
1118 /* select current input stream component */
1119 st = s->cur_st;
1120 if (st) {
1121 if (!st->need_parsing || !st->parser) {
1122 /* no parsing needed: we just output the packet as is */
1123 /* raw data support */
1124 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
1125 compute_pkt_fields(s, st, NULL, pkt);
1126 s->cur_st = NULL;
1127 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1128 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1129 ff_reduce_index(s, st->index);
1130 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1131 }
1132 break;
1133 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1134 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1135 st->cur_ptr, st->cur_len,
1136 st->cur_pkt.pts, st->cur_pkt.dts,
1137 st->cur_pkt.pos);
1138 st->cur_pkt.pts = AV_NOPTS_VALUE;
1139 st->cur_pkt.dts = AV_NOPTS_VALUE;
1140 /* increment read pointer */
1141 st->cur_ptr += len;
1142 st->cur_len -= len;
1143
1144 /* return packet if any */
1145 if (pkt->size) {
1146 got_packet:
1147 pkt->duration = 0;
1148 pkt->stream_index = st->index;
1149 pkt->pts = st->parser->pts;
1150 pkt->dts = st->parser->dts;
1151 pkt->pos = st->parser->pos;
1152 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
1153 s->cur_st = NULL;
1154 pkt->destruct= st->cur_pkt.destruct;
1155 st->cur_pkt.destruct= NULL;
1156 st->cur_pkt.data = NULL;
1157 assert(st->cur_len == 0);
1158 }else{
1159 pkt->destruct = NULL;
1160 }
1161 compute_pkt_fields(s, st, st->parser, pkt);
1162
1163 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1164 ff_reduce_index(s, st->index);
1165 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1166 0, 0, AVINDEX_KEYFRAME);
1167 }
1168
1169 break;
1170 }
1171 } else {
1172 /* free packet */
1173 av_free_packet(&st->cur_pkt);
1174 s->cur_st = NULL;
1175 }
1176 } else {
1177 AVPacket cur_pkt;
1178 /* read next packet */
1179 ret = av_read_packet(s, &cur_pkt);
1180 if (ret < 0) {
1181 if (ret == AVERROR(EAGAIN))
1182 return ret;
1183 /* return the last frames, if any */
1184 for(i = 0; i < s->nb_streams; i++) {
1185 st = s->streams[i];
1186 if (st->parser && st->need_parsing) {
1187 av_parser_parse2(st->parser, st->codec,
1188 &pkt->data, &pkt->size,
1189 NULL, 0,
1190 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1191 AV_NOPTS_VALUE);
1192 if (pkt->size)
1193 goto got_packet;
1194 }
1195 }
1196 /* no more packets: really terminate parsing */
1197 return ret;
1198 }
1199 st = s->streams[cur_pkt.stream_index];
1200 st->cur_pkt= cur_pkt;
1201
1202 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1203 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1204 st->cur_pkt.pts < st->cur_pkt.dts){
1205 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1206 st->cur_pkt.stream_index,
1207 st->cur_pkt.pts,
1208 st->cur_pkt.dts,
1209 st->cur_pkt.size);
1210 // av_free_packet(&st->cur_pkt);
1211 // return -1;
1212 }
1213
1214 if(s->debug & FF_FDEBUG_TS)
1215 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1216 st->cur_pkt.stream_index,
1217 st->cur_pkt.pts,
1218 st->cur_pkt.dts,
1219 st->cur_pkt.size,
1220 st->cur_pkt.duration,
1221 st->cur_pkt.flags);
1222
1223 s->cur_st = st;
1224 st->cur_ptr = st->cur_pkt.data;
1225 st->cur_len = st->cur_pkt.size;
1226 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1227 st->parser = av_parser_init(st->codec->codec_id);
1228 if (!st->parser) {
1229 /* no parser available: just output the raw packets */
1230 st->need_parsing = AVSTREAM_PARSE_NONE;
1231 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1232 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1233 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1234 st->parser->flags |= PARSER_FLAG_ONCE;
1235 }
1236 }
1237 }
1238 }
1239 if(s->debug & FF_FDEBUG_TS)
1240 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1241 pkt->stream_index,
1242 pkt->pts,
1243 pkt->dts,
1244 pkt->size,
1245 pkt->duration,
1246 pkt->flags);
1247
1248 return 0;
1249 }
1250
1251 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1252 {
1253 AVPacketList *pktl;
1254 int eof=0;
1255 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1256
1257 for(;;){
1258 pktl = s->packet_buffer;
1259 if (pktl) {
1260 AVPacket *next_pkt= &pktl->pkt;
1261
1262 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1263 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1264 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1265 if( pktl->pkt.stream_index == next_pkt->stream_index
1266 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1267 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1268 next_pkt->pts= pktl->pkt.dts;
1269 }
1270 pktl= pktl->next;
1271 }
1272 pktl = s->packet_buffer;
1273 }
1274
1275 if( next_pkt->pts != AV_NOPTS_VALUE
1276 || next_pkt->dts == AV_NOPTS_VALUE
1277 || !genpts || eof){
1278 /* read packet from packet buffer, if there is data */
1279 *pkt = *next_pkt;
1280 s->packet_buffer = pktl->next;
1281 av_free(pktl);
1282 return 0;
1283 }
1284 }
1285 if(genpts){
1286 int ret= read_frame_internal(s, pkt);
1287 if(ret<0){
1288 if(pktl && ret != AVERROR(EAGAIN)){
1289 eof=1;
1290 continue;
1291 }else
1292 return ret;
1293 }
1294
1295 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1296 &s->packet_buffer_end)) < 0)
1297 return AVERROR(ENOMEM);
1298 }else{
1299 assert(!s->packet_buffer);
1300 return read_frame_internal(s, pkt);
1301 }
1302 }
1303 }
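/* Illustrative usage sketch (not part of the original source): a minimal demux
 * loop over an already opened context 'ic' (hypothetical).  The returned packet
 * references demuxer-owned data, so it must be freed (or duplicated) before the
 * next call:
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // ... dispatch on pkt.stream_index, use pkt.pts / pkt.data ...
 *         av_free_packet(&pkt);
 *     }
 */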
1304
1305 /* XXX: suppress the packet queue */
1306 static void flush_packet_queue(AVFormatContext *s)
1307 {
1308 AVPacketList *pktl;
1309
1310 for(;;) {
1311 pktl = s->packet_buffer;
1312 if (!pktl)
1313 break;
1314 s->packet_buffer = pktl->next;
1315 av_free_packet(&pktl->pkt);
1316 av_free(pktl);
1317 }
1318 while(s->raw_packet_buffer){
1319 pktl = s->raw_packet_buffer;
1320 s->raw_packet_buffer = pktl->next;
1321 av_free_packet(&pktl->pkt);
1322 av_free(pktl);
1323 }
1324 s->packet_buffer_end=
1325 s->raw_packet_buffer_end= NULL;
1326 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1327 }
1328
1329 /*******************************************************/
1330 /* seek support */
1331
1332 int av_find_default_stream_index(AVFormatContext *s)
1333 {
1334 int first_audio_index = -1;
1335 int i;
1336 AVStream *st;
1337
1338 if (s->nb_streams <= 0)
1339 return -1;
1340 for(i = 0; i < s->nb_streams; i++) {
1341 st = s->streams[i];
1342 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1343 return i;
1344 }
1345 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1346 first_audio_index = i;
1347 }
1348 return first_audio_index >= 0 ? first_audio_index : 0;
1349 }
1350
1351 /**
1352 * Flush the frame reader.
1353 */
1354 void ff_read_frame_flush(AVFormatContext *s)
1355 {
1356 AVStream *st;
1357 int i, j;
1358
1359 flush_packet_queue(s);
1360
1361 s->cur_st = NULL;
1362
1363 /* for each stream, reset read state */
1364 for(i = 0; i < s->nb_streams; i++) {
1365 st = s->streams[i];
1366
1367 if (st->parser) {
1368 av_parser_close(st->parser);
1369 st->parser = NULL;
1370 av_free_packet(&st->cur_pkt);
1371 }
1372 st->last_IP_pts = AV_NOPTS_VALUE;
1373 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1374 st->reference_dts = AV_NOPTS_VALUE;
1375 /* fail safe */
1376 st->cur_ptr = NULL;
1377 st->cur_len = 0;
1378
1379 st->probe_packets = MAX_PROBE_PACKETS;
1380
1381 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1382 st->pts_buffer[j]= AV_NOPTS_VALUE;
1383 }
1384 }
1385
1386 #if FF_API_SEEK_PUBLIC
1387 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1388 {
1389 ff_update_cur_dts(s, ref_st, timestamp);
1390 }
1391 #endif
1392
1393 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1394 {
1395 int i;
1396
1397 for(i = 0; i < s->nb_streams; i++) {
1398 AVStream *st = s->streams[i];
1399
1400 st->cur_dts = av_rescale(timestamp,
1401 st->time_base.den * (int64_t)ref_st->time_base.num,
1402 st->time_base.num * (int64_t)ref_st->time_base.den);
1403 }
1404 }
1405
1406 void ff_reduce_index(AVFormatContext *s, int stream_index)
1407 {
1408 AVStream *st= s->streams[stream_index];
1409 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1410
1411 if((unsigned)st->nb_index_entries >= max_entries){
1412 int i;
1413 for(i=0; 2*i<st->nb_index_entries; i++)
1414 st->index_entries[i]= st->index_entries[2*i];
1415 st->nb_index_entries= i;
1416 }
1417 }
1418
1419 int ff_add_index_entry(AVIndexEntry **index_entries,
1420 int *nb_index_entries,
1421 unsigned int *index_entries_allocated_size,
1422 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1423 {
1424 AVIndexEntry *entries, *ie;
1425 int index;
1426
1427 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1428 return -1;
1429
1430 entries = av_fast_realloc(*index_entries,
1431 index_entries_allocated_size,
1432 (*nb_index_entries + 1) *
1433 sizeof(AVIndexEntry));
1434 if(!entries)
1435 return -1;
1436
1437 *index_entries= entries;
1438
1439 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1440
1441 if(index<0){
1442 index= (*nb_index_entries)++;
1443 ie= &entries[index];
1444 assert(index==0 || ie[-1].timestamp < timestamp);
1445 }else{
1446 ie= &entries[index];
1447 if(ie->timestamp != timestamp){
1448 if(ie->timestamp <= timestamp)
1449 return -1;
1450 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1451 (*nb_index_entries)++;
1452 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1453 distance= ie->min_distance;
1454 }
1455
1456 ie->pos = pos;
1457 ie->timestamp = timestamp;
1458 ie->min_distance= distance;
1459 ie->size= size;
1460 ie->flags = flags;
1461
1462 return index;
1463 }
1464
1465 int av_add_index_entry(AVStream *st,
1466 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1467 {
1468 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1469 &st->index_entries_allocated_size, pos,
1470 timestamp, size, distance, flags);
1471 }
1472
1473 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1474 int64_t wanted_timestamp, int flags)
1475 {
1476 int a, b, m;
1477 int64_t timestamp;
1478
1479 a = - 1;
1480 b = nb_entries;
1481
1482 //optimize appending index entries at the end
1483 if(b && entries[b-1].timestamp < wanted_timestamp)
1484 a= b-1;
1485
1486 while (b - a > 1) {
1487 m = (a + b) >> 1;
1488 timestamp = entries[m].timestamp;
1489 if(timestamp >= wanted_timestamp)
1490 b = m;
1491 if(timestamp <= wanted_timestamp)
1492 a = m;
1493 }
1494 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1495
1496 if(!(flags & AVSEEK_FLAG_ANY)){
1497 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1498 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1499 }
1500 }
1501
1502 if(m == nb_entries)
1503 return -1;
1504 return m;
1505 }
1506
1507 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1508 int flags)
1509 {
1510 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1511 wanted_timestamp, flags);
1512 }
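/* Illustrative usage sketch (not part of the original source): looking up the
 * byte position of the keyframe at or before a stream-timebase timestamp 'ts'
 * (hypothetical) in stream 'st':
 *
 *     int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
 *     if (idx >= 0) {
 *         int64_t pos = st->index_entries[idx].pos;
 *         // without AVSEEK_FLAG_BACKWARD the first entry with timestamp >= ts is chosen;
 *         // AVSEEK_FLAG_ANY also accepts non-keyframe entries
 *     }
 */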
1513
1514 #if FF_API_SEEK_PUBLIC
1515 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1516 return ff_seek_frame_binary(s, stream_index, target_ts, flags);
1517 }
1518 #endif
1519
1520 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1521 {
1522 AVInputFormat *avif= s->iformat;
1523 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1524 int64_t ts_min, ts_max, ts;
1525 int index;
1526 int64_t ret;
1527 AVStream *st;
1528
1529 if (stream_index < 0)
1530 return -1;
1531
1532 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1533
1534 ts_max=
1535 ts_min= AV_NOPTS_VALUE;
1536 pos_limit= -1; //gcc falsely says it may be uninitialized
1537
1538 st= s->streams[stream_index];
1539 if(st->index_entries){
1540 AVIndexEntry *e;
1541
1542 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1543 index= FFMAX(index, 0);
1544 e= &st->index_entries[index];
1545
1546 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1547 pos_min= e->pos;
1548 ts_min= e->timestamp;
1549 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1550 pos_min,ts_min);
1551 }else{
1552 assert(index==0);
1553 }
1554
1555 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1556 assert(index < st->nb_index_entries);
1557 if(index >= 0){
1558 e= &st->index_entries[index];
1559 assert(e->timestamp >= target_ts);
1560 pos_max= e->pos;
1561 ts_max= e->timestamp;
1562 pos_limit= pos_max - e->min_distance;
1563 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1564 pos_max,pos_limit, ts_max);
1565 }
1566 }
1567
1568 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1569 if(pos<0)
1570 return -1;
1571
1572 /* do the seek */
1573 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1574 return ret;
1575
1576 ff_update_cur_dts(s, st, ts);
1577
1578 return 0;
1579 }
1580
1581 #if FF_API_SEEK_PUBLIC
1582 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1583 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1584 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1585 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1586 {
1587 return ff_gen_search(s, stream_index, target_ts, pos_min, pos_max,
1588 pos_limit, ts_min, ts_max, flags, ts_ret,
1589 read_timestamp);
1590 }
1591 #endif
1592
1593 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1594 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1595 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1596 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1597 {
1598 int64_t pos, ts;
1599 int64_t start_pos, filesize;
1600 int no_change;
1601
1602 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1603
1604 if(ts_min == AV_NOPTS_VALUE){
1605 pos_min = s->data_offset;
1606 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1607 if (ts_min == AV_NOPTS_VALUE)
1608 return -1;
1609 }
1610
1611 if(ts_max == AV_NOPTS_VALUE){
1612 int step= 1024;
1613 filesize = avio_size(s->pb);
1614 pos_max = filesize - 1;
1615 do{
1616 pos_max -= step;
1617 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1618 step += step;
1619 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1620 if (ts_max == AV_NOPTS_VALUE)
1621 return -1;
1622
1623 for(;;){
1624 int64_t tmp_pos= pos_max + 1;
1625 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1626 if(tmp_ts == AV_NOPTS_VALUE)
1627 break;
1628 ts_max= tmp_ts;
1629 pos_max= tmp_pos;
1630 if(tmp_pos >= filesize)
1631 break;
1632 }
1633 pos_limit= pos_max;
1634 }
1635
1636 if(ts_min > ts_max){
1637 return -1;
1638 }else if(ts_min == ts_max){
1639 pos_limit= pos_min;
1640 }
1641
1642 no_change=0;
1643 while (pos_min < pos_limit) {
1644 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1645 pos_min, pos_max, ts_min, ts_max);
1646 assert(pos_limit <= pos_max);
1647
1648 if(no_change==0){
1649 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1650 // interpolate position (better than dichotomy)
1651 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1652 + pos_min - approximate_keyframe_distance;
1653 }else if(no_change==1){
1654 // bisection, if interpolation failed to change min or max pos last time
1655 pos = (pos_min + pos_limit)>>1;
1656 }else{
1657 /* linear search if bisection failed, can only happen if there
1658 are very few or no keyframes between min/max */
1659 pos=pos_min;
1660 }
1661 if(pos <= pos_min)
1662 pos= pos_min + 1;
1663 else if(pos > pos_limit)
1664 pos= pos_limit;
1665 start_pos= pos;
1666
1667 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1668 if(pos == pos_max)
1669 no_change++;
1670 else
1671 no_change=0;
1672 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1673 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1674 pos_limit, start_pos, no_change);
1675 if(ts == AV_NOPTS_VALUE){
1676 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1677 return -1;
1678 }
1679 assert(ts != AV_NOPTS_VALUE);
1680 if (target_ts <= ts) {
1681 pos_limit = start_pos - 1;
1682 pos_max = pos;
1683 ts_max = ts;
1684 }
1685 if (target_ts >= ts) {
1686 pos_min = pos;
1687 ts_min = ts;
1688 }
1689 }
1690
1691 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1692 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1693 pos_min = pos;
1694 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1695 pos_min++;
1696 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1697 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1698 pos, ts_min, target_ts, ts_max);
1699 *ts_ret= ts;
1700 return pos;
1701 }
1702
1703 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1704 int64_t pos_min, pos_max;
1705 #if 0
1706 AVStream *st;
1707
1708 if (stream_index < 0)
1709 return -1;
1710
1711 st= s->streams[stream_index];
1712 #endif
1713
1714 pos_min = s->data_offset;
1715 pos_max = avio_size(s->pb) - 1;
1716
1717 if (pos < pos_min) pos= pos_min;
1718 else if(pos > pos_max) pos= pos_max;
1719
1720 avio_seek(s->pb, pos, SEEK_SET);
1721
1722 #if 0
1723 av_update_cur_dts(s, st, ts);
1724 #endif
1725 return 0;
1726 }
1727
1728 static int seek_frame_generic(AVFormatContext *s,
1729 int stream_index, int64_t timestamp, int flags)
1730 {
1731 int index;
1732 int64_t ret;
1733 AVStream *st;
1734 AVIndexEntry *ie;
1735
1736 st = s->streams[stream_index];
1737
1738 index = av_index_search_timestamp(st, timestamp, flags);
1739
1740 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1741 return -1;
1742
1743 if(index < 0 || index==st->nb_index_entries-1){
1744 AVPacket pkt;
1745
1746 if(st->nb_index_entries){
1747 assert(st->index_entries);
1748 ie= &st->index_entries[st->nb_index_entries-1];
1749 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1750 return ret;
1751 ff_update_cur_dts(s, st, ie->timestamp);
1752 }else{
1753 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1754 return ret;
1755 }
1756 for (;;) {
1757 int read_status;
1758 do{
1759 read_status = av_read_frame(s, &pkt);
1760 } while (read_status == AVERROR(EAGAIN));
1761 if (read_status < 0)
1762 break;
1763 av_free_packet(&pkt);
1764 if(stream_index == pkt.stream_index){
1765 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1766 break;
1767 }
1768 }
1769 index = av_index_search_timestamp(st, timestamp, flags);
1770 }
1771 if (index < 0)
1772 return -1;
1773
1774 ff_read_frame_flush(s);
1775 if (s->iformat->read_seek){
1776 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1777 return 0;
1778 }
1779 ie = &st->index_entries[index];
1780 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1781 return ret;
1782 ff_update_cur_dts(s, st, ie->timestamp);
1783
1784 return 0;
1785 }
1786
1787 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1788 {
1789 int ret;
1790 AVStream *st;
1791
1792 if (flags & AVSEEK_FLAG_BYTE) {
1793 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1794 return -1;
1795 ff_read_frame_flush(s);
1796 return seek_frame_byte(s, stream_index, timestamp, flags);
1797 }
1798
1799 if(stream_index < 0){
1800 stream_index= av_find_default_stream_index(s);
1801 if(stream_index < 0)
1802 return -1;
1803
1804 st= s->streams[stream_index];
1805 /* timestamp for default must be expressed in AV_TIME_BASE units */
1806 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1807 }
1808
1809 /* first, we try the format specific seek */
1810 if (s->iformat->read_seek) {
1811 ff_read_frame_flush(s);
1812 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1813 } else
1814 ret = -1;
1815 if (ret >= 0) {
1816 return 0;
1817 }
1818
1819 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1820 ff_read_frame_flush(s);
1821 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1822 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1823 ff_read_frame_flush(s);
1824 return seek_frame_generic(s, stream_index, timestamp, flags);
1825 }
1826 else
1827 return -1;
1828 }
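/* Illustrative usage sketch (not part of the original source): seeking stream 0
 * of an opened context 'ic' (hypothetical) to roughly 5 seconds, landing on the
 * keyframe at or before that point.  The timestamp must be in the stream's
 * time base:
 *
 *     AVStream *st = ic->streams[0];
 *     int64_t ts = av_rescale_q(5 * (int64_t)AV_TIME_BASE, AV_TIME_BASE_Q, st->time_base);
 *     if (av_seek_frame(ic, 0, ts, AVSEEK_FLAG_BACKWARD) < 0)
 *         av_log(ic, AV_LOG_ERROR, "Seek failed.\n");
 */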
1829
1830 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1831 {
1832 if(min_ts > ts || max_ts < ts)
1833 return -1;
1834
1835 if (s->iformat->read_seek2) {
1836 ff_read_frame_flush(s);
1837 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1838 }
1839
1840 if(s->iformat->read_timestamp){
1841 //try to seek via read_timestamp()
1842 }
1843
1844 //Fall back to the old API if the new one is not implemented but the old one is.
1845 //Note that the old API has somewhat different semantics.
1846 if(s->iformat->read_seek || 1)
1847 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1848
1849 // try some generic seek like seek_frame_generic() but with new ts semantics
1850 }
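/* Illustrative usage sketch (not part of the original source): with
 * stream_index set to -1 the timestamps are in AV_TIME_BASE units and a default
 * stream is chosen by the fallback path above.  The 5-second target is
 * hypothetical:
 *
 *     int64_t target = 5 * (int64_t)AV_TIME_BASE;
 *     avformat_seek_file(ic, -1, INT64_MIN, target, target, 0);
 *     // min_ts <= ts <= max_ts; here anything up to the target is acceptable
 */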
1851
1852 /*******************************************************/
1853
1854 /**
1855 * Check whether at least one stream has a known duration.
1856 *
1857 * @return TRUE if at least one stream has an accurate duration set
1858 */
1859 static int has_duration(AVFormatContext *ic)
1860 {
1861 int i;
1862 AVStream *st;
1863
1864 for(i = 0;i < ic->nb_streams; i++) {
1865 st = ic->streams[i];
1866 if (st->duration != AV_NOPTS_VALUE)
1867 return 1;
1868 }
1869 return 0;
1870 }
1871
1872 /**
1873 * Estimate the global stream timings from those of the individual streams.
1874 *
1875 * Also compute the global bitrate if possible.
1876 */
1877 static void update_stream_timings(AVFormatContext *ic)
1878 {
1879 int64_t start_time, start_time1, end_time, end_time1;
1880 int64_t duration, duration1, filesize;
1881 int i;
1882 AVStream *st;
1883
1884 start_time = INT64_MAX;
1885 end_time = INT64_MIN;
1886 duration = INT64_MIN;
1887 for(i = 0;i < ic->nb_streams; i++) {
1888 st = ic->streams[i];
1889 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1890 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1891 start_time = FFMIN(start_time, start_time1);
1892 if (st->duration != AV_NOPTS_VALUE) {
1893 end_time1 = start_time1
1894 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1895 end_time = FFMAX(end_time, end_time1);
1896 }
1897 }
1898 if (st->duration != AV_NOPTS_VALUE) {
1899 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1900 duration = FFMAX(duration, duration1);
1901 }
1902 }
1903 if (start_time != INT64_MAX) {
1904 ic->start_time = start_time;
1905 if (end_time != INT64_MIN)
1906 duration = FFMAX(duration, end_time - start_time);
1907 }
1908 if (duration != INT64_MIN) {
1909 ic->duration = duration;
1910 if (ic->pb && (filesize = avio_size(ic->pb)) > 0) {
1911 /* compute the bitrate */
1912 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
1913 (double)ic->duration;
1914 }
1915 }
1916 }
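/* Worked example (illustrative, not part of the original source) of the
 * bitrate formula above: for a 1000000 byte file with a duration of
 * 8 * AV_TIME_BASE (8 seconds),
 * bit_rate = 1000000 * 8.0 * AV_TIME_BASE / (8 * AV_TIME_BASE) = 1000000 bit/s. */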
1917
1918 static void fill_all_stream_timings(AVFormatContext *ic)
1919 {
1920 int i;
1921 AVStream *st;
1922
1923 update_stream_timings(ic);
1924 for(i = 0;i < ic->nb_streams; i++) {
1925 st = ic->streams[i];
1926 if (st->start_time == AV_NOPTS_VALUE) {
1927 if(ic->start_time != AV_NOPTS_VALUE)
1928 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1929 if(ic->duration != AV_NOPTS_VALUE)
1930 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1931 }
1932 }
1933 }
1934
1935 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1936 {
1937 int64_t filesize, duration;
1938 int bit_rate, i;
1939 AVStream *st;
1940
1941 /* if bit_rate is already set, we believe it */
1942 if (ic->bit_rate <= 0) {
1943 bit_rate = 0;
1944 for(i=0;i<ic->nb_streams;i++) {
1945 st = ic->streams[i];
1946 if (st->codec->bit_rate > 0)
1947 bit_rate += st->codec->bit_rate;
1948 }
1949 ic->bit_rate = bit_rate;
1950 }
1951
1952 /* if duration is already set, we believe it */
1953 if (ic->duration == AV_NOPTS_VALUE &&
1954 ic->bit_rate != 0) {
1955 filesize = ic->pb ? avio_size(ic->pb) : 0;
1956 if (filesize > 0) {
1957 for(i = 0; i < ic->nb_streams; i++) {
1958 st = ic->streams[i];
1959 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1960 if (st->duration == AV_NOPTS_VALUE)
1961 st->duration = duration;
1962 }
1963 }
1964 }
1965 }
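/* Worked example (illustrative, not part of the original source) of the
 * duration estimate above: with filesize = 5000000 bytes,
 * ic->bit_rate = 1000000 bit/s and a 1/90000 stream time base,
 * duration = av_rescale(8 * 5000000, 90000, 1000000 * 1) = 3600000 ticks,
 * i.e. 40 seconds, which matches 40000000 bits at 1000000 bit/s. */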
1966
1967 #define DURATION_MAX_READ_SIZE 250000
1968 #define DURATION_MAX_RETRY 3
1969
1970 /* only used for MPEG-PS and MPEG-TS streams */
1971 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1972 {
1973 AVPacket pkt1, *pkt = &pkt1;
1974 AVStream *st;
1975 int read_size, i, ret;
1976 int64_t end_time;
1977 int64_t filesize, offset, duration;
1978 int retry=0;
1979
1980 ic->cur_st = NULL;
1981
1982 /* flush packet queue */
1983 flush_packet_queue(ic);
1984
1985 for (i=0; i<ic->nb_streams; i++) {
1986 st = ic->streams[i];
1987 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1988 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
1989
1990 if (st->parser) {
1991 av_parser_close(st->parser);
1992 st->parser= NULL;
1993 av_free_packet(&st->cur_pkt);
1994 }
1995 }
1996
1997 /* estimate the end time (duration) */
1998 /* XXX: may need to support wrapping */
1999 filesize = ic->pb ? avio_size(ic->pb) : 0;
2000 end_time = AV_NOPTS_VALUE;
2001 do{
2002 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2003 if (offset < 0)
2004 offset = 0;
2005
2006 avio_seek(ic->pb, offset, SEEK_SET);
2007 read_size = 0;
2008 for(;;) {
2009 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2010 break;
2011
2012 do {
2013 ret = av_read_packet(ic, pkt);
2014 } while(ret == AVERROR(EAGAIN));
2015 if (ret != 0)
2016 break;
2017 read_size += pkt->size;
2018 st = ic->streams[pkt->stream_index];
2019 if (pkt->pts != AV_NOPTS_VALUE &&
2020 (st->start_time != AV_NOPTS_VALUE ||
2021 st->first_dts != AV_NOPTS_VALUE)) {
2022 duration = end_time = pkt->pts;
2023 if (st->start_time != AV_NOPTS_VALUE)
2024 duration -= st->start_time;
2025 else
2026 duration -= st->first_dts;
2027 if (duration < 0)
2028 duration += 1LL<<st->pts_wrap_bits;
2029 if (duration > 0) {
2030 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2031 st->duration = duration;
2032 }
2033 }
2034 av_free_packet(pkt);
2035 }
2036 }while( end_time==AV_NOPTS_VALUE
2037 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2038 && ++retry <= DURATION_MAX_RETRY);
2039
2040 fill_all_stream_timings(ic);
2041
2042 avio_seek(ic->pb, old_offset, SEEK_SET);
2043 for (i=0; i<ic->nb_streams; i++) {
2044 st= ic->streams[i];
2045 st->cur_dts= st->first_dts;
2046 st->last_IP_pts = AV_NOPTS_VALUE;
2047 st->reference_dts = AV_NOPTS_VALUE;
2048 }
2049 }
2050
2051 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2052 {
2053 int64_t file_size;
2054
2055 /* get the file size, if possible */
2056 if (ic->iformat->flags & AVFMT_NOFILE) {
2057 file_size = 0;
2058 } else {
2059 file_size = avio_size(ic->pb);
2060 file_size = FFMAX(0, file_size);
2061 }
2062
2063 if ((!strcmp(ic->iformat->name, "mpeg") ||
2064 !strcmp(ic->iformat->name, "mpegts")) &&
2065 file_size && ic->pb->seekable) {
2066 /* get accurate estimate from the PTSes */
2067 estimate_timings_from_pts(ic, old_offset);
2068 } else if (has_duration(ic)) {
2069 /* at least one component has timings - we use them for all
2070 the components */
2071 fill_all_stream_timings(ic);
2072 } else {
2073 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2074 /* less precise: use bitrate info */
2075 estimate_timings_from_bit_rate(ic);
2076 }
2077 update_stream_timings(ic);
2078
2079 {
2080 int i;
2081 AVStream av_unused *st;
2082 for(i = 0;i < ic->nb_streams; i++) {
2083 st = ic->streams[i];
2084 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2085 (double) st->start_time / AV_TIME_BASE,
2086 (double) st->duration / AV_TIME_BASE);
2087 }
2088 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2089 (double) ic->start_time / AV_TIME_BASE,
2090 (double) ic->duration / AV_TIME_BASE,
2091 ic->bit_rate / 1000);
2092 }
2093 }
2094
2095 static int has_codec_parameters(AVCodecContext *avctx)
2096 {
2097 int val;
2098 switch (avctx->codec_type) {
2099 case AVMEDIA_TYPE_AUDIO:
2100 val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE;
2101 if (!avctx->frame_size &&
2102 (avctx->codec_id == CODEC_ID_VORBIS ||
2103 avctx->codec_id == CODEC_ID_AAC ||
2104 avctx->codec_id == CODEC_ID_MP1 ||
2105 avctx->codec_id == CODEC_ID_MP2 ||
2106 avctx->codec_id == CODEC_ID_MP3 ||
2107 avctx->codec_id == CODEC_ID_CELT))
2108 return 0;
2109 break;
2110 case AVMEDIA_TYPE_VIDEO:
2111 val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE;
2112 break;
2113 default:
2114 val = 1;
2115 break;
2116 }
2117 return avctx->codec_id != CODEC_ID_NONE && val != 0;
2118 }
2119
2120 static int has_decode_delay_been_guessed(AVStream *st)
2121 {
2122 return st->codec->codec_id != CODEC_ID_H264 ||
2123 st->info->nb_decoded_frames >= 6;
2124 }
2125
2126 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2127 {
2128 int16_t *samples;
2129 AVCodec *codec;
2130 int got_picture, data_size, ret=0;
2131 AVFrame picture;
2132
2133 if(!st->codec->codec){
2134 codec = avcodec_find_decoder(st->codec->codec_id);
2135 if (!codec)
2136 return -1;
2137 ret = avcodec_open2(st->codec, codec, options);
2138 if (ret < 0)
2139 return ret;
2140 }
2141
2142 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st) ||
2143 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF)) {
2144 switch(st->codec->codec_type) {
2145 case AVMEDIA_TYPE_VIDEO:
2146 avcodec_get_frame_defaults(&picture);
2147 ret = avcodec_decode_video2(st->codec, &picture,
2148 &got_picture, avpkt);
2149 if (got_picture)
2150 st->info->nb_decoded_frames++;
2151 break;
2152 case AVMEDIA_TYPE_AUDIO:
2153 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2154 samples = av_malloc(data_size);
2155 if (!samples)
2156 goto fail;
2157 ret = avcodec_decode_audio3(st->codec, samples,
2158 &data_size, avpkt);
2159 av_free(samples);
2160 break;
2161 default:
2162 break;
2163 }
2164 }
2165 fail:
2166 return ret;
2167 }
2168
2169 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2170 {
2171 while (tags->id != CODEC_ID_NONE) {
2172 if (tags->id == id)
2173 return tags->tag;
2174 tags++;
2175 }
2176 return 0;
2177 }
2178
2179 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2180 {
2181 int i;
2182 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2183 if(tag == tags[i].tag)
2184 return tags[i].id;
2185 }
2186 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2187 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2188 return tags[i].id;
2189 }
2190 return CODEC_ID_NONE;
2191 }
2192
2193 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2194 {
2195 int i;
2196 for(i=0; tags && tags[i]; i++){
2197 int tag= ff_codec_get_tag(tags[i], id);
2198 if(tag) return tag;
2199 }
2200 return 0;
2201 }
2202
2203 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2204 {
2205 int i;
2206 for(i=0; tags && tags[i]; i++){
2207 enum CodecID id= ff_codec_get_id(tags[i], tag);
2208 if(id!=CODEC_ID_NONE) return id;
2209 }
2210 return CODEC_ID_NONE;
2211 }
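/* Illustrative sketch (not part of the original source): mapping between a
 * RIFF fourcc and a codec id. The ff_codec_bmp_tags table from riff.h is
 * assumed here purely as an example of an AVCodecTag table.
 *
 *     enum CodecID id  = ff_codec_get_id(ff_codec_bmp_tags, MKTAG('D','I','V','X'));
 *     unsigned int tag = ff_codec_get_tag(ff_codec_bmp_tags, CODEC_ID_MPEG4);
 */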
2212
2213 static void compute_chapters_end(AVFormatContext *s)
2214 {
2215 unsigned int i, j;
2216 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2217
2218 for (i = 0; i < s->nb_chapters; i++)
2219 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2220 AVChapter *ch = s->chapters[i];
2221 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2222 : INT64_MAX;
2223
2224 for (j = 0; j < s->nb_chapters; j++) {
2225 AVChapter *ch1 = s->chapters[j];
2226 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2227 if (j != i && next_start > ch->start && next_start < end)
2228 end = next_start;
2229 }
2230 ch->end = (end == INT64_MAX) ? ch->start : end;
2231 }
2232 }
2233
2234 static int get_std_framerate(int i){
2235 if(i<60*12) return i*1001;
2236 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2237 }
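/* Note (added, derived from the code above): the returned values are frame
 * rates expressed in units of 1/(1001*12) Hz, so an index i < 60*12
 * corresponds to i/12 fps (e.g. i == 300 -> 25 fps), while the last five
 * entries correspond to the NTSC-style rates 24000/1001, 30000/1001,
 * 60000/1001, 12000/1001 and 15000/1001 fps. */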
2238
2239 /*
2240 * Check whether the time base is unreliable.
2241 * This is a heuristic that balances quick acceptance of the values in
2242 * the headers against some extra checks.
2243 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2244 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2245 * There are also "variable" fps files that this needs to detect.
2246 */
2247 static int tb_unreliable(AVCodecContext *c){
2248 if( c->time_base.den >= 101L*c->time_base.num
2249 || c->time_base.den < 5L*c->time_base.num
2250 /* || c->codec_tag == AV_RL32("DIVX")
2251 || c->codec_tag == AV_RL32("XVID")*/
2252 || c->codec_id == CODEC_ID_MPEG2VIDEO
2253 || c->codec_id == CODEC_ID_H264
2254 )
2255 return 1;
2256 return 0;
2257 }
2258
2259 #if FF_API_FORMAT_PARAMETERS
2260 int av_find_stream_info(AVFormatContext *ic)
2261 {
2262 return avformat_find_stream_info(ic, NULL);
2263 }
2264 #endif
2265
2266 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2267 {
2268 int i, count, ret, read_size, j;
2269 AVStream *st;
2270 AVPacket pkt1, *pkt;
2271 int64_t old_offset = avio_tell(ic->pb);
2272 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2273
2274 for(i=0;i<ic->nb_streams;i++) {
2275 AVCodec *codec;
2276 st = ic->streams[i];
2277
2278 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2279 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2280 /* if(!st->time_base.num)
2281 st->time_base= */
2282 if(!st->codec->time_base.num)
2283 st->codec->time_base= st->time_base;
2284 }
2285 //the parser is only needed for the extradata split code below
2286 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2287 st->parser = av_parser_init(st->codec->codec_id);
2288 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2289 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2290 }
2291 }
2292 assert(!st->codec->codec);
2293 codec = avcodec_find_decoder(st->codec->codec_id);
2294
2295 /* Ensure that subtitle_header is properly set. */
2296 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2297 && codec && !st->codec->codec)
2298 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2299
2300 //try to just open decoders, in case this is enough to get parameters
2301 if(!has_codec_parameters(st->codec)){
2302 if (codec && !st->codec->codec)
2303 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2304 }
2305 }
2306
2307 for (i=0; i<ic->nb_streams; i++) {
2308 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2309 }
2310
2311 count = 0;
2312 read_size = 0;
2313 for(;;) {
2314 if (ff_check_interrupt(&ic->interrupt_callback)){
2315 ret= AVERROR_EXIT;
2316 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2317 break;
2318 }
2319
2320 /* check if one codec still needs to be handled */
2321 for(i=0;i<ic->nb_streams;i++) {
2322 int fps_analyze_framecount = 20;
2323
2324 st = ic->streams[i];
2325 if (!has_codec_parameters(st->codec))
2326 break;
2327 /* if the timebase is coarse (like the usual millisecond precision
2328 of mkv), we need to analyze more frames to reliably arrive at
2329 the correct fps */
2330 if (av_q2d(st->time_base) > 0.0005)
2331 fps_analyze_framecount *= 2;
2332 if (ic->fps_probe_size >= 0)
2333 fps_analyze_framecount = ic->fps_probe_size;
2334 /* variable fps and no guess at the real fps */
2335 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2336 && st->info->duration_count < fps_analyze_framecount
2337 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2338 break;
2339 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2340 break;
2341 if(st->first_dts == AV_NOPTS_VALUE)
2342 break;
2343 }
2344 if (i == ic->nb_streams) {
2345 /* NOTE: if the format has no header, then we need to read
2346 some packets to get most of the streams, so we cannot
2347 stop here */
2348 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2349 /* if we found the info for all the codecs, we can stop */
2350 ret = count;
2351 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2352 break;
2353 }
2354 }
2355 /* we did not get all the codec info, but we read too much data */
2356 if (read_size >= ic->probesize) {
2357 ret = count;
2358 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2359 break;
2360 }
2361
2362 /* NOTE: a new stream can be added here if the file has no header
2363 (AVFMTCTX_NOHEADER) */
2364 ret = read_frame_internal(ic, &pkt1);
2365 if (ret == AVERROR(EAGAIN))
2366 continue;
2367
2368 if (ret < 0) {
2369 /* EOF or error */
2370 ret = -1; /* we could not have all the codec parameters before EOF */
2371 for(i=0;i<ic->nb_streams;i++) {
2372 st = ic->streams[i];
2373 if (!has_codec_parameters(st->codec)){
2374 char buf[256];
2375 avcodec_string(buf, sizeof(buf), st->codec, 0);
2376 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2377 } else {
2378 ret = 0;
2379 }
2380 }
2381 break;
2382 }
2383
2384 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2385 if ((ret = av_dup_packet(pkt)) < 0)
2386 goto find_stream_info_err;
2387
2388 read_size += pkt->size;
2389
2390 st = ic->streams[pkt->stream_index];
2391 if (st->codec_info_nb_frames>1) {
2392 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2393 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2394 break;
2395 }
2396 st->info->codec_info_duration += pkt->duration;
2397 }
2398 {
2399 int64_t last = st->info->last_dts;
2400
2401 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2402 int64_t duration= pkt->dts - last;
2403 double dur= duration * av_q2d(st->time_base);
2404
2405 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2406 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2407 if (st->info->duration_count < 2)
2408 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
2409 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2410 int framerate= get_std_framerate(i);
2411 int ticks= lrintf(dur*framerate/(1001*12));
2412 double error = dur - (double)ticks*1001*12 / framerate;
2413 st->info->duration_error[i] += error*error;
2414 }
2415 st->info->duration_count++;
2416 // ignore the first 4 values, they might have some random jitter
2417 if (st->info->duration_count > 3)
2418 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2419 }
2420 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2421 st->info->last_dts = pkt->dts;
2422 }
2423 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2424 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2425 if(i){
2426 st->codec->extradata_size= i;
2427 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2428 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2429 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2430 }
2431 }
2432
2433 /* If we still have no information, we try to open the codec and
2434 decompress the frame. We try to avoid that in most cases, as
2435 it takes longer and uses more memory. For MPEG-4, we need to
2436 decompress for QuickTime.
2437
2438 If CODEC_CAP_CHANNEL_CONF is set, this forces decoding of at
2439 least one frame of codec data; this makes sure the codec initializes
2440 the channel configuration and does not just trust the values from the container.
2441 */
2442 try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i] : NULL);
2443
2444 st->codec_info_nb_frames++;
2445 count++;
2446 }
2447
2448 // close codecs which were opened in try_decode_frame()
2449 for(i=0;i<ic->nb_streams;i++) {
2450 st = ic->streams[i];
2451 if(st->codec->codec)
2452 avcodec_close(st->codec);
2453 }
2454 for(i=0;i<ic->nb_streams;i++) {
2455 st = ic->streams[i];
2456 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2457 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2458 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2459 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2460 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2461 // the check for tb_unreliable() is not completely correct, since this is not about handling
2462 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2463 // ipmovie.c produces.
2464 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2465 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2466 if (st->info->duration_count && !st->r_frame_rate.num
2467 && tb_unreliable(st->codec) /*&&
2468 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2469 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2470 int num = 0;
2471 double best_error= 2*av_q2d(st->time_base);
2472 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2473
2474 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2475 double error = st->info->duration_error[j] * get_std_framerate(j);
2476 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2477 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2478 if(error < best_error){
2479 best_error= error;
2480 num = get_std_framerate(j);
2481 }
2482 }
2483 // do not increase frame rate by more than 1 % in order to match a standard rate.
2484 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2485 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2486 }
2487
2488 if (!st->r_frame_rate.num){
2489 if( st->codec->time_base.den * (int64_t)st->time_base.num
2490 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2491 st->r_frame_rate.num = st->codec->time_base.den;
2492 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2493 }else{
2494 st->r_frame_rate.num = st->time_base.den;
2495 st->r_frame_rate.den = st->time_base.num;
2496 }
2497 }
2498 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2499 if(!st->codec->bits_per_coded_sample)
2500 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2501 // set stream disposition based on audio service type
2502 switch (st->codec->audio_service_type) {
2503 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2504 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2505 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2506 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2507 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2508 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2509 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2510 st->disposition = AV_DISPOSITION_COMMENT; break;
2511 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2512 st->disposition = AV_DISPOSITION_KARAOKE; break;
2513 }
2514 }
2515 }
2516
2517 estimate_timings(ic, old_offset);
2518
2519 compute_chapters_end(ic);
2520
2521 #if 0
2522 /* correct DTS for B-frame streams with no timestamps */
2523 for(i=0;i<ic->nb_streams;i++) {
2524 st = ic->streams[i];
2525 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2526 if(b-frames){
2527 ppktl = &ic->packet_buffer;
2528 while(ppkt1){
2529 if(ppkt1->stream_index != i)
2530 continue;
2531 if(ppkt1->pkt->dts < 0)
2532 break;
2533 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2534 break;
2535 ppkt1->pkt->dts -= delta;
2536 ppkt1= ppkt1->next;
2537 }
2538 if(ppkt1)
2539 continue;
2540 st->cur_dts -= delta;
2541 }
2542 }
2543 }
2544 #endif
2545
2546 find_stream_info_err:
2547 for (i=0; i < ic->nb_streams; i++)
2548 av_freep(&ic->streams[i]->info);
2549 return ret;
2550 }
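/* Illustrative usage sketch (not part of the original source): probing an
 * input before reading packets, with error handling abbreviated and
 * 'filename' assumed to be a valid input path.
 *
 *     AVFormatContext *ic = NULL;
 *     if (avformat_open_input(&ic, filename, NULL, NULL) < 0)
 *         return -1;
 *     if (avformat_find_stream_info(ic, NULL) < 0) {
 *         av_close_input_file(ic);
 *         return -1;
 *     }
 *     av_dump_format(ic, 0, filename, 0);
 */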
2551
2552 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2553 {
2554 int i, j;
2555
2556 for (i = 0; i < ic->nb_programs; i++)
2557 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2558 if (ic->programs[i]->stream_index[j] == s)
2559 return ic->programs[i];
2560 return NULL;
2561 }
2562
2563 int av_find_best_stream(AVFormatContext *ic,
2564 enum AVMediaType type,
2565 int wanted_stream_nb,
2566 int related_stream,
2567 AVCodec **decoder_ret,
2568 int flags)
2569 {
2570 int i, nb_streams = ic->nb_streams;
2571 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2572 unsigned *program = NULL;
2573 AVCodec *decoder = NULL, *best_decoder = NULL;
2574
2575 if (related_stream >= 0 && wanted_stream_nb < 0) {
2576 AVProgram *p = find_program_from_stream(ic, related_stream);
2577 if (p) {
2578 program = p->stream_index;
2579 nb_streams = p->nb_stream_indexes;
2580 }
2581 }
2582 for (i = 0; i < nb_streams; i++) {
2583 int real_stream_index = program ? program[i] : i;
2584 AVStream *st = ic->streams[real_stream_index];
2585 AVCodecContext *avctx = st->codec;
2586 if (avctx->codec_type != type)
2587 continue;
2588 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2589 continue;
2590 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2591 continue;
2592 if (decoder_ret) {
2593 decoder = avcodec_find_decoder(st->codec->codec_id);
2594 if (!decoder) {
2595 if (ret < 0)
2596 ret = AVERROR_DECODER_NOT_FOUND;
2597 continue;
2598 }
2599 }
2600 if (best_count >= st->codec_info_nb_frames)
2601 continue;
2602 best_count = st->codec_info_nb_frames;
2603 ret = real_stream_index;
2604 best_decoder = decoder;
2605 if (program && i == nb_streams - 1 && ret < 0) {
2606 program = NULL;
2607 nb_streams = ic->nb_streams;
2608 i = 0; /* no related stream found, try again with everything */
2609 }
2610 }
2611 if (decoder_ret)
2612 *decoder_ret = best_decoder;
2613 return ret;
2614 }
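/* Illustrative usage sketch (not part of the original source): selecting the
 * "best" video stream and opening its decoder after avformat_find_stream_info().
 *
 *     AVCodec *dec = NULL;
 *     int vidx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (vidx >= 0 && dec)
 *         avcodec_open2(ic->streams[vidx]->codec, dec, NULL);
 */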
2615
2616 /*******************************************************/
2617
2618 int av_read_play(AVFormatContext *s)
2619 {
2620 if (s->iformat->read_play)
2621 return s->iformat->read_play(s);
2622 if (s->pb)
2623 return avio_pause(s->pb, 0);
2624 return AVERROR(ENOSYS);
2625 }
2626
2627 int av_read_pause(AVFormatContext *s)
2628 {
2629 if (s->iformat->read_pause)
2630 return s->iformat->read_pause(s);
2631 if (s->pb)
2632 return avio_pause(s->pb, 1);
2633 return AVERROR(ENOSYS);
2634 }
2635
2636 void av_close_input_stream(AVFormatContext *s)
2637 {
2638 flush_packet_queue(s);
2639 if (s->iformat->read_close)
2640 s->iformat->read_close(s);
2641 avformat_free_context(s);
2642 }
2643
2644 void avformat_free_context(AVFormatContext *s)
2645 {
2646 int i;
2647 AVStream *st;
2648
2649 av_opt_free(s);
2650 if (s->iformat && s->iformat->priv_class && s->priv_data)
2651 av_opt_free(s->priv_data);
2652
2653 for(i=0;i<s->nb_streams;i++) {
2654 /* free all data in a stream component */
2655 st = s->streams[i];
2656 if (st->parser) {
2657 av_parser_close(st->parser);
2658 av_free_packet(&st->cur_pkt);
2659 }
2660 av_dict_free(&st->metadata);
2661 av_free(st->index_entries);
2662 av_free(st->codec->extradata);
2663 av_free(st->codec->subtitle_header);
2664 av_free(st->codec);
2665 av_free(st->priv_data);
2666 av_free(st->info);
2667 av_free(st);
2668 }
2669 for(i=s->nb_programs-1; i>=0; i--) {
2670 av_dict_free(&s->programs[i]->metadata);
2671 av_freep(&s->programs[i]->stream_index);
2672 av_freep(&s->programs[i]);
2673 }
2674 av_freep(&s->programs);
2675 av_freep(&s->priv_data);
2676 while(s->nb_chapters--) {
2677 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2678 av_free(s->chapters[s->nb_chapters]);
2679 }
2680 av_freep(&s->chapters);
2681 av_dict_free(&s->metadata);
2682 av_freep(&s->streams);
2683 av_free(s);
2684 }
2685
2686 void av_close_input_file(AVFormatContext *s)
2687 {
2688 AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2689 NULL : s->pb;
2690 av_close_input_stream(s);
2691 if (pb)
2692 avio_close(pb);
2693 }
2694
2695 #if FF_API_NEW_STREAM
2696 AVStream *av_new_stream(AVFormatContext *s, int id)
2697 {
2698 AVStream *st = avformat_new_stream(s, NULL);
2699 if (st)
2700 st->id = id;
2701 return st;
2702 }
2703 #endif
2704
2705 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
2706 {
2707 AVStream *st;
2708 int i;
2709 AVStream **streams;
2710
2711 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2712 return NULL;
2713 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2714 if (!streams)
2715 return NULL;
2716 s->streams = streams;
2717
2718 st = av_mallocz(sizeof(AVStream));
2719 if (!st)
2720 return NULL;
2721 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2722 av_free(st);
2723 return NULL;
2724 }
2725
2726 st->codec = avcodec_alloc_context3(c);
2727 if (s->iformat) {
2728 /* no default bitrate if decoding */
2729 st->codec->bit_rate = 0;
2730 }
2731 st->index = s->nb_streams;
2732 st->start_time = AV_NOPTS_VALUE;
2733 st->duration = AV_NOPTS_VALUE;
2734 /* we set the current DTS to 0 so that formats without any timestamps
2735 but with durations get some timestamps; formats with some unknown
2736 timestamps have their first few packets buffered and the
2737 timestamps corrected before they are returned to the user */
2738 st->cur_dts = 0;
2739 st->first_dts = AV_NOPTS_VALUE;
2740 st->probe_packets = MAX_PROBE_PACKETS;
2741
2742 /* default pts setting is MPEG-like */
2743 av_set_pts_info(st, 33, 1, 90000);
2744 st->last_IP_pts = AV_NOPTS_VALUE;
2745 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2746 st->pts_buffer[i]= AV_NOPTS_VALUE;
2747 st->reference_dts = AV_NOPTS_VALUE;
2748
2749 st->sample_aspect_ratio = (AVRational){0,1};
2750
2751 s->streams[s->nb_streams++] = st;
2752 return st;
2753 }
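/* Illustrative usage sketch (not part of the original source): adding an
 * audio stream to an output context 'oc' whose oformat has already been set.
 * The codec parameters below are example values only.
 *
 *     AVStream *st = avformat_new_stream(oc, NULL);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
 *     st->codec->codec_id    = CODEC_ID_MP2;
 *     st->codec->sample_rate = 44100;
 *     st->codec->channels    = 2;
 */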
2754
2755 AVProgram *av_new_program(AVFormatContext *ac, int id)
2756 {
2757 AVProgram *program=NULL;
2758 int i;
2759
2760 av_dlog(ac, "new_program: id=0x%04x\n", id);
2761
2762 for(i=0; i<ac->nb_programs; i++)
2763 if(ac->programs[i]->id == id)
2764 program = ac->programs[i];
2765
2766 if(!program){
2767 program = av_mallocz(sizeof(AVProgram));
2768 if (!program)
2769 return NULL;
2770 dynarray_add(&ac->programs, &ac->nb_programs, program);
2771 program->discard = AVDISCARD_NONE;
2772 }
2773 program->id = id;
2774
2775 return program;
2776 }
2777
2778 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2779 {
2780 AVChapter *chapter = NULL;
2781 int i;
2782
2783 for(i=0; i<s->nb_chapters; i++)
2784 if(s->chapters[i]->id == id)
2785 chapter = s->chapters[i];
2786
2787 if(!chapter){
2788 chapter= av_mallocz(sizeof(AVChapter));
2789 if(!chapter)
2790 return NULL;
2791 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2792 }
2793 av_dict_set(&chapter->metadata, "title", title, 0);
2794 chapter->id = id;
2795 chapter->time_base= time_base;
2796 chapter->start = start;
2797 chapter->end = end;
2798
2799 return chapter;
2800 }
2801
2802 /************************************************************/
2803 /* output media file */
2804
2805 #if FF_API_FORMAT_PARAMETERS
2806 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2807 {
2808 int ret;
2809
2810 if (s->oformat->priv_data_size > 0) {
2811 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2812 if (!s->priv_data)
2813 return AVERROR(ENOMEM);
2814 if (s->oformat->priv_class) {
2815 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2816 av_opt_set_defaults(s->priv_data);
2817 }
2818 } else
2819 s->priv_data = NULL;
2820
2821 if (s->oformat->set_parameters) {
2822 ret = s->oformat->set_parameters(s, ap);
2823 if (ret < 0)
2824 return ret;
2825 }
2826 return 0;
2827 }
2828 #endif
2829
2830 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2831 {
2832 const AVCodecTag *avctag;
2833 int n;
2834 enum CodecID id = CODEC_ID_NONE;
2835 unsigned int tag = 0;
2836
2837 /**
2838 * Check that tag + id is in the table
2839 * If neither is in the table -> OK
2840 * If tag is in the table with another id -> FAIL
2841 * If id is in the table with another tag -> FAIL unless strict < normal
2842 */
2843 for (n = 0; s->oformat->codec_tag[n]; n++) {
2844 avctag = s->oformat->codec_tag[n];
2845 while (avctag->id != CODEC_ID_NONE) {
2846 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
2847 id = avctag->id;
2848 if (id == st->codec->codec_id)
2849 return 1;
2850 }
2851 if (avctag->id == st->codec->codec_id)
2852 tag = avctag->tag;
2853 avctag++;
2854 }
2855 }
2856 if (id != CODEC_ID_NONE)
2857 return 0;
2858 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
2859 return 0;
2860 return 1;
2861 }
2862
2863 #if FF_API_FORMAT_PARAMETERS
2864 int av_write_header(AVFormatContext *s)
2865 {
2866 return avformat_write_header(s, NULL);
2867 }
2868 #endif
2869
2870 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
2871 {
2872 int ret = 0, i;
2873 AVStream *st;
2874 AVDictionary *tmp = NULL;
2875
2876 if (options)
2877 av_dict_copy(&tmp, *options, 0);
2878 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
2879 goto fail;
2880
2881 // some sanity checks
2882 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2883 av_log(s, AV_LOG_ERROR, "no streams\n");
2884 ret = AVERROR(EINVAL);
2885 goto fail;
2886 }
2887
2888 for(i=0;i<s->nb_streams;i++) {
2889 st = s->streams[i];
2890
2891 switch (st->codec->codec_type) {
2892 case AVMEDIA_TYPE_AUDIO:
2893 if(st->codec->sample_rate<=0){
2894 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2895 ret = AVERROR(EINVAL);
2896 goto fail;
2897 }
2898 if(!st->codec->block_align)
2899 st->codec->block_align = st->codec->channels *
2900 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2901 break;
2902 case AVMEDIA_TYPE_VIDEO:
2903 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2904 av_log(s, AV_LOG_ERROR, "time base not set\n");
2905 ret = AVERROR(EINVAL);
2906 goto fail;
2907 }
2908 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2909 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2910 ret = AVERROR(EINVAL);
2911 goto fail;
2912 }
2913 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2914 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2915 ret = AVERROR(EINVAL);
2916 goto fail;
2917 }
2918 break;
2919 }
2920
2921 if(s->oformat->codec_tag){
2922 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2923 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi, so we override it here
2924 st->codec->codec_tag= 0;
2925 }
2926 if(st->codec->codec_tag){
2927 if (!validate_codec_tag(s, st)) {
2928 char tagbuf[32];
2929 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2930 av_log(s, AV_LOG_ERROR,
2931 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2932 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2933 ret = AVERROR_INVALIDDATA;
2934 goto fail;
2935 }
2936 }else
2937 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2938 }
2939
2940 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2941 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2942 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2943 }
2944
2945 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2946 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2947 if (!s->priv_data) {
2948 ret = AVERROR(ENOMEM);
2949 goto fail;
2950 }
2951 if (s->oformat->priv_class) {
2952 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2953 av_opt_set_defaults(s->priv_data);
2954 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
2955 goto fail;
2956 }
2957 }
2958
2959 /* set muxer identification string */
2960 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2961 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2962 }
2963
2964 if(s->oformat->write_header){
2965 ret = s->oformat->write_header(s);
2966 if (ret < 0)
2967 goto fail;
2968 }
2969
2970 /* init PTS generation */
2971 for(i=0;i<s->nb_streams;i++) {
2972 int64_t den = AV_NOPTS_VALUE;
2973 st = s->streams[i];
2974
2975 switch (st->codec->codec_type) {
2976 case AVMEDIA_TYPE_AUDIO:
2977 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2978 break;
2979 case AVMEDIA_TYPE_VIDEO:
2980 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2981 break;
2982 default:
2983 break;
2984 }
2985 if (den != AV_NOPTS_VALUE) {
2986 if (den <= 0) {
2987 ret = AVERROR_INVALIDDATA;
2988 goto fail;
2989 }
2990 frac_init(&st->pts, 0, 0, den);
2991 }
2992 }
2993
2994 if (options) {
2995 av_dict_free(options);
2996 *options = tmp;
2997 }
2998 return 0;
2999 fail:
3000 av_dict_free(&tmp);
3001 return ret;
3002 }
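/* Illustrative muxing sketch (not part of the original source): the expected
 * call order around avformat_write_header(), assuming 'oc' is a fully set up
 * output context and get_next_packet() is a hypothetical caller-supplied
 * function that fills an AVPacket with correctly timestamped data.
 *
 *     if (avformat_write_header(oc, NULL) < 0)
 *         return -1;
 *     AVPacket pkt;
 *     while (get_next_packet(&pkt) == 0)
 *         av_interleaved_write_frame(oc, &pkt);
 *     av_write_trailer(oc);
 */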
3003
3004 //FIXME merge with compute_pkt_fields
3005 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3006 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3007 int num, den, frame_size, i;
3008
3009 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
3010 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
3011
3012 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
3013 return AVERROR(EINVAL);*/
3014
3015 /* duration field */
3016 if (pkt->duration == 0) {
3017 compute_frame_duration(&num, &den, st, NULL, pkt);
3018 if (den && num) {
3019 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
3020 }
3021 }
3022
3023 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3024 pkt->pts= pkt->dts;
3025
3026 //XXX/FIXME this is a temporary hack until all encoders output pts
3027 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3028 pkt->dts=
3029 // pkt->pts= st->cur_dts;
3030 pkt->pts= st->pts.val;
3031 }
3032
3033 //calculate dts from pts
3034 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3035 st->pts_buffer[0]= pkt->pts;
3036 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3037 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
3038 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3039 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3040
3041 pkt->dts= st->pts_buffer[0];
3042 }
3043
3044 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
3045 av_log(s, AV_LOG_ERROR,
3046 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
3047 st->index, st->cur_dts, pkt->dts);
3048 return AVERROR(EINVAL);
3049 }
3050 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3051 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
3052 return AVERROR(EINVAL);
3053 }
3054
3055 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
3056 st->cur_dts= pkt->dts;
3057 st->pts.val= pkt->dts;
3058
3059 /* update pts */
3060 switch (st->codec->codec_type) {
3061 case AVMEDIA_TYPE_AUDIO:
3062 frame_size = get_audio_frame_size(st->codec, pkt->size);
3063
3064 /* HACK/FIXME: we skip the initial zero-size packets as they most
3065 likely correspond to the encoder delay, but it would be better if we
3066 had the real timestamps from the encoder */
3067 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3068 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3069 }
3070 break;
3071 case AVMEDIA_TYPE_VIDEO:
3072 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
3073 break;
3074 default:
3075 break;
3076 }
3077 return 0;
3078 }
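/* Worked example (illustrative, not part of the original source) of the
 * duration computation above, assuming compute_frame_duration() reports the
 * frame duration as num/den seconds: for a 25 fps video stream
 * (num/den = 1/25), a 1/90000 stream time base and ticks_per_frame == 1,
 * pkt->duration = av_rescale(1, 1 * 90000 * 1, 25 * 1) = 3600 ticks,
 * i.e. exactly one frame at 90 kHz. */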
3079
3080 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3081 {
3082 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3083
3084 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3085 return ret;
3086
3087 ret= s->oformat->write_packet(s, pkt);
3088
3089 if (ret >= 0)
3090 s->streams[pkt->stream_index]->nb_frames++;
3091 return ret;
3092 }
3093
3094 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3095 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3096 {
3097 AVPacketList **next_point, *this_pktl;
3098
3099 this_pktl = av_mallocz(sizeof(AVPacketList));
3100 this_pktl->pkt= *pkt;
3101 pkt->destruct= NULL; // do not free original but only the copy
3102 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-allocated memory
3103
3104 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3105 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
3106 }else
3107 next_point = &s->packet_buffer;
3108
3109 if(*next_point){
3110 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3111 while(!compare(s, &(*next_point)->pkt, pkt)){
3112 next_point= &(*next_point)->next;
3113 }
3114 goto next_non_null;
3115 }else{
3116 next_point = &(s->packet_buffer_end->next);
3117 }
3118 }
3119 assert(!*next_point);
3120
3121 s->packet_buffer_end= this_pktl;
3122 next_non_null:
3123
3124 this_pktl->next= *next_point;
3125
3126 s->streams[pkt->stream_index]->last_in_packet_buffer=
3127 *next_point= this_pktl;
3128 }
3129
3130 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3131 {
3132 AVStream *st = s->streams[ pkt ->stream_index];
3133 AVStream *st2= s->streams[ next->stream_index];
3134 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3135 st->time_base);
3136
3137 if (comp == 0)
3138 return pkt->stream_index < next->stream_index;
3139 return comp > 0;
3140 }
3141
3142 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
3143 AVPacketList *pktl;
3144 int stream_count=0;
3145 int i;
3146
3147 if(pkt){
3148 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3149 }
3150
3151 for(i=0; i < s->nb_streams; i++)
3152 stream_count+= !!s->streams[i]->last_in_packet_buffer;
3153
3154 if(stream_count && (s->nb_streams == stream_count || flush)){
3155 pktl= s->packet_buffer;
3156 *out= pktl->pkt;
3157
3158 s->packet_buffer= pktl->next;
3159 if(!s->packet_buffer)
3160 s->packet_buffer_end= NULL;
3161
3162 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3163 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3164 av_freep(&pktl);
3165 return 1;
3166 }else{
3167 av_init_packet(out);
3168 return 0;
3169 }
3170 }
3171
3172 /**
3173 * Interleave an AVPacket correctly so it can be muxed.
3174 * @param out the interleaved packet will be output here
3175 * @param in the input packet
3176 * @param flush 1 if no further packets are available as input and all
3177 * remaining packets should be output
3178 * @return 1 if a packet was output, 0 if no packet could be output,
3179 * < 0 if an error occurred
3180 */
3181 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3182 if(s->oformat->interleave_packet)
3183 return s->oformat->interleave_packet(s, out, in, flush);
3184 else
3185 return av_interleave_packet_per_dts(s, out, in, flush);
3186 }
3187
3188 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3189 AVStream *st= s->streams[ pkt->stream_index];
3190 int ret;
3191
3192 //FIXME/XXX/HACK drop zero sized packets
3193 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3194 return 0;
3195
3196 av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
3197 pkt->size, pkt->dts, pkt->pts);
3198 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3199 return ret;
3200
3201 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3202 return AVERROR(EINVAL);
3203
3204 for(;;){
3205 AVPacket opkt;
3206 int ret= interleave_packet(s, &opkt, pkt, 0);
3207 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3208 return ret;
3209
3210 ret= s->oformat->write_packet(s, &opkt);
3211 if (ret >= 0)
3212 s->streams[opkt.stream_index]->nb_frames++;
3213
3214 av_free_packet(&opkt);
3215 pkt= NULL;
3216
3217 if(ret<0)
3218 return ret;
3219 }
3220 }
3221
3222 int av_write_trailer(AVFormatContext *s)
3223 {
3224 int ret, i;
3225
3226 for(;;){
3227 AVPacket pkt;
3228 ret= interleave_packet(s, &pkt, NULL, 1);
3229 if(ret<0) //FIXME cleanup needed for ret<0 ?
3230 goto fail;
3231 if(!ret)
3232 break;
3233
3234 ret= s->oformat->write_packet(s, &pkt);
3235 if (ret >= 0)
3236 s->streams[pkt.stream_index]->nb_frames++;
3237
3238 av_free_packet(&pkt);
3239
3240 if(ret<0)
3241 goto fail;
3242 }
3243
3244 if(s->oformat->write_trailer)
3245 ret = s->oformat->write_trailer(s);
3246 fail:
3247 for(i=0;i<s->nb_streams;i++) {
3248 av_freep(&s->streams[i]->priv_data);
3249 av_freep(&s->streams[i]->index_entries);
3250 }
3251 if (s->iformat && s->iformat->priv_class)
3252 av_opt_free(s->priv_data);
3253 av_freep(&s->priv_data);
3254 return ret;
3255 }
3256
3257 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3258 {
3259 int i, j;
3260 AVProgram *program=NULL;
3261 void *tmp;
3262
3263 if (idx >= ac->nb_streams) {
3264 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3265 return;
3266 }
3267
3268 for(i=0; i<ac->nb_programs; i++){
3269 if(ac->programs[i]->id != progid)
3270 continue;
3271 program = ac->programs[i];
3272 for(j=0; j<program->nb_stream_indexes; j++)
3273 if(program->stream_index[j] == idx)
3274 return;
3275
3276 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3277 if(!tmp)
3278 return;
3279 program->stream_index = tmp;
3280 program->stream_index[program->nb_stream_indexes++] = idx;
3281 return;
3282 }
3283 }
3284
3285 static void print_fps(double d, const char *postfix){
3286 uint64_t v= lrintf(d*100);
3287 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3288 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3289 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3290 }
3291
3292 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3293 {
3294 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3295 AVDictionaryEntry *tag=NULL;
3296
3297 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3298 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3299 if(strcmp("language", tag->key))
3300 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
3301 }
3302 }
3303 }
3304
3305 /* "user interface" functions */
3306 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3307 {
3308 char buf[256];
3309 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3310 AVStream *st = ic->streams[i];
3311 int g = av_gcd(st->time_base.num, st->time_base.den);
3312 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3313 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3314 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
3315 /* the pid is important information, so we display it */
3316 /* XXX: add a generic system */
3317 if (flags & AVFMT_SHOW_IDS)
3318 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3319 if (lang)
3320 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3321 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3322 av_log(NULL, AV_LOG_INFO, ": %s", buf);
3323 if (st->sample_aspect_ratio.num && // default
3324 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3325 AVRational display_aspect_ratio;
3326 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3327 st->codec->width*st->sample_aspect_ratio.num,
3328 st->codec->height*st->sample_aspect_ratio.den,
3329 1024*1024);
3330 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
3331 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3332 display_aspect_ratio.num, display_aspect_ratio.den);
3333 }
3334 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3335 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3336 print_fps(av_q2d(st->avg_frame_rate), "fps");
3337 if(st->r_frame_rate.den && st->r_frame_rate.num)
3338 print_fps(av_q2d(st->r_frame_rate), "tbr");
3339 if(st->time_base.den && st->time_base.num)
3340 print_fps(1/av_q2d(st->time_base), "tbn");
3341 if(st->codec->time_base.den && st->codec->time_base.num)
3342 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3343 }
3344 if (st->disposition & AV_DISPOSITION_DEFAULT)
3345 av_log(NULL, AV_LOG_INFO, " (default)");
3346 if (st->disposition & AV_DISPOSITION_DUB)
3347 av_log(NULL, AV_LOG_INFO, " (dub)");
3348 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3349 av_log(NULL, AV_LOG_INFO, " (original)");
3350 if (st->disposition & AV_DISPOSITION_COMMENT)
3351 av_log(NULL, AV_LOG_INFO, " (comment)");
3352 if (st->disposition & AV_DISPOSITION_LYRICS)
3353 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3354 if (st->disposition & AV_DISPOSITION_KARAOKE)
3355 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3356 if (st->disposition & AV_DISPOSITION_FORCED)
3357 av_log(NULL, AV_LOG_INFO, " (forced)");
3358 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3359 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3360 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3361 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3362 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3363 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3364 av_log(NULL, AV_LOG_INFO, "\n");
3365 dump_metadata(NULL, st->metadata, " ");
3366 }
3367
3368 #if FF_API_DUMP_FORMAT
3369 void dump_format(AVFormatContext *ic,
3370 int index,
3371 const char *url,
3372 int is_output)
3373 {
3374 av_dump_format(ic, index, url, is_output);
3375 }
3376 #endif
3377
3378 void av_dump_format(AVFormatContext *ic,
3379 int index,
3380 const char *url,
3381 int is_output)
3382 {
3383 int i;
3384 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3385 if (ic->nb_streams && !printed)
3386 return;
3387
3388 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3389 is_output ? "Output" : "Input",
3390 index,
3391 is_output ? ic->oformat->name : ic->iformat->name,
3392 is_output ? "to" : "from", url);
3393 dump_metadata(NULL, ic->metadata, " ");
3394 if (!is_output) {
3395 av_log(NULL, AV_LOG_INFO, " Duration: ");
3396 if (ic->duration != AV_NOPTS_VALUE) {
3397 int hours, mins, secs, us;
3398 secs = ic->duration / AV_TIME_BASE;
3399 us = ic->duration % AV_TIME_BASE;
3400 mins = secs / 60;
3401 secs %= 60;
3402 hours = mins / 60;
3403 mins %= 60;
3404 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3405 (100 * us) / AV_TIME_BASE);
3406 } else {
3407 av_log(NULL, AV_LOG_INFO, "N/A");
3408 }
3409 if (ic->start_time != AV_NOPTS_VALUE) {
3410 int secs, us;
3411 av_log(NULL, AV_LOG_INFO, ", start: ");
3412 secs = ic->start_time / AV_TIME_BASE;
3413 us = abs(ic->start_time % AV_TIME_BASE);
3414 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3415 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3416 }
3417 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3418 if (ic->bit_rate) {
3419 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3420 } else {
3421 av_log(NULL, AV_LOG_INFO, "N/A");
3422 }
3423 av_log(NULL, AV_LOG_INFO, "\n");
3424 }
3425 for (i = 0; i < ic->nb_chapters; i++) {
3426 AVChapter *ch = ic->chapters[i];
3427 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3428 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3429 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3430
3431 dump_metadata(NULL, ch->metadata, " ");
3432 }
3433 if(ic->nb_programs) {
3434 int j, k, total = 0;
3435 for(j=0; j<ic->nb_programs; j++) {
3436 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3437 "name", NULL, 0);
3438 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3439 name ? name->value : "");
3440 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3441 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3442 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3443 printed[ic->programs[j]->stream_index[k]] = 1;
3444 }
3445 total += ic->programs[j]->nb_stream_indexes;
3446 }
3447 if (total < ic->nb_streams)
3448 av_log(NULL, AV_LOG_INFO, " No Program\n");
3449 }
3450 for(i=0;i<ic->nb_streams;i++)
3451 if (!printed[i])
3452 dump_stream_format(ic, i, index, is_output);
3453
3454 av_free(printed);
3455 }
3456
3457 int64_t av_gettime(void)
3458 {
3459 struct timeval tv;
3460 gettimeofday(&tv,NULL);
3461 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
3462 }
3463
3464 uint64_t ff_ntp_time(void)
3465 {
3466 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3467 }
3468
3469 #if FF_API_PARSE_DATE
3470 #include "libavutil/parseutils.h"
3471
3472 int64_t parse_date(const char *timestr, int duration)
3473 {
3474 int64_t timeval;
3475 av_parse_time(&timeval, timestr, duration);
3476 return timeval;
3477 }
3478 #endif
3479
3480 #if FF_API_FIND_INFO_TAG
3481 #include "libavutil/parseutils.h"
3482
3483 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
3484 {
3485 return av_find_info_tag(arg, arg_size, tag1, info);
3486 }
3487 #endif
3488
3489 int av_get_frame_filename(char *buf, int buf_size,
3490 const char *path, int number)
3491 {
3492 const char *p;
3493 char *q, buf1[20], c;
3494 int nd, len, percentd_found;
3495
3496 q = buf;
3497 p = path;
3498 percentd_found = 0;
3499 for(;;) {
3500 c = *p++;
3501 if (c == '\0')
3502 break;
3503 if (c == '%') {
3504 do {
3505 nd = 0;
3506 while (isdigit(*p)) {
3507 nd = nd * 10 + *p++ - '0';
3508 }
3509 c = *p++;
3510 } while (isdigit(c));
3511
3512 switch(c) {
3513 case '%':
3514 goto addchar;
3515 case 'd':
3516 if (percentd_found)
3517 goto fail;
3518 percentd_found = 1;
3519 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3520 len = strlen(buf1);
3521 if ((q - buf + len) > buf_size - 1)
3522 goto fail;
3523 memcpy(q, buf1, len);
3524 q += len;
3525 break;
3526 default:
3527 goto fail;
3528 }
3529 } else {
3530 addchar:
3531 if ((q - buf) < buf_size - 1)
3532 *q++ = c;
3533 }
3534 }
3535 if (!percentd_found)
3536 goto fail;
3537 *q = '\0';
3538 return 0;
3539 fail:
3540 *q = '\0';
3541 return -1;
3542 }
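/* Illustrative usage sketch (not part of the original source): expanding a
 * frame number into an image-sequence filename.
 *
 *     char name[1024];
 *     if (av_get_frame_filename(name, sizeof(name), "frame%04d.png", 42) == 0)
 *         printf("%s\n", name);   // prints "frame0042.png"
 */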
3543
3544 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)