1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /* #define DEBUG */
23
24 #include "avformat.h"
25 #include "avio_internal.h"
26 #include "internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/dict.h"
30 #include "libavutil/pixdesc.h"
31 #include "metadata.h"
32 #include "id3v2.h"
33 #include "libavutil/avstring.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/parseutils.h"
36 #include "riff.h"
37 #include "audiointerleave.h"
38 #include "url.h"
39 #include <sys/time.h>
40 #include <time.h>
41 #include <stdarg.h>
42 #if CONFIG_NETWORK
43 #include "network.h"
44 #endif
45
46 #undef NDEBUG
47 #include <assert.h>
48
49 /**
50 * @file
51 * various utility functions for use within Libav
52 */
53
54 unsigned avformat_version(void)
55 {
56 return LIBAVFORMAT_VERSION_INT;
57 }
58
59 const char *avformat_configuration(void)
60 {
61 return LIBAV_CONFIGURATION;
62 }
63
64 const char *avformat_license(void)
65 {
66 #define LICENSE_PREFIX "libavformat license: "
67 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
68 }
69
70 /* fraction handling */
71
72 /**
73 * f = val + (num / den) + 0.5.
74 *
75 * 'num' is normalized so that 0 <= num < den.
76 *
77 * @param f fractional number
78 * @param val integer value
79 * @param num must be >= 0
80 * @param den must be >= 1
81 */
82 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
83 {
84 num += (den >> 1);
85 if (num >= den) {
86 val += num / den;
87 num = num % den;
88 }
89 f->val = val;
90 f->num = num;
91 f->den = den;
92 }
93
94 /**
95 * Fractional addition to f: f = f + (incr / f->den).
96 *
97 * @param f fractional number
98 * @param incr increment, can be positive or negative
99 */
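/* Worked example: starting from f = {val = 3, num = 1, den = 4},
 * frac_add(f, 6) gives num = 7 >= den, so val becomes 4 and num becomes 3,
 * i.e. 3 + 7/4 == 4 + 3/4. */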
100 static void frac_add(AVFrac *f, int64_t incr)
101 {
102 int64_t num, den;
103
104 num = f->num + incr;
105 den = f->den;
106 if (num < 0) {
107 f->val += num / den;
108 num = num % den;
109 if (num < 0) {
110 num += den;
111 f->val--;
112 }
113 } else if (num >= den) {
114 f->val += num / den;
115 num = num % den;
116 }
117 f->num = num;
118 }
119
120 /** head of registered input format linked list */
121 static AVInputFormat *first_iformat = NULL;
122 /** head of registered output format linked list */
123 static AVOutputFormat *first_oformat = NULL;
124
125 AVInputFormat *av_iformat_next(AVInputFormat *f)
126 {
127 if(f) return f->next;
128 else return first_iformat;
129 }
130
131 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
132 {
133 if(f) return f->next;
134 else return first_oformat;
135 }
136
137 void av_register_input_format(AVInputFormat *format)
138 {
139 AVInputFormat **p;
140 p = &first_iformat;
141 while (*p != NULL) p = &(*p)->next;
142 *p = format;
143 format->next = NULL;
144 }
145
146 void av_register_output_format(AVOutputFormat *format)
147 {
148 AVOutputFormat **p;
149 p = &first_oformat;
150 while (*p != NULL) p = &(*p)->next;
151 *p = format;
152 format->next = NULL;
153 }
154
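/* Return 1 if the extension of 'filename' (the part after the last '.')
 * case-insensitively matches one entry of the comma-separated 'extensions'
 * list, 0 otherwise. */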
155 int av_match_ext(const char *filename, const char *extensions)
156 {
157 const char *ext, *p;
158 char ext1[32], *q;
159
160 if(!filename)
161 return 0;
162
163 ext = strrchr(filename, '.');
164 if (ext) {
165 ext++;
166 p = extensions;
167 for(;;) {
168 q = ext1;
169 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
170 *q++ = *p++;
171 *q = '\0';
172 if (!av_strcasecmp(ext1, ext))
173 return 1;
174 if (*p == '\0')
175 break;
176 p++;
177 }
178 }
179 return 0;
180 }
181
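/* Return 1 if 'name' case-insensitively matches one of the comma-separated
 * entries in 'names', 0 otherwise. */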
182 static int match_format(const char *name, const char *names)
183 {
184 const char *p;
185 int len, namelen;
186
187 if (!name || !names)
188 return 0;
189
190 namelen = strlen(name);
191 while ((p = strchr(names, ','))) {
192 len = FFMAX(p - names, namelen);
193 if (!av_strncasecmp(name, names, len))
194 return 1;
195 names = p+1;
196 }
197 return !av_strcasecmp(name, names);
198 }
199
200 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
201 const char *mime_type)
202 {
203 AVOutputFormat *fmt = NULL, *fmt_found;
204 int score_max, score;
205
206 /* specific test for image sequences */
207 #if CONFIG_IMAGE2_MUXER
208 if (!short_name && filename &&
209 av_filename_number_test(filename) &&
210 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
211 return av_guess_format("image2", NULL, NULL);
212 }
213 #endif
214 /* Find the proper file type. */
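    /* Each registered muxer is scored: +100 for a matching short name,
     * +10 for a matching MIME type, +5 for a matching filename extension;
     * the highest-scoring muxer is returned. */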
215 fmt_found = NULL;
216 score_max = 0;
217 while ((fmt = av_oformat_next(fmt))) {
218 score = 0;
219 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
220 score += 100;
221 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
222 score += 10;
223 if (filename && fmt->extensions &&
224 av_match_ext(filename, fmt->extensions)) {
225 score += 5;
226 }
227 if (score > score_max) {
228 score_max = score;
229 fmt_found = fmt;
230 }
231 }
232 return fmt_found;
233 }
234
235 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
236 const char *filename, const char *mime_type, enum AVMediaType type){
237 if(type == AVMEDIA_TYPE_VIDEO){
238 enum CodecID codec_id= CODEC_ID_NONE;
239
240 #if CONFIG_IMAGE2_MUXER
241 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
242 codec_id= ff_guess_image2_codec(filename);
243 }
244 #endif
245 if(codec_id == CODEC_ID_NONE)
246 codec_id= fmt->video_codec;
247 return codec_id;
248 }else if(type == AVMEDIA_TYPE_AUDIO)
249 return fmt->audio_codec;
250 else if (type == AVMEDIA_TYPE_SUBTITLE)
251 return fmt->subtitle_codec;
252 else
253 return CODEC_ID_NONE;
254 }
255
256 AVInputFormat *av_find_input_format(const char *short_name)
257 {
258 AVInputFormat *fmt = NULL;
259 while ((fmt = av_iformat_next(fmt))) {
260 if (match_format(short_name, fmt->name))
261 return fmt;
262 }
263 return NULL;
264 }
265
266
267 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
268 {
269 int ret= av_new_packet(pkt, size);
270
271 if(ret<0)
272 return ret;
273
274 pkt->pos= avio_tell(s);
275
276 ret= avio_read(s, pkt->data, size);
277 if(ret<=0)
278 av_free_packet(pkt);
279 else
280 av_shrink_packet(pkt, ret);
281
282 return ret;
283 }
284
285 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
286 {
287 int ret;
288 int old_size;
289 if (!pkt->size)
290 return av_get_packet(s, pkt, size);
291 old_size = pkt->size;
292 ret = av_grow_packet(pkt, size);
293 if (ret < 0)
294 return ret;
295 ret = avio_read(s, pkt->data + old_size, size);
296 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
297 return ret;
298 }
299
300
301 int av_filename_number_test(const char *filename)
302 {
303 char buf[1024];
304 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
305 }
306
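/* Probe the data in 'pd' against every registered demuxer and return the one
 * with the highest score above *score_max, updating *score_max; a tie yields
 * NULL. A leading ID3v2 tag is skipped before probing, and if nothing scores
 * high enough the format is guessed from the file extension (or assumed to be
 * mp3 when an ID3v2 tag was present). */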
307 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
308 {
309 AVProbeData lpd = *pd;
310 AVInputFormat *fmt1 = NULL, *fmt;
311 int score, id3 = 0;
312
313 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
314 int id3len = ff_id3v2_tag_len(lpd.buf);
315 if (lpd.buf_size > id3len + 16) {
316 lpd.buf += id3len;
317 lpd.buf_size -= id3len;
318 }
319 id3 = 1;
320 }
321
322 fmt = NULL;
323 while ((fmt1 = av_iformat_next(fmt1))) {
324 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
325 continue;
326 score = 0;
327 if (fmt1->read_probe) {
328 score = fmt1->read_probe(&lpd);
329 } else if (fmt1->extensions) {
330 if (av_match_ext(lpd.filename, fmt1->extensions)) {
331 score = 50;
332 }
333 }
334 if (score > *score_max) {
335 *score_max = score;
336 fmt = fmt1;
337 }else if (score == *score_max)
338 fmt = NULL;
339 }
340
341 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */
342 if (!fmt && is_opened && *score_max < AVPROBE_SCORE_MAX/4) {
343 while ((fmt = av_iformat_next(fmt)))
344 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) {
345 *score_max = AVPROBE_SCORE_MAX/4;
346 break;
347 }
348 }
349
350 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4-1) {
351 while ((fmt = av_iformat_next(fmt)))
352 if (fmt->extensions && av_match_ext("mp3", fmt->extensions)) {
353 *score_max = AVPROBE_SCORE_MAX/4-1;
354 break;
355 }
356 }
357
358 return fmt;
359 }
360
361 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
362 int score=0;
363 return av_probe_input_format2(pd, is_opened, &score);
364 }
365
366 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
367 {
368 static const struct {
369 const char *name; enum CodecID id; enum AVMediaType type;
370 } fmt_id_type[] = {
371 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
372 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
373 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
374 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
375 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
376 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
377 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
378 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
379 { 0 }
380 };
381 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
382
383 if (fmt) {
384 int i;
385 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
386 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
387 for (i = 0; fmt_id_type[i].name; i++) {
388 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
389 st->codec->codec_id = fmt_id_type[i].id;
390 st->codec->codec_type = fmt_id_type[i].type;
391 break;
392 }
393 }
394 }
395 return !!fmt;
396 }
397
398 /************************************************************/
399 /* input media file */
400
401 #if FF_API_FORMAT_PARAMETERS
402 static AVDictionary *convert_format_parameters(AVFormatParameters *ap)
403 {
404 char buf[1024];
405 AVDictionary *opts = NULL;
406
407 if (!ap)
408 return NULL;
409
410 if (ap->time_base.num) {
411 snprintf(buf, sizeof(buf), "%d/%d", ap->time_base.den, ap->time_base.num);
412 av_dict_set(&opts, "framerate", buf, 0);
413 }
414 if (ap->sample_rate) {
415 snprintf(buf, sizeof(buf), "%d", ap->sample_rate);
416 av_dict_set(&opts, "sample_rate", buf, 0);
417 }
418 if (ap->channels) {
419 snprintf(buf, sizeof(buf), "%d", ap->channels);
420 av_dict_set(&opts, "channels", buf, 0);
421 }
422 if (ap->width || ap->height) {
423 snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height);
424 av_dict_set(&opts, "video_size", buf, 0);
425 }
426 if (ap->pix_fmt != PIX_FMT_NONE) {
427 av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0);
428 }
429 if (ap->channel) {
430 snprintf(buf, sizeof(buf), "%d", ap->channel);
431 av_dict_set(&opts, "channel", buf, 0);
432 }
433 if (ap->standard) {
434 av_dict_set(&opts, "standard", ap->standard, 0);
435 }
436 if (ap->mpeg2ts_compute_pcr) {
437 av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0);
438 }
439 if (ap->initial_pause) {
440 av_dict_set(&opts, "initial_pause", "1", 0);
441 }
442 return opts;
443 }
444
445 /**
446 * Open a media file from an IO stream. 'fmt' must be specified.
447 */
448 int av_open_input_stream(AVFormatContext **ic_ptr,
449 AVIOContext *pb, const char *filename,
450 AVInputFormat *fmt, AVFormatParameters *ap)
451 {
452 int err;
453 AVDictionary *opts;
454 AVFormatContext *ic;
455 AVFormatParameters default_ap;
456
457 if(!ap){
458 ap=&default_ap;
459 memset(ap, 0, sizeof(default_ap));
460 }
461 opts = convert_format_parameters(ap);
462
463 if(!ap->prealloced_context)
464 ic = avformat_alloc_context();
465 else
466 ic = *ic_ptr;
467 if (!ic) {
468 err = AVERROR(ENOMEM);
469 goto fail;
470 }
471 if (pb && fmt && fmt->flags & AVFMT_NOFILE)
472 av_log(ic, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
473 "will be ignored with AVFMT_NOFILE format.\n");
474 else
475 ic->pb = pb;
476
477 if ((err = avformat_open_input(&ic, filename, fmt, &opts)) < 0)
478 goto fail;
479 ic->pb = ic->pb ? ic->pb : pb; // don't leak custom pb if it wasn't set above
480
481 fail:
482 *ic_ptr = ic;
483 av_dict_free(&opts);
484 return err;
485 }
486 #endif
487
488 /** size of probe buffer, for guessing file type from file contents */
489 #define PROBE_BUF_MIN 2048
490 #define PROBE_BUF_MAX (1<<20)
491
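/* Probe the format of the data in 'pb' by reading successively larger chunks,
 * starting at PROBE_BUF_MIN bytes and doubling up to max_probe_size, until a
 * demuxer is detected with a high enough score; only the newly added half of
 * the buffer is read on each iteration, and the whole probe buffer is pushed
 * back into 'pb' at the end so that no seek is required. */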
492 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
493 const char *filename, void *logctx,
494 unsigned int offset, unsigned int max_probe_size)
495 {
496 AVProbeData pd = { filename ? filename : "", NULL, -offset };
497 unsigned char *buf = NULL;
498 int ret = 0, probe_size;
499
500 if (!max_probe_size) {
501 max_probe_size = PROBE_BUF_MAX;
502 } else if (max_probe_size > PROBE_BUF_MAX) {
503 max_probe_size = PROBE_BUF_MAX;
504 } else if (max_probe_size < PROBE_BUF_MIN) {
505 return AVERROR(EINVAL);
506 }
507
508 if (offset >= max_probe_size) {
509 return AVERROR(EINVAL);
510 }
511
512 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
513 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
514 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
515 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
516
517 if (probe_size < offset) {
518 continue;
519 }
520
521 /* read probe data */
522 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
523 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
524 /* fail if the error was not end of file; otherwise, lower the score */
525 if (ret != AVERROR_EOF) {
526 av_free(buf);
527 return ret;
528 }
529 score = 0;
530 ret = 0; /* error was end of file, nothing read */
531 }
532 pd.buf_size += ret;
533 pd.buf = &buf[offset];
534
535 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
536
537 /* guess file format */
538 *fmt = av_probe_input_format2(&pd, 1, &score);
539 if(*fmt){
540 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
541 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
542 }else
543 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
544 }
545 }
546
547 if (!*fmt) {
548 av_free(buf);
549 return AVERROR_INVALIDDATA;
550 }
551
552 /* rewind. reuse probe buffer to avoid seeking */
553 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
554 av_free(buf);
555
556 return ret;
557 }
558
559 #if FF_API_FORMAT_PARAMETERS
560 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
561 AVInputFormat *fmt,
562 int buf_size,
563 AVFormatParameters *ap)
564 {
565 int err;
566 AVDictionary *opts = convert_format_parameters(ap);
567
568 if (!ap || !ap->prealloced_context)
569 *ic_ptr = NULL;
570
571 err = avformat_open_input(ic_ptr, filename, fmt, &opts);
572
573 av_dict_free(&opts);
574 return err;
575 }
576 #endif
577
578 /* open input file and probe the format if necessary */
579 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
580 {
581 int ret;
582 AVProbeData pd = {filename, NULL, 0};
583
584 if (s->pb) {
585 s->flags |= AVFMT_FLAG_CUSTOM_IO;
586 if (!s->iformat)
587 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
588 else if (s->iformat->flags & AVFMT_NOFILE)
589 return AVERROR(EINVAL);
590 return 0;
591 }
592
593 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
594 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
595 return 0;
596
597 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
598 &s->interrupt_callback, options)) < 0)
599 return ret;
600 if (s->iformat)
601 return 0;
602 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
603 }
604
605 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
606 {
607 AVFormatContext *s = *ps;
608 int ret = 0;
609 AVFormatParameters ap = { { 0 } };
610 AVDictionary *tmp = NULL;
611
612 if (!s && !(s = avformat_alloc_context()))
613 return AVERROR(ENOMEM);
614 if (fmt)
615 s->iformat = fmt;
616
617 if (options)
618 av_dict_copy(&tmp, *options, 0);
619
620 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
621 goto fail;
622
623 if ((ret = init_input(s, filename, &tmp)) < 0)
624 goto fail;
625
626 /* check filename in case an image number is expected */
627 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
628 if (!av_filename_number_test(filename)) {
629 ret = AVERROR(EINVAL);
630 goto fail;
631 }
632 }
633
634 s->duration = s->start_time = AV_NOPTS_VALUE;
635 av_strlcpy(s->filename, filename, sizeof(s->filename));
636
637 /* allocate private data */
638 if (s->iformat->priv_data_size > 0) {
639 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
640 ret = AVERROR(ENOMEM);
641 goto fail;
642 }
643 if (s->iformat->priv_class) {
644 *(const AVClass**)s->priv_data = s->iformat->priv_class;
645 av_opt_set_defaults(s->priv_data);
646 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
647 goto fail;
648 }
649 }
650
651 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
652 if (s->pb)
653 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC);
654
655 if (s->iformat->read_header)
656 if ((ret = s->iformat->read_header(s, &ap)) < 0)
657 goto fail;
658
659 if (s->pb && !s->data_offset)
660 s->data_offset = avio_tell(s->pb);
661
662 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
663
664 if (options) {
665 av_dict_free(options);
666 *options = tmp;
667 }
668 *ps = s;
669 return 0;
670
671 fail:
672 av_dict_free(&tmp);
673 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
674 avio_close(s->pb);
675 avformat_free_context(s);
676 *ps = NULL;
677 return ret;
678 }
679
680 /*******************************************************/
681
682 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
683 AVPacketList **plast_pktl){
684 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
685 if (!pktl)
686 return NULL;
687
688 if (*packet_buffer)
689 (*plast_pktl)->next = pktl;
690 else
691 *packet_buffer = pktl;
692
693 /* add the packet to the buffered packet list */
694 *plast_pktl = pktl;
695 pktl->pkt= *pkt;
696 return &pktl->pkt;
697 }
698
699 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
700 {
701 int ret, i;
702 AVStream *st;
703
704 for(;;){
705 AVPacketList *pktl = s->raw_packet_buffer;
706
707 if (pktl) {
708 *pkt = pktl->pkt;
709 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
710 !s->streams[pkt->stream_index]->probe_packets ||
711 s->raw_packet_buffer_remaining_size < pkt->size){
712 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
713 av_freep(&pd->buf);
714 pd->buf_size = 0;
715 s->raw_packet_buffer = pktl->next;
716 s->raw_packet_buffer_remaining_size += pkt->size;
717 av_free(pktl);
718 return 0;
719 }
720 }
721
722 av_init_packet(pkt);
723 ret= s->iformat->read_packet(s, pkt);
724 if (ret < 0) {
725 if (!pktl || ret == AVERROR(EAGAIN))
726 return ret;
727 for (i = 0; i < s->nb_streams; i++)
728 s->streams[i]->probe_packets = 0;
729 continue;
730 }
731
732 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
733 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
734 av_log(s, AV_LOG_WARNING,
735 "Dropped corrupted packet (stream = %d)\n",
736 pkt->stream_index);
737 av_free_packet(pkt);
738 continue;
739 }
740
741 st= s->streams[pkt->stream_index];
742
743 switch(st->codec->codec_type){
744 case AVMEDIA_TYPE_VIDEO:
745 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
746 break;
747 case AVMEDIA_TYPE_AUDIO:
748 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
749 break;
750 case AVMEDIA_TYPE_SUBTITLE:
751 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
752 break;
753 }
754
755 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
756 !st->probe_packets))
757 return ret;
758
759 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
760 s->raw_packet_buffer_remaining_size -= pkt->size;
761
762 if(st->codec->codec_id == CODEC_ID_PROBE){
763 AVProbeData *pd = &st->probe_data;
764 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
765 --st->probe_packets;
766
767 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
768 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
769 pd->buf_size += pkt->size;
770 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
771
772 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
773 //FIXME we don't reduce the score to 0 when we run out of buffer space (in bytes)
774 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
775 if(st->codec->codec_id != CODEC_ID_PROBE){
776 pd->buf_size=0;
777 av_freep(&pd->buf);
778 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
779 }
780 }
781 }
782 }
783 }
784
785 /**********************************************************/
786
787 /**
788 * Get the number of samples of an audio frame. Return -1 on error.
789 */
790 static int get_audio_frame_size(AVCodecContext *enc, int size)
791 {
792 int frame_size;
793
794 if(enc->codec_id == CODEC_ID_VORBIS)
795 return -1;
796
797 if (enc->frame_size <= 1) {
798 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
799
800 if (bits_per_sample) {
801 if (enc->channels == 0)
802 return -1;
803 frame_size = (size << 3) / (bits_per_sample * enc->channels);
804 } else {
805 /* used for example by ADPCM codecs */
806 if (enc->bit_rate == 0)
807 return -1;
808 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
809 }
810 } else {
811 frame_size = enc->frame_size;
812 }
813 return frame_size;
814 }
815
816
817 /**
818 * Compute the frame duration, in seconds, as the fraction *pnum / *pden. Both are set to 0 if it is not available.
819 */
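/* For example, a video stream with r_frame_rate 25/1 yields *pnum = 1 and
 * *pden = 25, i.e. a duration of 1/25 of a second per frame. */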
820 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
821 AVCodecParserContext *pc, AVPacket *pkt)
822 {
823 int frame_size;
824
825 *pnum = 0;
826 *pden = 0;
827 switch(st->codec->codec_type) {
828 case AVMEDIA_TYPE_VIDEO:
829 if (st->r_frame_rate.num) {
830 *pnum = st->r_frame_rate.den;
831 *pden = st->r_frame_rate.num;
832 } else if(st->time_base.num*1000LL > st->time_base.den) {
833 *pnum = st->time_base.num;
834 *pden = st->time_base.den;
835 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
836 *pnum = st->codec->time_base.num;
837 *pden = st->codec->time_base.den;
838 if (pc && pc->repeat_pict) {
839 *pnum = (*pnum) * (1 + pc->repeat_pict);
840 }
841 //If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet.
842 //Thus, if we have no parser, leave the duration undefined.
843 if(st->codec->ticks_per_frame>1 && !pc){
844 *pnum = *pden = 0;
845 }
846 }
847 break;
848 case AVMEDIA_TYPE_AUDIO:
849 frame_size = get_audio_frame_size(st->codec, pkt->size);
850 if (frame_size <= 0 || st->codec->sample_rate <= 0)
851 break;
852 *pnum = frame_size;
853 *pden = st->codec->sample_rate;
854 break;
855 default:
856 break;
857 }
858 }
859
860 static int is_intra_only(AVCodecContext *enc){
861 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
862 return 1;
863 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
864 switch(enc->codec_id){
865 case CODEC_ID_MJPEG:
866 case CODEC_ID_MJPEGB:
867 case CODEC_ID_LJPEG:
868 case CODEC_ID_PRORES:
869 case CODEC_ID_RAWVIDEO:
870 case CODEC_ID_DVVIDEO:
871 case CODEC_ID_HUFFYUV:
872 case CODEC_ID_FFVHUFF:
873 case CODEC_ID_ASV1:
874 case CODEC_ID_ASV2:
875 case CODEC_ID_VCR1:
876 case CODEC_ID_DNXHD:
877 case CODEC_ID_JPEG2000:
878 return 1;
879 default: break;
880 }
881 }
882 return 0;
883 }
884
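/* Once the first valid DTS of a stream is known, compute st->first_dts and
 * shift the timestamps of the packets already buffered for that stream by it;
 * st->start_time is derived from the first resulting pts. */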
885 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
886 int64_t dts, int64_t pts)
887 {
888 AVStream *st= s->streams[stream_index];
889 AVPacketList *pktl= s->packet_buffer;
890
891 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
892 return;
893
894 st->first_dts= dts - st->cur_dts;
895 st->cur_dts= dts;
896
897 for(; pktl; pktl= pktl->next){
898 if(pktl->pkt.stream_index != stream_index)
899 continue;
900 //FIXME think more about this check
901 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
902 pktl->pkt.pts += st->first_dts;
903
904 if(pktl->pkt.dts != AV_NOPTS_VALUE)
905 pktl->pkt.dts += st->first_dts;
906
907 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
908 st->start_time= pktl->pkt.pts;
909 }
910 if (st->start_time == AV_NOPTS_VALUE)
911 st->start_time = pts;
912 }
913
914 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
915 {
916 AVPacketList *pktl= s->packet_buffer;
917 int64_t cur_dts= 0;
918
919 if(st->first_dts != AV_NOPTS_VALUE){
920 cur_dts= st->first_dts;
921 for(; pktl; pktl= pktl->next){
922 if(pktl->pkt.stream_index == pkt->stream_index){
923 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
924 break;
925 cur_dts -= pkt->duration;
926 }
927 }
928 pktl= s->packet_buffer;
929 st->first_dts = cur_dts;
930 }else if(st->cur_dts)
931 return;
932
933 for(; pktl; pktl= pktl->next){
934 if(pktl->pkt.stream_index != pkt->stream_index)
935 continue;
936 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
937 && !pktl->pkt.duration){
938 pktl->pkt.dts= cur_dts;
939 if(!st->codec->has_b_frames)
940 pktl->pkt.pts= cur_dts;
941 cur_dts += pkt->duration;
942 pktl->pkt.duration= pkt->duration;
943 }else
944 break;
945 }
946 if(st->first_dts == AV_NOPTS_VALUE)
947 st->cur_dts= cur_dts;
948 }
949
950 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
951 AVCodecParserContext *pc, AVPacket *pkt)
952 {
953 int num, den, presentation_delayed, delay, i;
954 int64_t offset;
955
956 if (s->flags & AVFMT_FLAG_NOFILLIN)
957 return;
958
959 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
960 pkt->dts= AV_NOPTS_VALUE;
961
962 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
963 //FIXME Set low_delay = 0 when has_b_frames = 1
964 st->codec->has_b_frames = 1;
965
966 /* do we have a video B-frame ? */
967 delay= st->codec->has_b_frames;
968 presentation_delayed = 0;
969
970 /* XXX: need has_b_frames, but cannot get it if the codec is
971 not initialized */
972 if (delay &&
973 pc && pc->pict_type != AV_PICTURE_TYPE_B)
974 presentation_delayed = 1;
975
976 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
977 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
978 pkt->dts -= 1LL<<st->pts_wrap_bits;
979 }
980
981 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
982 // we take the conservative approach and discard both
983 // Note, if this is misbehaving for an H.264 file then possibly presentation_delayed is not set correctly.
984 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
985 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
986 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
987 }
988
989 if (pkt->duration == 0) {
990 compute_frame_duration(&num, &den, st, pc, pkt);
991 if (den && num) {
992 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
993
994 if(pkt->duration != 0 && s->packet_buffer)
995 update_initial_durations(s, st, pkt);
996 }
997 }
998
999 /* correct timestamps with byte offset if demuxers only have timestamps
1000 on packet boundaries */
1001 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1002 /* this will estimate bitrate based on this frame's duration and size */
1003 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1004 if(pkt->pts != AV_NOPTS_VALUE)
1005 pkt->pts += offset;
1006 if(pkt->dts != AV_NOPTS_VALUE)
1007 pkt->dts += offset;
1008 }
1009
1010 if (pc && pc->dts_sync_point >= 0) {
1011 // we have synchronization info from the parser
1012 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1013 if (den > 0) {
1014 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1015 if (pkt->dts != AV_NOPTS_VALUE) {
1016 // got DTS from the stream, update reference timestamp
1017 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1018 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1019 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1020 // compute DTS based on reference timestamp
1021 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1022 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1023 }
1024 if (pc->dts_sync_point > 0)
1025 st->reference_dts = pkt->dts; // new reference
1026 }
1027 }
1028
1029 /* This may be redundant, but it should not hurt. */
1030 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1031 presentation_delayed = 1;
1032
1033 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
1034 /* interpolate PTS and DTS if they are not present */
1035 //We skip H264 currently because delay and has_b_frames are not reliably set
1036 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1037 if (presentation_delayed) {
1038 /* DTS = decompression timestamp */
1039 /* PTS = presentation timestamp */
1040 if (pkt->dts == AV_NOPTS_VALUE)
1041 pkt->dts = st->last_IP_pts;
1042 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1043 if (pkt->dts == AV_NOPTS_VALUE)
1044 pkt->dts = st->cur_dts;
1045
1046 /* this is tricky: the dts must be incremented by the duration
1047 of the frame we are displaying, i.e. the last I- or P-frame */
1048 if (st->last_IP_duration == 0)
1049 st->last_IP_duration = pkt->duration;
1050 if(pkt->dts != AV_NOPTS_VALUE)
1051 st->cur_dts = pkt->dts + st->last_IP_duration;
1052 st->last_IP_duration = pkt->duration;
1053 st->last_IP_pts= pkt->pts;
1054 /* cannot compute PTS if not present (we can compute it only
1055 by knowing the future) */
1056 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
1057 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
1058 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
1059 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1060 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
1061 pkt->pts += pkt->duration;
1062 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
1063 }
1064 }
1065
1066 /* presentation is not delayed : PTS and DTS are the same */
1067 if(pkt->pts == AV_NOPTS_VALUE)
1068 pkt->pts = pkt->dts;
1069 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1070 if(pkt->pts == AV_NOPTS_VALUE)
1071 pkt->pts = st->cur_dts;
1072 pkt->dts = pkt->pts;
1073 if(pkt->pts != AV_NOPTS_VALUE)
1074 st->cur_dts = pkt->pts + pkt->duration;
1075 }
1076 }
1077
1078 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1079 st->pts_buffer[0]= pkt->pts;
1080 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1081 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1082 if(pkt->dts == AV_NOPTS_VALUE)
1083 pkt->dts= st->pts_buffer[0];
1084 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
1085 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1086 }
1087 if(pkt->dts > st->cur_dts)
1088 st->cur_dts = pkt->dts;
1089 }
1090
1091 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1092
1093 /* update flags */
1094 if(is_intra_only(st->codec))
1095 pkt->flags |= AV_PKT_FLAG_KEY;
1096 else if (pc) {
1097 pkt->flags = 0;
1098 /* keyframe computation */
1099 if (pc->key_frame == 1)
1100 pkt->flags |= AV_PKT_FLAG_KEY;
1101 else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
1102 pkt->flags |= AV_PKT_FLAG_KEY;
1103 }
1104 if (pc)
1105 pkt->convergence_duration = pc->convergence_duration;
1106 }
1107
1108
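/* Return the next frame: continue consuming the pending packet of s->cur_st
 * through its parser if one is active, otherwise read a new packet with
 * av_read_packet(); timestamps are completed via compute_pkt_fields(). */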
1109 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1110 {
1111 AVStream *st;
1112 int len, ret, i;
1113
1114 av_init_packet(pkt);
1115
1116 for(;;) {
1117 /* select current input stream component */
1118 st = s->cur_st;
1119 if (st) {
1120 if (!st->need_parsing || !st->parser) {
1121 /* no parsing needed: we just output the packet as is */
1122 /* raw data support */
1123 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
1124 compute_pkt_fields(s, st, NULL, pkt);
1125 s->cur_st = NULL;
1126 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1127 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1128 ff_reduce_index(s, st->index);
1129 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1130 }
1131 break;
1132 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1133 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1134 st->cur_ptr, st->cur_len,
1135 st->cur_pkt.pts, st->cur_pkt.dts,
1136 st->cur_pkt.pos);
1137 st->cur_pkt.pts = AV_NOPTS_VALUE;
1138 st->cur_pkt.dts = AV_NOPTS_VALUE;
1139 /* increment read pointer */
1140 st->cur_ptr += len;
1141 st->cur_len -= len;
1142
1143 /* return packet if any */
1144 if (pkt->size) {
1145 got_packet:
1146 pkt->duration = 0;
1147 pkt->stream_index = st->index;
1148 pkt->pts = st->parser->pts;
1149 pkt->dts = st->parser->dts;
1150 pkt->pos = st->parser->pos;
1151 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
1152 s->cur_st = NULL;
1153 pkt->destruct= st->cur_pkt.destruct;
1154 st->cur_pkt.destruct= NULL;
1155 st->cur_pkt.data = NULL;
1156 assert(st->cur_len == 0);
1157 }else{
1158 pkt->destruct = NULL;
1159 }
1160 compute_pkt_fields(s, st, st->parser, pkt);
1161
1162 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1163 ff_reduce_index(s, st->index);
1164 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1165 0, 0, AVINDEX_KEYFRAME);
1166 }
1167
1168 break;
1169 }
1170 } else {
1171 /* free packet */
1172 av_free_packet(&st->cur_pkt);
1173 s->cur_st = NULL;
1174 }
1175 } else {
1176 AVPacket cur_pkt;
1177 /* read next packet */
1178 ret = av_read_packet(s, &cur_pkt);
1179 if (ret < 0) {
1180 if (ret == AVERROR(EAGAIN))
1181 return ret;
1182 /* return the last frames, if any */
1183 for(i = 0; i < s->nb_streams; i++) {
1184 st = s->streams[i];
1185 if (st->parser && st->need_parsing) {
1186 av_parser_parse2(st->parser, st->codec,
1187 &pkt->data, &pkt->size,
1188 NULL, 0,
1189 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1190 AV_NOPTS_VALUE);
1191 if (pkt->size)
1192 goto got_packet;
1193 }
1194 }
1195 /* no more packets: really terminate parsing */
1196 return ret;
1197 }
1198 st = s->streams[cur_pkt.stream_index];
1199 st->cur_pkt= cur_pkt;
1200
1201 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1202 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1203 st->cur_pkt.pts < st->cur_pkt.dts){
1204 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1205 st->cur_pkt.stream_index,
1206 st->cur_pkt.pts,
1207 st->cur_pkt.dts,
1208 st->cur_pkt.size);
1209 // av_free_packet(&st->cur_pkt);
1210 // return -1;
1211 }
1212
1213 if(s->debug & FF_FDEBUG_TS)
1214 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1215 st->cur_pkt.stream_index,
1216 st->cur_pkt.pts,
1217 st->cur_pkt.dts,
1218 st->cur_pkt.size,
1219 st->cur_pkt.duration,
1220 st->cur_pkt.flags);
1221
1222 s->cur_st = st;
1223 st->cur_ptr = st->cur_pkt.data;
1224 st->cur_len = st->cur_pkt.size;
1225 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1226 st->parser = av_parser_init(st->codec->codec_id);
1227 if (!st->parser) {
1228 /* no parser available: just output the raw packets */
1229 st->need_parsing = AVSTREAM_PARSE_NONE;
1230 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1231 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1232 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1233 st->parser->flags |= PARSER_FLAG_ONCE;
1234 }
1235 }
1236 }
1237 }
1238 if(s->debug & FF_FDEBUG_TS)
1239 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1240 pkt->stream_index,
1241 pkt->pts,
1242 pkt->dts,
1243 pkt->size,
1244 pkt->duration,
1245 pkt->flags);
1246
1247 return 0;
1248 }
1249
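/* When AVFMT_FLAG_GENPTS is set, packets are buffered so that a missing pts
 * can be filled in from the dts of a later packet of the same stream before
 * the packet is returned; otherwise packets come straight from
 * read_frame_internal(). */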
1250 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1251 {
1252 AVPacketList *pktl;
1253 int eof=0;
1254 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1255
1256 for(;;){
1257 pktl = s->packet_buffer;
1258 if (pktl) {
1259 AVPacket *next_pkt= &pktl->pkt;
1260
1261 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1262 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1263 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1264 if( pktl->pkt.stream_index == next_pkt->stream_index
1265 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1266 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1267 next_pkt->pts= pktl->pkt.dts;
1268 }
1269 pktl= pktl->next;
1270 }
1271 pktl = s->packet_buffer;
1272 }
1273
1274 if( next_pkt->pts != AV_NOPTS_VALUE
1275 || next_pkt->dts == AV_NOPTS_VALUE
1276 || !genpts || eof){
1277 /* read packet from packet buffer, if there is data */
1278 *pkt = *next_pkt;
1279 s->packet_buffer = pktl->next;
1280 av_free(pktl);
1281 return 0;
1282 }
1283 }
1284 if(genpts){
1285 int ret= read_frame_internal(s, pkt);
1286 if(ret<0){
1287 if(pktl && ret != AVERROR(EAGAIN)){
1288 eof=1;
1289 continue;
1290 }else
1291 return ret;
1292 }
1293
1294 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1295 &s->packet_buffer_end)) < 0)
1296 return AVERROR(ENOMEM);
1297 }else{
1298 assert(!s->packet_buffer);
1299 return read_frame_internal(s, pkt);
1300 }
1301 }
1302 }
1303
1304 /* XXX: suppress the packet queue */
1305 static void flush_packet_queue(AVFormatContext *s)
1306 {
1307 AVPacketList *pktl;
1308
1309 for(;;) {
1310 pktl = s->packet_buffer;
1311 if (!pktl)
1312 break;
1313 s->packet_buffer = pktl->next;
1314 av_free_packet(&pktl->pkt);
1315 av_free(pktl);
1316 }
1317 while(s->raw_packet_buffer){
1318 pktl = s->raw_packet_buffer;
1319 s->raw_packet_buffer = pktl->next;
1320 av_free_packet(&pktl->pkt);
1321 av_free(pktl);
1322 }
1323 s->packet_buffer_end=
1324 s->raw_packet_buffer_end= NULL;
1325 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1326 }
1327
1328 /*******************************************************/
1329 /* seek support */
1330
1331 int av_find_default_stream_index(AVFormatContext *s)
1332 {
1333 int first_audio_index = -1;
1334 int i;
1335 AVStream *st;
1336
1337 if (s->nb_streams <= 0)
1338 return -1;
1339 for(i = 0; i < s->nb_streams; i++) {
1340 st = s->streams[i];
1341 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1342 return i;
1343 }
1344 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1345 first_audio_index = i;
1346 }
1347 return first_audio_index >= 0 ? first_audio_index : 0;
1348 }
1349
1350 /**
1351 * Flush the frame reader.
1352 */
1353 void ff_read_frame_flush(AVFormatContext *s)
1354 {
1355 AVStream *st;
1356 int i, j;
1357
1358 flush_packet_queue(s);
1359
1360 s->cur_st = NULL;
1361
1362 /* for each stream, reset read state */
1363 for(i = 0; i < s->nb_streams; i++) {
1364 st = s->streams[i];
1365
1366 if (st->parser) {
1367 av_parser_close(st->parser);
1368 st->parser = NULL;
1369 av_free_packet(&st->cur_pkt);
1370 }
1371 st->last_IP_pts = AV_NOPTS_VALUE;
1372 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1373 st->reference_dts = AV_NOPTS_VALUE;
1374 /* fail safe */
1375 st->cur_ptr = NULL;
1376 st->cur_len = 0;
1377
1378 st->probe_packets = MAX_PROBE_PACKETS;
1379
1380 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1381 st->pts_buffer[j]= AV_NOPTS_VALUE;
1382 }
1383 }
1384
1385 #if FF_API_SEEK_PUBLIC
1386 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1387 {
1388 ff_update_cur_dts(s, ref_st, timestamp);
1389 }
1390 #endif
1391
1392 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1393 {
1394 int i;
1395
1396 for(i = 0; i < s->nb_streams; i++) {
1397 AVStream *st = s->streams[i];
1398
1399 st->cur_dts = av_rescale(timestamp,
1400 st->time_base.den * (int64_t)ref_st->time_base.num,
1401 st->time_base.num * (int64_t)ref_st->time_base.den);
1402 }
1403 }
1404
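/* If the index of this stream has grown beyond s->max_index_size bytes,
 * drop every second entry so that roughly half of them are kept. */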
1405 void ff_reduce_index(AVFormatContext *s, int stream_index)
1406 {
1407 AVStream *st= s->streams[stream_index];
1408 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1409
1410 if((unsigned)st->nb_index_entries >= max_entries){
1411 int i;
1412 for(i=0; 2*i<st->nb_index_entries; i++)
1413 st->index_entries[i]= st->index_entries[2*i];
1414 st->nb_index_entries= i;
1415 }
1416 }
1417
1418 int ff_add_index_entry(AVIndexEntry **index_entries,
1419 int *nb_index_entries,
1420 unsigned int *index_entries_allocated_size,
1421 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1422 {
1423 AVIndexEntry *entries, *ie;
1424 int index;
1425
1426 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1427 return -1;
1428
1429 entries = av_fast_realloc(*index_entries,
1430 index_entries_allocated_size,
1431 (*nb_index_entries + 1) *
1432 sizeof(AVIndexEntry));
1433 if(!entries)
1434 return -1;
1435
1436 *index_entries= entries;
1437
1438 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1439
1440 if(index<0){
1441 index= (*nb_index_entries)++;
1442 ie= &entries[index];
1443 assert(index==0 || ie[-1].timestamp < timestamp);
1444 }else{
1445 ie= &entries[index];
1446 if(ie->timestamp != timestamp){
1447 if(ie->timestamp <= timestamp)
1448 return -1;
1449 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1450 (*nb_index_entries)++;
1451 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1452 distance= ie->min_distance;
1453 }
1454
1455 ie->pos = pos;
1456 ie->timestamp = timestamp;
1457 ie->min_distance= distance;
1458 ie->size= size;
1459 ie->flags = flags;
1460
1461 return index;
1462 }
1463
1464 int av_add_index_entry(AVStream *st,
1465 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1466 {
1467 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1468 &st->index_entries_allocated_size, pos,
1469 timestamp, size, distance, flags);
1470 }
1471
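/* Binary-search 'entries' for wanted_timestamp: with AVSEEK_FLAG_BACKWARD the
 * last entry with a timestamp <= the target is chosen, otherwise the first
 * entry with a timestamp >= it; unless AVSEEK_FLAG_ANY is set, the result is
 * then moved in the seek direction until a keyframe entry is found.
 * Returns the entry index, or -1 if no suitable entry exists. */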
1472 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1473 int64_t wanted_timestamp, int flags)
1474 {
1475 int a, b, m;
1476 int64_t timestamp;
1477
1478 a = - 1;
1479 b = nb_entries;
1480
1481 //optimize appending index entries at the end
1482 if(b && entries[b-1].timestamp < wanted_timestamp)
1483 a= b-1;
1484
1485 while (b - a > 1) {
1486 m = (a + b) >> 1;
1487 timestamp = entries[m].timestamp;
1488 if(timestamp >= wanted_timestamp)
1489 b = m;
1490 if(timestamp <= wanted_timestamp)
1491 a = m;
1492 }
1493 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1494
1495 if(!(flags & AVSEEK_FLAG_ANY)){
1496 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1497 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1498 }
1499 }
1500
1501 if(m == nb_entries)
1502 return -1;
1503 return m;
1504 }
1505
1506 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1507 int flags)
1508 {
1509 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1510 wanted_timestamp, flags);
1511 }
1512
1513 #if FF_API_SEEK_PUBLIC
1514 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1515 return ff_seek_frame_binary(s, stream_index, target_ts, flags);
1516 }
1517 #endif
1518
1519 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1520 {
1521 AVInputFormat *avif= s->iformat;
1522 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1523 int64_t ts_min, ts_max, ts;
1524 int index;
1525 int64_t ret;
1526 AVStream *st;
1527
1528 if (stream_index < 0)
1529 return -1;
1530
1531 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1532
1533 ts_max=
1534 ts_min= AV_NOPTS_VALUE;
1535 pos_limit= -1; //gcc falsely says it may be uninitialized
1536
1537 st= s->streams[stream_index];
1538 if(st->index_entries){
1539 AVIndexEntry *e;
1540
1541 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1542 index= FFMAX(index, 0);
1543 e= &st->index_entries[index];
1544
1545 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1546 pos_min= e->pos;
1547 ts_min= e->timestamp;
1548 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1549 pos_min,ts_min);
1550 }else{
1551 assert(index==0);
1552 }
1553
1554 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1555 assert(index < st->nb_index_entries);
1556 if(index >= 0){
1557 e= &st->index_entries[index];
1558 assert(e->timestamp >= target_ts);
1559 pos_max= e->pos;
1560 ts_max= e->timestamp;
1561 pos_limit= pos_max - e->min_distance;
1562 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1563 pos_max,pos_limit, ts_max);
1564 }
1565 }
1566
1567 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1568 if(pos<0)
1569 return -1;
1570
1571 /* do the seek */
1572 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1573 return ret;
1574
1575 ff_update_cur_dts(s, st, ts);
1576
1577 return 0;
1578 }
1579
1580 #if FF_API_SEEK_PUBLIC
1581 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1582 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1583 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1584 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1585 {
1586 return ff_gen_search(s, stream_index, target_ts, pos_min, pos_max,
1587 pos_limit, ts_min, ts_max, flags, ts_ret,
1588 read_timestamp);
1589 }
1590 #endif
1591
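/* Generic timestamp-based search for a seek position: the [pos_min, pos_max]
 * byte range is narrowed using read_timestamp(), first by linear interpolation
 * towards target_ts, then by bisection, and finally by linear scanning when no
 * progress is made. On success the chosen byte position is returned and its
 * timestamp is stored in *ts_ret; -1 is returned on failure. */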
1592 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1593 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1594 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1595 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1596 {
1597 int64_t pos, ts;
1598 int64_t start_pos, filesize;
1599 int no_change;
1600
1601 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1602
1603 if(ts_min == AV_NOPTS_VALUE){
1604 pos_min = s->data_offset;
1605 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1606 if (ts_min == AV_NOPTS_VALUE)
1607 return -1;
1608 }
1609
1610 if(ts_max == AV_NOPTS_VALUE){
1611 int step= 1024;
1612 filesize = avio_size(s->pb);
1613 pos_max = filesize - 1;
1614 do{
1615 pos_max -= step;
1616 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1617 step += step;
1618 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1619 if (ts_max == AV_NOPTS_VALUE)
1620 return -1;
1621
1622 for(;;){
1623 int64_t tmp_pos= pos_max + 1;
1624 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1625 if(tmp_ts == AV_NOPTS_VALUE)
1626 break;
1627 ts_max= tmp_ts;
1628 pos_max= tmp_pos;
1629 if(tmp_pos >= filesize)
1630 break;
1631 }
1632 pos_limit= pos_max;
1633 }
1634
1635 if(ts_min > ts_max){
1636 return -1;
1637 }else if(ts_min == ts_max){
1638 pos_limit= pos_min;
1639 }
1640
1641 no_change=0;
1642 while (pos_min < pos_limit) {
1643 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1644 pos_min, pos_max, ts_min, ts_max);
1645 assert(pos_limit <= pos_max);
1646
1647 if(no_change==0){
1648 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1649 // interpolate position (better than dichotomy)
1650 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1651 + pos_min - approximate_keyframe_distance;
1652 }else if(no_change==1){
1653 // bisection, if interpolation failed to change min or max pos last time
1654 pos = (pos_min + pos_limit)>>1;
1655 }else{
1656 /* linear search if bisection failed, can only happen if there
1657 are very few or no keyframes between min/max */
1658 pos=pos_min;
1659 }
1660 if(pos <= pos_min)
1661 pos= pos_min + 1;
1662 else if(pos > pos_limit)
1663 pos= pos_limit;
1664 start_pos= pos;
1665
1666 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1667 if(pos == pos_max)
1668 no_change++;
1669 else
1670 no_change=0;
1671 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1672 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1673 pos_limit, start_pos, no_change);
1674 if(ts == AV_NOPTS_VALUE){
1675 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1676 return -1;
1677 }
1678 assert(ts != AV_NOPTS_VALUE);
1679 if (target_ts <= ts) {
1680 pos_limit = start_pos - 1;
1681 pos_max = pos;
1682 ts_max = ts;
1683 }
1684 if (target_ts >= ts) {
1685 pos_min = pos;
1686 ts_min = ts;
1687 }
1688 }
1689
1690 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1691 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1692 pos_min = pos;
1693 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1694 pos_min++;
1695 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1696 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1697 pos, ts_min, target_ts, ts_max);
1698 *ts_ret= ts;
1699 return pos;
1700 }
1701
1702 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1703 int64_t pos_min, pos_max;
1704 #if 0
1705 AVStream *st;
1706
1707 if (stream_index < 0)
1708 return -1;
1709
1710 st= s->streams[stream_index];
1711 #endif
1712
1713 pos_min = s->data_offset;
1714 pos_max = avio_size(s->pb) - 1;
1715
1716 if (pos < pos_min) pos= pos_min;
1717 else if(pos > pos_max) pos= pos_max;
1718
1719 avio_seek(s->pb, pos, SEEK_SET);
1720
1721 #if 0
1722 av_update_cur_dts(s, st, ts);
1723 #endif
1724 return 0;
1725 }
1726
1727 static int seek_frame_generic(AVFormatContext *s,
1728 int stream_index, int64_t timestamp, int flags)
1729 {
1730 int index;
1731 int64_t ret;
1732 AVStream *st;
1733 AVIndexEntry *ie;
1734
1735 st = s->streams[stream_index];
1736
1737 index = av_index_search_timestamp(st, timestamp, flags);
1738
1739 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1740 return -1;
1741
1742 if(index < 0 || index==st->nb_index_entries-1){
1743 AVPacket pkt;
1744
1745 if(st->nb_index_entries){
1746 assert(st->index_entries);
1747 ie= &st->index_entries[st->nb_index_entries-1];
1748 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1749 return ret;
1750 ff_update_cur_dts(s, st, ie->timestamp);
1751 }else{
1752 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1753 return ret;
1754 }
1755 for (;;) {
1756 int read_status;
1757 do{
1758 read_status = av_read_frame(s, &pkt);
1759 } while (read_status == AVERROR(EAGAIN));
1760 if (read_status < 0)
1761 break;
1762 av_free_packet(&pkt);
1763 if(stream_index == pkt.stream_index){
1764 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1765 break;
1766 }
1767 }
1768 index = av_index_search_timestamp(st, timestamp, flags);
1769 }
1770 if (index < 0)
1771 return -1;
1772
1773 ff_read_frame_flush(s);
1774 if (s->iformat->read_seek){
1775 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1776 return 0;
1777 }
1778 ie = &st->index_entries[index];
1779 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1780 return ret;
1781 ff_update_cur_dts(s, st, ie->timestamp);
1782
1783 return 0;
1784 }
1785
1786 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1787 {
1788 int ret;
1789 AVStream *st;
1790
1791 if (flags & AVSEEK_FLAG_BYTE) {
1792 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1793 return -1;
1794 ff_read_frame_flush(s);
1795 return seek_frame_byte(s, stream_index, timestamp, flags);
1796 }
1797
1798 if(stream_index < 0){
1799 stream_index= av_find_default_stream_index(s);
1800 if(stream_index < 0)
1801 return -1;
1802
1803 st= s->streams[stream_index];
1804 /* timestamp for the default stream must be expressed in AV_TIME_BASE units */
1805 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1806 }
1807
1808 /* first, we try the format specific seek */
1809 if (s->iformat->read_seek) {
1810 ff_read_frame_flush(s);
1811 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1812 } else
1813 ret = -1;
1814 if (ret >= 0) {
1815 return 0;
1816 }
1817
1818 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1819 ff_read_frame_flush(s);
1820 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1821 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1822 ff_read_frame_flush(s);
1823 return seek_frame_generic(s, stream_index, timestamp, flags);
1824 }
1825 else
1826 return -1;
1827 }
1828
1829 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1830 {
1831 if(min_ts > ts || max_ts < ts)
1832 return -1;
1833
1834 if (s->iformat->read_seek2) {
1835 ff_read_frame_flush(s);
1836 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1837 }
1838
1839 if(s->iformat->read_timestamp){
1840 //try to seek via read_timestamp()
1841 }
1842
1843 //Fall back to the old API if the new one is not implemented but the old one is
1844 //Note that the old API has somewhat different semantics
1845 if(s->iformat->read_seek || 1)
1846 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1847
1848 // try some generic seek like seek_frame_generic() but with new ts semantics
1849 }
1850
1851 /*******************************************************/
1852
1853 /**
1854 * Return TRUE if the given context has an accurate duration in any of its streams.
1855 *
1856 * @return TRUE if at least one stream has an accurate duration.
1857 */
1858 static int has_duration(AVFormatContext *ic)
1859 {
1860 int i;
1861 AVStream *st;
1862
1863 for(i = 0;i < ic->nb_streams; i++) {
1864 st = ic->streams[i];
1865 if (st->duration != AV_NOPTS_VALUE)
1866 return 1;
1867 }
1868 return 0;
1869 }
1870
1871 /**
1872 * Estimate the global stream timings from those of each component stream.
1873 *
1874 * Also computes the global bitrate if possible.
1875 */
1876 static void update_stream_timings(AVFormatContext *ic)
1877 {
1878 int64_t start_time, start_time1, end_time, end_time1;
1879 int64_t duration, duration1, filesize;
1880 int i;
1881 AVStream *st;
1882
1883 start_time = INT64_MAX;
1884 end_time = INT64_MIN;
1885 duration = INT64_MIN;
1886 for(i = 0;i < ic->nb_streams; i++) {
1887 st = ic->streams[i];
1888 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1889 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1890 start_time = FFMIN(start_time, start_time1);
1891 if (st->duration != AV_NOPTS_VALUE) {
1892 end_time1 = start_time1
1893 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1894 end_time = FFMAX(end_time, end_time1);
1895 }
1896 }
1897 if (st->duration != AV_NOPTS_VALUE) {
1898 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1899 duration = FFMAX(duration, duration1);
1900 }
1901 }
1902 if (start_time != INT64_MAX) {
1903 ic->start_time = start_time;
1904 if (end_time != INT64_MIN)
1905 duration = FFMAX(duration, end_time - start_time);
1906 }
1907 if (duration != INT64_MIN) {
1908 ic->duration = duration;
1909 if (ic->pb && (filesize = avio_size(ic->pb)) > 0) {
1910 /* compute the bitrate */
1911 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
1912 (double)ic->duration;
1913 }
1914 }
1915 }
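/* Worked example (illustrative numbers): a 6 000 000 byte file with a computed
 * duration of 60 seconds (60 * AV_TIME_BASE microseconds) yields
 * bit_rate = 6e6 * 8 * AV_TIME_BASE / (60 * AV_TIME_BASE) = 48e6 / 60
 * = 800 000 bits per second. */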
1916
1917 static void fill_all_stream_timings(AVFormatContext *ic)
1918 {
1919 int i;
1920 AVStream *st;
1921
1922 update_stream_timings(ic);
1923 for(i = 0;i < ic->nb_streams; i++) {
1924 st = ic->streams[i];
1925 if (st->start_time == AV_NOPTS_VALUE) {
1926 if(ic->start_time != AV_NOPTS_VALUE)
1927 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1928 if(ic->duration != AV_NOPTS_VALUE)
1929 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1930 }
1931 }
1932 }
1933
1934 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1935 {
1936 int64_t filesize, duration;
1937 int bit_rate, i;
1938 AVStream *st;
1939
1940 /* if bit_rate is already set, we believe it */
1941 if (ic->bit_rate <= 0) {
1942 bit_rate = 0;
1943 for(i=0;i<ic->nb_streams;i++) {
1944 st = ic->streams[i];
1945 if (st->codec->bit_rate > 0)
1946 bit_rate += st->codec->bit_rate;
1947 }
1948 ic->bit_rate = bit_rate;
1949 }
1950
1951 /* if duration is already set, we believe it */
1952 if (ic->duration == AV_NOPTS_VALUE &&
1953 ic->bit_rate != 0) {
1954 filesize = ic->pb ? avio_size(ic->pb) : 0;
1955 if (filesize > 0) {
1956 for(i = 0; i < ic->nb_streams; i++) {
1957 st = ic->streams[i];
1958 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1959 if (st->duration == AV_NOPTS_VALUE)
1960 st->duration = duration;
1961 }
1962 }
1963 }
1964 }
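/* Worked example (illustrative numbers): with a 1 000 000 byte file, a total
 * bit rate of 160 000 b/s and a stream time base of 1/90000, the estimate is
 * av_rescale(8 000 000, 90000, 160 000) = 4 500 000 ticks, i.e. 50 seconds. */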
1965
1966 #define DURATION_MAX_READ_SIZE 250000
1967 #define DURATION_MAX_RETRY 3
1968
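/* With the defaults above, each retry starts the scan further before the end
 * of the file: 250 000 bytes back on the first pass, then 500 000, 1 000 000
 * and finally 2 000 000 bytes, before the estimation gives up. */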
1969 /* only used for MPEG-PS and MPEG-TS streams */
1970 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1971 {
1972 AVPacket pkt1, *pkt = &pkt1;
1973 AVStream *st;
1974 int read_size, i, ret;
1975 int64_t end_time;
1976 int64_t filesize, offset, duration;
1977 int retry=0;
1978
1979 ic->cur_st = NULL;
1980
1981 /* flush packet queue */
1982 flush_packet_queue(ic);
1983
1984 for (i=0; i<ic->nb_streams; i++) {
1985 st = ic->streams[i];
1986 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1987 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
1988
1989 if (st->parser) {
1990 av_parser_close(st->parser);
1991 st->parser= NULL;
1992 av_free_packet(&st->cur_pkt);
1993 }
1994 }
1995
1996 /* estimate the end time (duration) */
1997 /* XXX: may need to support wrapping */
1998 filesize = ic->pb ? avio_size(ic->pb) : 0;
1999 end_time = AV_NOPTS_VALUE;
2000 do{
2001 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2002 if (offset < 0)
2003 offset = 0;
2004
2005 avio_seek(ic->pb, offset, SEEK_SET);
2006 read_size = 0;
2007 for(;;) {
2008 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2009 break;
2010
2011 do {
2012 ret = av_read_packet(ic, pkt);
2013 } while(ret == AVERROR(EAGAIN));
2014 if (ret != 0)
2015 break;
2016 read_size += pkt->size;
2017 st = ic->streams[pkt->stream_index];
2018 if (pkt->pts != AV_NOPTS_VALUE &&
2019 (st->start_time != AV_NOPTS_VALUE ||
2020 st->first_dts != AV_NOPTS_VALUE)) {
2021 duration = end_time = pkt->pts;
2022 if (st->start_time != AV_NOPTS_VALUE)
2023 duration -= st->start_time;
2024 else
2025 duration -= st->first_dts;
2026 if (duration < 0)
2027 duration += 1LL<<st->pts_wrap_bits;
2028 if (duration > 0) {
2029 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
2030 st->duration = duration;
2031 }
2032 }
2033 av_free_packet(pkt);
2034 }
2035 }while( end_time==AV_NOPTS_VALUE
2036 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2037 && ++retry <= DURATION_MAX_RETRY);
2038
2039 fill_all_stream_timings(ic);
2040
2041 avio_seek(ic->pb, old_offset, SEEK_SET);
2042 for (i=0; i<ic->nb_streams; i++) {
2043 st= ic->streams[i];
2044 st->cur_dts= st->first_dts;
2045 st->last_IP_pts = AV_NOPTS_VALUE;
2046 st->reference_dts = AV_NOPTS_VALUE;
2047 }
2048 }
2049
2050 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2051 {
2052 int64_t file_size;
2053
2054 /* get the file size, if possible */
2055 if (ic->iformat->flags & AVFMT_NOFILE) {
2056 file_size = 0;
2057 } else {
2058 file_size = avio_size(ic->pb);
2059 file_size = FFMAX(0, file_size);
2060 }
2061
2062 if ((!strcmp(ic->iformat->name, "mpeg") ||
2063 !strcmp(ic->iformat->name, "mpegts")) &&
2064 file_size && ic->pb->seekable) {
2065 /* get accurate estimate from the PTSes */
2066 estimate_timings_from_pts(ic, old_offset);
2067 } else if (has_duration(ic)) {
2068 /* at least one component has timings - we use them for all
2069 the components */
2070 fill_all_stream_timings(ic);
2071 } else {
2072 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2073 /* less precise: use bitrate info */
2074 estimate_timings_from_bit_rate(ic);
2075 }
2076 update_stream_timings(ic);
2077
2078 {
2079 int i;
2080 AVStream av_unused *st;
2081 for(i = 0;i < ic->nb_streams; i++) {
2082 st = ic->streams[i];
2083 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2084 (double) st->start_time / AV_TIME_BASE,
2085 (double) st->duration / AV_TIME_BASE);
2086 }
2087 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2088 (double) ic->start_time / AV_TIME_BASE,
2089 (double) ic->duration / AV_TIME_BASE,
2090 ic->bit_rate / 1000);
2091 }
2092 }
2093
2094 static int has_codec_parameters(AVCodecContext *avctx)
2095 {
2096 int val;
2097 switch (avctx->codec_type) {
2098 case AVMEDIA_TYPE_AUDIO:
2099 val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE;
2100 if (!avctx->frame_size &&
2101 (avctx->codec_id == CODEC_ID_VORBIS ||
2102 avctx->codec_id == CODEC_ID_AAC ||
2103 avctx->codec_id == CODEC_ID_MP1 ||
2104 avctx->codec_id == CODEC_ID_MP2 ||
2105 avctx->codec_id == CODEC_ID_MP3 ||
2106 avctx->codec_id == CODEC_ID_CELT))
2107 return 0;
2108 break;
2109 case AVMEDIA_TYPE_VIDEO:
2110 val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE;
2111 break;
2112 default:
2113 val = 1;
2114 break;
2115 }
2116 return avctx->codec_id != CODEC_ID_NONE && val != 0;
2117 }
2118
2119 static int has_decode_delay_been_guessed(AVStream *st)
2120 {
2121 return st->codec->codec_id != CODEC_ID_H264 ||
2122 st->info->nb_decoded_frames >= 6;
2123 }
2124
2125 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2126 {
2127 AVCodec *codec;
2128 int got_picture, ret = 0;
2129 AVFrame picture;
2130 AVPacket pkt = *avpkt;
2131
2132 if(!st->codec->codec){
2133 codec = avcodec_find_decoder(st->codec->codec_id);
2134 if (!codec)
2135 return -1;
2136 ret = avcodec_open2(st->codec, codec, options);
2137 if (ret < 0)
2138 return ret;
2139 }
2140
2141 while (pkt.size > 0 && ret >= 0 &&
2142 (!has_codec_parameters(st->codec) ||
2143 !has_decode_delay_been_guessed(st) ||
2144 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2145 got_picture = 0;
2146 avcodec_get_frame_defaults(&picture);
2147 switch(st->codec->codec_type) {
2148 case AVMEDIA_TYPE_VIDEO:
2149 ret = avcodec_decode_video2(st->codec, &picture,
2150 &got_picture, &pkt);
2151 break;
2152 case AVMEDIA_TYPE_AUDIO:
2153 ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2154 break;
2155 default:
2156 break;
2157 }
2158 if (ret >= 0) {
2159 if (got_picture)
2160 st->info->nb_decoded_frames++;
2161 pkt.data += ret;
2162 pkt.size -= ret;
2163 }
2164 }
2165 return ret;
2166 }
2167
2168 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2169 {
2170 while (tags->id != CODEC_ID_NONE) {
2171 if (tags->id == id)
2172 return tags->tag;
2173 tags++;
2174 }
2175 return 0;
2176 }
2177
2178 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2179 {
2180 int i;
2181 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2182 if(tag == tags[i].tag)
2183 return tags[i].id;
2184 }
2185 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2186 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2187 return tags[i].id;
2188 }
2189 return CODEC_ID_NONE;
2190 }
2191
2192 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2193 {
2194 int i;
2195 for(i=0; tags && tags[i]; i++){
2196 int tag= ff_codec_get_tag(tags[i], id);
2197 if(tag) return tag;
2198 }
2199 return 0;
2200 }
2201
2202 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2203 {
2204 int i;
2205 for(i=0; tags && tags[i]; i++){
2206 enum CodecID id= ff_codec_get_id(tags[i], tag);
2207 if(id!=CODEC_ID_NONE) return id;
2208 }
2209 return CODEC_ID_NONE;
2210 }
2211
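/*
 * Fill in missing chapter end times: a chapter with an unknown end is extended
 * up to the start of the next chapter or up to the end of the file
 * (start_time + duration), whichever comes first.
 */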
2212 static void compute_chapters_end(AVFormatContext *s)
2213 {
2214 unsigned int i, j;
2215 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2216
2217 for (i = 0; i < s->nb_chapters; i++)
2218 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2219 AVChapter *ch = s->chapters[i];
2220 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2221 : INT64_MAX;
2222
2223 for (j = 0; j < s->nb_chapters; j++) {
2224 AVChapter *ch1 = s->chapters[j];
2225 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2226 if (j != i && next_start > ch->start && next_start < end)
2227 end = next_start;
2228 }
2229 ch->end = (end == INT64_MAX) ? ch->start : end;
2230 }
2231 }
2232
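/*
 * Map an index onto a candidate frame rate, scaled by 12*1001, for the frame
 * rate guessing code in avformat_find_stream_info(): indices below 60*12
 * correspond to n/12 fps for n = 1..719, the remaining entries to the
 * NTSC-style rates 24000/1001, 30000/1001, 60000/1001, 12000/1001 and
 * 15000/1001 fps.
 */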
2233 static int get_std_framerate(int i){
2234 if(i<60*12) return i*1001;
2235 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2236 }
2237
2238 /*
2239 * Is the time base unreliable.
2240 * This is a heuristic to balance between quick acceptance of the values in
2241 * the headers vs. some extra checks.
2242 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2243 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2244 * And there are "variable" fps files this needs to detect as well.
2245 */
2246 static int tb_unreliable(AVCodecContext *c){
2247 if( c->time_base.den >= 101L*c->time_base.num
2248 || c->time_base.den < 5L*c->time_base.num
2249 /* || c->codec_tag == AV_RL32("DIVX")
2250 || c->codec_tag == AV_RL32("XVID")*/
2251 || c->codec_id == CODEC_ID_MPEG2VIDEO
2252 || c->codec_id == CODEC_ID_H264
2253 )
2254 return 1;
2255 return 0;
2256 }
2257
2258 #if FF_API_FORMAT_PARAMETERS
2259 int av_find_stream_info(AVFormatContext *ic)
2260 {
2261 return avformat_find_stream_info(ic, NULL);
2262 }
2263 #endif
2264
2265 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2266 {
2267 int i, count, ret, read_size, j;
2268 AVStream *st;
2269 AVPacket pkt1, *pkt;
2270 int64_t old_offset = avio_tell(ic->pb);
2271 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2272
2273 for(i=0;i<ic->nb_streams;i++) {
2274 AVCodec *codec;
2275 st = ic->streams[i];
2276
2277 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2278 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2279 /* if(!st->time_base.num)
2280 st->time_base= */
2281 if(!st->codec->time_base.num)
2282 st->codec->time_base= st->time_base;
2283 }
2284 //only for the split stuff
2285 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2286 st->parser = av_parser_init(st->codec->codec_id);
2287 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2288 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2289 }
2290 }
2291 assert(!st->codec->codec);
2292 codec = avcodec_find_decoder(st->codec->codec_id);
2293
2294 /* Ensure that subtitle_header is properly set. */
2295 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2296 && codec && !st->codec->codec)
2297 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2298
2299 //try to just open decoders, in case this is enough to get parameters
2300 if(!has_codec_parameters(st->codec)){
2301 if (codec && !st->codec->codec)
2302 avcodec_open2(st->codec, codec, options ? &options[i] : NULL);
2303 }
2304 }
2305
2306 for (i=0; i<ic->nb_streams; i++) {
2307 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2308 }
2309
2310 count = 0;
2311 read_size = 0;
2312 for(;;) {
2313 if (ff_check_interrupt(&ic->interrupt_callback)){
2314 ret= AVERROR_EXIT;
2315 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2316 break;
2317 }
2318
2319 /* check if one codec still needs to be handled */
2320 for(i=0;i<ic->nb_streams;i++) {
2321 int fps_analyze_framecount = 20;
2322
2323 st = ic->streams[i];
2324 if (!has_codec_parameters(st->codec))
2325 break;
2326 /* if the timebase is coarse (like the usual millisecond precision
2327 of mkv), we need to analyze more frames to reliably arrive at
2328 the correct fps */
2329 if (av_q2d(st->time_base) > 0.0005)
2330 fps_analyze_framecount *= 2;
2331 if (ic->fps_probe_size >= 0)
2332 fps_analyze_framecount = ic->fps_probe_size;
2333 /* variable fps and no guess at the real fps */
2334 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2335 && st->info->duration_count < fps_analyze_framecount
2336 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2337 break;
2338 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2339 break;
2340 if(st->first_dts == AV_NOPTS_VALUE)
2341 break;
2342 }
2343 if (i == ic->nb_streams) {
2344 /* NOTE: if the format has no header, then we need to read
2345 some packets to get most of the streams, so we cannot
2346 stop here */
2347 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2348 /* if we found the info for all the codecs, we can stop */
2349 ret = count;
2350 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2351 break;
2352 }
2353 }
2354 /* we did not get all the codec info, but we read too much data */
2355 if (read_size >= ic->probesize) {
2356 ret = count;
2357 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2358 break;
2359 }
2360
2361 /* NOTE: a new stream can be added here if the file has no header
2362 (AVFMTCTX_NOHEADER) */
2363 ret = read_frame_internal(ic, &pkt1);
2364 if (ret == AVERROR(EAGAIN))
2365 continue;
2366
2367 if (ret < 0) {
2368 /* EOF or error */
2369 ret = -1; /* we could not have all the codec parameters before EOF */
2370 for(i=0;i<ic->nb_streams;i++) {
2371 st = ic->streams[i];
2372 if (!has_codec_parameters(st->codec)){
2373 char buf[256];
2374 avcodec_string(buf, sizeof(buf), st->codec, 0);
2375 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2376 } else {
2377 ret = 0;
2378 }
2379 }
2380 break;
2381 }
2382
2383 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2384 if ((ret = av_dup_packet(pkt)) < 0)
2385 goto find_stream_info_err;
2386
2387 read_size += pkt->size;
2388
2389 st = ic->streams[pkt->stream_index];
2390 if (st->codec_info_nb_frames>1) {
2391 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2392 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2393 break;
2394 }
2395 st->info->codec_info_duration += pkt->duration;
2396 }
2397 {
2398 int64_t last = st->info->last_dts;
2399
2400 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2401 int64_t duration= pkt->dts - last;
2402 double dur= duration * av_q2d(st->time_base);
2403
2404 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2405 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2406 if (st->info->duration_count < 2)
2407 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
2408 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2409 int framerate= get_std_framerate(i);
2410 int ticks= lrintf(dur*framerate/(1001*12));
2411 double error = dur - (double)ticks*1001*12 / framerate;
2412 st->info->duration_error[i] += error*error;
2413 }
2414 st->info->duration_count++;
2415 // ignore the first few values; they might have some random jitter
2416 if (st->info->duration_count > 3)
2417 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2418 }
2419 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2420 st->info->last_dts = pkt->dts;
2421 }
2422 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2423 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2424 if(i){
2425 st->codec->extradata_size= i;
2426 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2427 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2428 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2429 }
2430 }
2431
2432 /* If we still have no information, try to open the codec and
2433 decompress a frame. We try to avoid that in most cases, as
2434 it takes longer and uses more memory. For MPEG-4, we need to
2435 decompress for QuickTime.
2436
2437 If CODEC_CAP_CHANNEL_CONF is set, this forces decoding of at
2438 least one frame of codec data; this makes sure the codec initializes
2439 the channel configuration and does not only trust the values from the container.
2440 */
2441 try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i] : NULL);
2442
2443 st->codec_info_nb_frames++;
2444 count++;
2445 }
2446
2447 // close codecs which were opened in try_decode_frame()
2448 for(i=0;i<ic->nb_streams;i++) {
2449 st = ic->streams[i];
2450 if(st->codec->codec)
2451 avcodec_close(st->codec);
2452 }
2453 for(i=0;i<ic->nb_streams;i++) {
2454 st = ic->streams[i];
2455 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2456 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2457 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2458 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2459 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2460 // the check for tb_unreliable() is not completely correct, since this is not about handling
2461 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2462 // ipmovie.c produces.
2463 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2464 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2465 if (st->info->duration_count && !st->r_frame_rate.num
2466 && tb_unreliable(st->codec) /*&&
2467 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2468 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2469 int num = 0;
2470 double best_error= 2*av_q2d(st->time_base);
2471 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2472
2473 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2474 double error = st->info->duration_error[j] * get_std_framerate(j);
2475 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2476 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2477 if(error < best_error){
2478 best_error= error;
2479 num = get_std_framerate(j);
2480 }
2481 }
2482 // do not increase frame rate by more than 1 % in order to match a standard rate.
2483 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2484 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2485 }
2486
2487 if (!st->r_frame_rate.num){
2488 if( st->codec->time_base.den * (int64_t)st->time_base.num
2489 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2490 st->r_frame_rate.num = st->codec->time_base.den;
2491 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2492 }else{
2493 st->r_frame_rate.num = st->time_base.den;
2494 st->r_frame_rate.den = st->time_base.num;
2495 }
2496 }
2497 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2498 if(!st->codec->bits_per_coded_sample)
2499 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2500 // set stream disposition based on audio service type
2501 switch (st->codec->audio_service_type) {
2502 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2503 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2504 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2505 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2506 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2507 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2508 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2509 st->disposition = AV_DISPOSITION_COMMENT; break;
2510 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2511 st->disposition = AV_DISPOSITION_KARAOKE; break;
2512 }
2513 }
2514 }
2515
2516 estimate_timings(ic, old_offset);
2517
2518 compute_chapters_end(ic);
2519
2520 #if 0
2521 /* correct DTS for B-frame streams with no timestamps */
2522 for(i=0;i<ic->nb_streams;i++) {
2523 st = ic->streams[i];
2524 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2525 if(b-frames){
2526 ppktl = &ic->packet_buffer;
2527 while(ppkt1){
2528 if(ppkt1->stream_index != i)
2529 continue;
2530 if(ppkt1->pkt->dts < 0)
2531 break;
2532 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2533 break;
2534 ppkt1->pkt->dts -= delta;
2535 ppkt1= ppkt1->next;
2536 }
2537 if(ppkt1)
2538 continue;
2539 st->cur_dts -= delta;
2540 }
2541 }
2542 }
2543 #endif
2544
2545 find_stream_info_err:
2546 for (i=0; i < ic->nb_streams; i++)
2547 av_freep(&ic->streams[i]->info);
2548 return ret;
2549 }
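/* Illustrative usage sketch (not part of the library; "filename" is a
 * placeholder): probing a file before reading packets from it.
 *
 *     AVFormatContext *ic = NULL;
 *     if (avformat_open_input(&ic, filename, NULL, NULL) < 0)
 *         return -1;
 *     if (avformat_find_stream_info(ic, NULL) < 0)
 *         return -1;   // codec parameters could not be determined
 *     av_dump_format(ic, 0, filename, 0);
 */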
2550
2551 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2552 {
2553 int i, j;
2554
2555 for (i = 0; i < ic->nb_programs; i++)
2556 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2557 if (ic->programs[i]->stream_index[j] == s)
2558 return ic->programs[i];
2559 return NULL;
2560 }
2561
2562 int av_find_best_stream(AVFormatContext *ic,
2563 enum AVMediaType type,
2564 int wanted_stream_nb,
2565 int related_stream,
2566 AVCodec **decoder_ret,
2567 int flags)
2568 {
2569 int i, nb_streams = ic->nb_streams;
2570 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2571 unsigned *program = NULL;
2572 AVCodec *decoder = NULL, *best_decoder = NULL;
2573
2574 if (related_stream >= 0 && wanted_stream_nb < 0) {
2575 AVProgram *p = find_program_from_stream(ic, related_stream);
2576 if (p) {
2577 program = p->stream_index;
2578 nb_streams = p->nb_stream_indexes;
2579 }
2580 }
2581 for (i = 0; i < nb_streams; i++) {
2582 int real_stream_index = program ? program[i] : i;
2583 AVStream *st = ic->streams[real_stream_index];
2584 AVCodecContext *avctx = st->codec;
2585 if (avctx->codec_type != type)
2586 continue;
2587 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2588 continue;
2589 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2590 continue;
2591 if (decoder_ret) {
2592 decoder = avcodec_find_decoder(st->codec->codec_id);
2593 if (!decoder) {
2594 if (ret < 0)
2595 ret = AVERROR_DECODER_NOT_FOUND;
2596 continue;
2597 }
2598 }
2599 if (best_count >= st->codec_info_nb_frames)
2600 continue;
2601 best_count = st->codec_info_nb_frames;
2602 ret = real_stream_index;
2603 best_decoder = decoder;
2604 if (program && i == nb_streams - 1 && ret < 0) {
2605 program = NULL;
2606 nb_streams = ic->nb_streams;
2607 i = 0; /* no related stream found, try again with everything */
2608 }
2609 }
2610 if (decoder_ret)
2611 *decoder_ret = best_decoder;
2612 return ret;
2613 }
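/* Illustrative usage sketch (names are placeholders): pick the "best" video
 * stream of an opened context "ic" and the decoder for it in one call.
 *
 *     AVCodec *dec = NULL;
 *     int idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (idx < 0)
 *         av_log(ic, AV_LOG_ERROR, "no usable video stream found\n");
 */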
2614
2615 /*******************************************************/
2616
2617 int av_read_play(AVFormatContext *s)
2618 {
2619 if (s->iformat->read_play)
2620 return s->iformat->read_play(s);
2621 if (s->pb)
2622 return avio_pause(s->pb, 0);
2623 return AVERROR(ENOSYS);
2624 }
2625
2626 int av_read_pause(AVFormatContext *s)
2627 {
2628 if (s->iformat->read_pause)
2629 return s->iformat->read_pause(s);
2630 if (s->pb)
2631 return avio_pause(s->pb, 1);
2632 return AVERROR(ENOSYS);
2633 }
2634
2635 void av_close_input_stream(AVFormatContext *s)
2636 {
2637 flush_packet_queue(s);
2638 if (s->iformat->read_close)
2639 s->iformat->read_close(s);
2640 avformat_free_context(s);
2641 }
2642
2643 void avformat_free_context(AVFormatContext *s)
2644 {
2645 int i;
2646 AVStream *st;
2647
2648 av_opt_free(s);
2649 if (s->iformat && s->iformat->priv_class && s->priv_data)
2650 av_opt_free(s->priv_data);
2651
2652 for(i=0;i<s->nb_streams;i++) {
2653 /* free all data in a stream component */
2654 st = s->streams[i];
2655 if (st->parser) {
2656 av_parser_close(st->parser);
2657 av_free_packet(&st->cur_pkt);
2658 }
2659 av_dict_free(&st->metadata);
2660 av_free(st->index_entries);
2661 av_free(st->codec->extradata);
2662 av_free(st->codec->subtitle_header);
2663 av_free(st->codec);
2664 av_free(st->priv_data);
2665 av_free(st->info);
2666 av_free(st);
2667 }
2668 for(i=s->nb_programs-1; i>=0; i--) {
2669 av_dict_free(&s->programs[i]->metadata);
2670 av_freep(&s->programs[i]->stream_index);
2671 av_freep(&s->programs[i]);
2672 }
2673 av_freep(&s->programs);
2674 av_freep(&s->priv_data);
2675 while(s->nb_chapters--) {
2676 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2677 av_free(s->chapters[s->nb_chapters]);
2678 }
2679 av_freep(&s->chapters);
2680 av_dict_free(&s->metadata);
2681 av_freep(&s->streams);
2682 av_free(s);
2683 }
2684
2685 void av_close_input_file(AVFormatContext *s)
2686 {
2687 AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2688 NULL : s->pb;
2689 av_close_input_stream(s);
2690 if (pb)
2691 avio_close(pb);
2692 }
2693
2694 #if FF_API_NEW_STREAM
2695 AVStream *av_new_stream(AVFormatContext *s, int id)
2696 {
2697 AVStream *st = avformat_new_stream(s, NULL);
2698 if (st)
2699 st->id = id;
2700 return st;
2701 }
2702 #endif
2703
2704 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
2705 {
2706 AVStream *st;
2707 int i;
2708 AVStream **streams;
2709
2710 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2711 return NULL;
2712 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2713 if (!streams)
2714 return NULL;
2715 s->streams = streams;
2716
2717 st = av_mallocz(sizeof(AVStream));
2718 if (!st)
2719 return NULL;
2720 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2721 av_free(st);
2722 return NULL;
2723 }
2724
2725 st->codec = avcodec_alloc_context3(c);
2726 if (s->iformat) {
2727 /* no default bitrate if decoding */
2728 st->codec->bit_rate = 0;
2729 }
2730 st->index = s->nb_streams;
2731 st->start_time = AV_NOPTS_VALUE;
2732 st->duration = AV_NOPTS_VALUE;
2733 /* We set the current DTS to 0 so that formats without any timestamps
2734 but with durations get some timestamps; formats with some unknown
2735 timestamps have their first few packets buffered and the
2736 timestamps corrected before they are returned to the user. */
2737 st->cur_dts = 0;
2738 st->first_dts = AV_NOPTS_VALUE;
2739 st->probe_packets = MAX_PROBE_PACKETS;
2740
2741 /* default pts setting is MPEG-like */
2742 avpriv_set_pts_info(st, 33, 1, 90000);
2743 st->last_IP_pts = AV_NOPTS_VALUE;
2744 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2745 st->pts_buffer[i]= AV_NOPTS_VALUE;
2746 st->reference_dts = AV_NOPTS_VALUE;
2747
2748 st->sample_aspect_ratio = (AVRational){0,1};
2749
2750 s->streams[s->nb_streams++] = st;
2751 return st;
2752 }
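/* Illustrative usage sketch (not part of the library): adding an audio stream
 * to an output context "oc", with the encoder "enc" already looked up.
 *
 *     AVStream *st = avformat_new_stream(oc, enc);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->id = 1;                      // container-level stream id, if needed
 *     st->codec->sample_rate = 48000;  // fill codec parameters before writing the header
 */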
2753
2754 AVProgram *av_new_program(AVFormatContext *ac, int id)
2755 {
2756 AVProgram *program=NULL;
2757 int i;
2758
2759 av_dlog(ac, "new_program: id=0x%04x\n", id);
2760
2761 for(i=0; i<ac->nb_programs; i++)
2762 if(ac->programs[i]->id == id)
2763 program = ac->programs[i];
2764
2765 if(!program){
2766 program = av_mallocz(sizeof(AVProgram));
2767 if (!program)
2768 return NULL;
2769 dynarray_add(&ac->programs, &ac->nb_programs, program);
2770 program->discard = AVDISCARD_NONE;
2771 }
2772 program->id = id;
2773
2774 return program;
2775 }
2776
2777 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2778 {
2779 AVChapter *chapter = NULL;
2780 int i;
2781
2782 for(i=0; i<s->nb_chapters; i++)
2783 if(s->chapters[i]->id == id)
2784 chapter = s->chapters[i];
2785
2786 if(!chapter){
2787 chapter= av_mallocz(sizeof(AVChapter));
2788 if(!chapter)
2789 return NULL;
2790 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2791 }
2792 av_dict_set(&chapter->metadata, "title", title, 0);
2793 chapter->id = id;
2794 chapter->time_base= time_base;
2795 chapter->start = start;
2796 chapter->end = end;
2797
2798 return chapter;
2799 }
2800
2801 /************************************************************/
2802 /* output media file */
2803
2804 #if FF_API_FORMAT_PARAMETERS
2805 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2806 {
2807 int ret;
2808
2809 if (s->oformat->priv_data_size > 0) {
2810 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2811 if (!s->priv_data)
2812 return AVERROR(ENOMEM);
2813 if (s->oformat->priv_class) {
2814 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2815 av_opt_set_defaults(s->priv_data);
2816 }
2817 } else
2818 s->priv_data = NULL;
2819
2820 if (s->oformat->set_parameters) {
2821 ret = s->oformat->set_parameters(s, ap);
2822 if (ret < 0)
2823 return ret;
2824 }
2825 return 0;
2826 }
2827 #endif
2828
2829 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2830 {
2831 const AVCodecTag *avctag;
2832 int n;
2833 enum CodecID id = CODEC_ID_NONE;
2834 unsigned int tag = 0;
2835
2836 /**
2837 * Check that tag + id is in the table
2838 * If neither is in the table -> OK
2839 * If tag is in the table with another id -> FAIL
2840 * If id is in the table with another tag -> FAIL unless strict < normal
2841 */
2842 for (n = 0; s->oformat->codec_tag[n]; n++) {
2843 avctag = s->oformat->codec_tag[n];
2844 while (avctag->id != CODEC_ID_NONE) {
2845 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
2846 id = avctag->id;
2847 if (id == st->codec->codec_id)
2848 return 1;
2849 }
2850 if (avctag->id == st->codec->codec_id)
2851 tag = avctag->tag;
2852 avctag++;
2853 }
2854 }
2855 if (id != CODEC_ID_NONE)
2856 return 0;
2857 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
2858 return 0;
2859 return 1;
2860 }
2861
2862 #if FF_API_FORMAT_PARAMETERS
2863 int av_write_header(AVFormatContext *s)
2864 {
2865 return avformat_write_header(s, NULL);
2866 }
2867 #endif
2868
2869 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
2870 {
2871 int ret = 0, i;
2872 AVStream *st;
2873 AVDictionary *tmp = NULL;
2874
2875 if (options)
2876 av_dict_copy(&tmp, *options, 0);
2877 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
2878 goto fail;
2879
2880 // some sanity checks
2881 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2882 av_log(s, AV_LOG_ERROR, "no streams\n");
2883 ret = AVERROR(EINVAL);
2884 goto fail;
2885 }
2886
2887 for(i=0;i<s->nb_streams;i++) {
2888 st = s->streams[i];
2889
2890 switch (st->codec->codec_type) {
2891 case AVMEDIA_TYPE_AUDIO:
2892 if(st->codec->sample_rate<=0){
2893 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2894 ret = AVERROR(EINVAL);
2895 goto fail;
2896 }
2897 if(!st->codec->block_align)
2898 st->codec->block_align = st->codec->channels *
2899 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2900 break;
2901 case AVMEDIA_TYPE_VIDEO:
2902 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2903 av_log(s, AV_LOG_ERROR, "time base not set\n");
2904 ret = AVERROR(EINVAL);
2905 goto fail;
2906 }
2907 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2908 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2909 ret = AVERROR(EINVAL);
2910 goto fail;
2911 }
2912 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2913 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2914 ret = AVERROR(EINVAL);
2915 goto fail;
2916 }
2917 break;
2918 }
2919
2920 if(s->oformat->codec_tag){
2921 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2922 // the current rawvideo encoding system ends up setting the wrong codec_tag for AVI; we override it here
2923 st->codec->codec_tag= 0;
2924 }
2925 if(st->codec->codec_tag){
2926 if (!validate_codec_tag(s, st)) {
2927 char tagbuf[32];
2928 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2929 av_log(s, AV_LOG_ERROR,
2930 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2931 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2932 ret = AVERROR_INVALIDDATA;
2933 goto fail;
2934 }
2935 }else
2936 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2937 }
2938
2939 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2940 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2941 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2942 }
2943
2944 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2945 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2946 if (!s->priv_data) {
2947 ret = AVERROR(ENOMEM);
2948 goto fail;
2949 }
2950 if (s->oformat->priv_class) {
2951 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2952 av_opt_set_defaults(s->priv_data);
2953 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
2954 goto fail;
2955 }
2956 }
2957
2958 /* set muxer identification string */
2959 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2960 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2961 }
2962
2963 if(s->oformat->write_header){
2964 ret = s->oformat->write_header(s);
2965 if (ret < 0)
2966 goto fail;
2967 }
2968
2969 /* init PTS generation */
2970 for(i=0;i<s->nb_streams;i++) {
2971 int64_t den = AV_NOPTS_VALUE;
2972 st = s->streams[i];
2973
2974 switch (st->codec->codec_type) {
2975 case AVMEDIA_TYPE_AUDIO:
2976 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2977 break;
2978 case AVMEDIA_TYPE_VIDEO:
2979 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2980 break;
2981 default:
2982 break;
2983 }
2984 if (den != AV_NOPTS_VALUE) {
2985 if (den <= 0) {
2986 ret = AVERROR_INVALIDDATA;
2987 goto fail;
2988 }
2989 frac_init(&st->pts, 0, 0, den);
2990 }
2991 }
2992
2993 if (options) {
2994 av_dict_free(options);
2995 *options = tmp;
2996 }
2997 return 0;
2998 fail:
2999 av_dict_free(&tmp);
3000 return ret;
3001 }
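/* Illustrative muxing order (sketch, error handling omitted): the header must
 * be written before any packet and the trailer after the last one.
 *
 *     avformat_write_header(oc, NULL);
 *     while (have_packets(&pkt))              // have_packets() is a placeholder
 *         av_interleaved_write_frame(oc, &pkt);
 *     av_write_trailer(oc);
 */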
3002
3003 //FIXME merge with compute_pkt_fields
3004 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
3005 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
3006 int num, den, frame_size, i;
3007
3008 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
3009 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
3010
3011 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
3012 return AVERROR(EINVAL);*/
3013
3014 /* duration field */
3015 if (pkt->duration == 0) {
3016 compute_frame_duration(&num, &den, st, NULL, pkt);
3017 if (den && num) {
3018 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
3019 }
3020 }
3021
3022 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
3023 pkt->pts= pkt->dts;
3024
3025 //XXX/FIXME this is a temporary hack until all encoders output pts
3026 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
3027 pkt->dts=
3028 // pkt->pts= st->cur_dts;
3029 pkt->pts= st->pts.val;
3030 }
3031
3032 //calculate dts from pts
3033 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
3034 st->pts_buffer[0]= pkt->pts;
3035 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
3036 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
3037 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
3038 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
3039
3040 pkt->dts= st->pts_buffer[0];
3041 }
3042
3043 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
3044 av_log(s, AV_LOG_ERROR,
3045 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
3046 st->index, st->cur_dts, pkt->dts);
3047 return AVERROR(EINVAL);
3048 }
3049 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
3050 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
3051 return AVERROR(EINVAL);
3052 }
3053
3054 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
3055 st->cur_dts= pkt->dts;
3056 st->pts.val= pkt->dts;
3057
3058 /* update pts */
3059 switch (st->codec->codec_type) {
3060 case AVMEDIA_TYPE_AUDIO:
3061 frame_size = get_audio_frame_size(st->codec, pkt->size);
3062
3063 /* HACK/FIXME: we skip the initial zero-sized packets, as they most
3064 likely correspond to the encoder delay, but it would be better if we
3065 had the real timestamps from the encoder */
3066 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3067 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3068 }
3069 break;
3070 case AVMEDIA_TYPE_VIDEO:
3071 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
3072 break;
3073 default:
3074 break;
3075 }
3076 return 0;
3077 }
3078
3079 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3080 {
3081 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3082
3083 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3084 return ret;
3085
3086 ret= s->oformat->write_packet(s, pkt);
3087
3088 if (ret >= 0)
3089 s->streams[pkt->stream_index]->nb_frames++;
3090 return ret;
3091 }
3092
3093 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3094 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3095 {
3096 AVPacketList **next_point, *this_pktl;
3097
3098 this_pktl = av_mallocz(sizeof(AVPacketList));
3099 this_pktl->pkt= *pkt;
3100 pkt->destruct= NULL; // do not free original but only the copy
3101 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
3102
3103 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3104 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
3105 }else
3106 next_point = &s->packet_buffer;
3107
3108 if(*next_point){
3109 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3110 while(!compare(s, &(*next_point)->pkt, pkt)){
3111 next_point= &(*next_point)->next;
3112 }
3113 goto next_non_null;
3114 }else{
3115 next_point = &(s->packet_buffer_end->next);
3116 }
3117 }
3118 assert(!*next_point);
3119
3120 s->packet_buffer_end= this_pktl;
3121 next_non_null:
3122
3123 this_pktl->next= *next_point;
3124
3125 s->streams[pkt->stream_index]->last_in_packet_buffer=
3126 *next_point= this_pktl;
3127 }
3128
3129 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3130 {
3131 AVStream *st = s->streams[ pkt ->stream_index];
3132 AVStream *st2= s->streams[ next->stream_index];
3133 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3134 st->time_base);
3135
3136 if (comp == 0)
3137 return pkt->stream_index < next->stream_index;
3138 return comp > 0;
3139 }
3140
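/*
 * Interleave buffered packets by dts: packets are queued per stream and the
 * head of the buffer is only returned (in increasing dts order) once every
 * stream has at least one packet queued, or when flushing at the end of input.
 */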
3141 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
3142 AVPacketList *pktl;
3143 int stream_count=0;
3144 int i;
3145
3146 if(pkt){
3147 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3148 }
3149
3150 for(i=0; i < s->nb_streams; i++)
3151 stream_count+= !!s->streams[i]->last_in_packet_buffer;
3152
3153 if(stream_count && (s->nb_streams == stream_count || flush)){
3154 pktl= s->packet_buffer;
3155 *out= pktl->pkt;
3156
3157 s->packet_buffer= pktl->next;
3158 if(!s->packet_buffer)
3159 s->packet_buffer_end= NULL;
3160
3161 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3162 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3163 av_freep(&pktl);
3164 return 1;
3165 }else{
3166 av_init_packet(out);
3167 return 0;
3168 }
3169 }
3170
3171 /**
3172 * Interleave an AVPacket correctly so it can be muxed.
3173 * @param out the interleaved packet will be output here
3174 * @param in the input packet
3175 * @param flush 1 if no further packets are available as input and all
3176 * remaining packets should be output
3177 * @return 1 if a packet was output, 0 if no packet could be output,
3178 * < 0 if an error occurred
3179 */
3180 static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3181 if(s->oformat->interleave_packet)
3182 return s->oformat->interleave_packet(s, out, in, flush);
3183 else
3184 return av_interleave_packet_per_dts(s, out, in, flush);
3185 }
3186
3187 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3188 AVStream *st= s->streams[ pkt->stream_index];
3189 int ret;
3190
3191 //FIXME/XXX/HACK drop zero sized packets
3192 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3193 return 0;
3194
3195 av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
3196 pkt->size, pkt->dts, pkt->pts);
3197 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3198 return ret;
3199
3200 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3201 return AVERROR(EINVAL);
3202
3203 for(;;){
3204 AVPacket opkt;
3205 int ret= interleave_packet(s, &opkt, pkt, 0);
3206 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3207 return ret;
3208
3209 ret= s->oformat->write_packet(s, &opkt);
3210 if (ret >= 0)
3211 s->streams[opkt.stream_index]->nb_frames++;
3212
3213 av_free_packet(&opkt);
3214 pkt= NULL;
3215
3216 if(ret<0)
3217 return ret;
3218 }
3219 }
3220
3221 int av_write_trailer(AVFormatContext *s)
3222 {
3223 int ret, i;
3224
3225 for(;;){
3226 AVPacket pkt;
3227 ret= interleave_packet(s, &pkt, NULL, 1);
3228 if(ret<0) //FIXME cleanup needed for ret<0 ?
3229 goto fail;
3230 if(!ret)
3231 break;
3232
3233 ret= s->oformat->write_packet(s, &pkt);
3234 if (ret >= 0)
3235 s->streams[pkt.stream_index]->nb_frames++;
3236
3237 av_free_packet(&pkt);
3238
3239 if(ret<0)
3240 goto fail;
3241 }
3242
3243 if(s->oformat->write_trailer)
3244 ret = s->oformat->write_trailer(s);
3245 fail:
3246 for(i=0;i<s->nb_streams;i++) {
3247 av_freep(&s->streams[i]->priv_data);
3248 av_freep(&s->streams[i]->index_entries);
3249 }
3250 if (s->iformat && s->iformat->priv_class)
3251 av_opt_free(s->priv_data);
3252 av_freep(&s->priv_data);
3253 return ret;
3254 }
3255
3256 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3257 {
3258 int i, j;
3259 AVProgram *program=NULL;
3260 void *tmp;
3261
3262 if (idx >= ac->nb_streams) {
3263 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3264 return;
3265 }
3266
3267 for(i=0; i<ac->nb_programs; i++){
3268 if(ac->programs[i]->id != progid)
3269 continue;
3270 program = ac->programs[i];
3271 for(j=0; j<program->nb_stream_indexes; j++)
3272 if(program->stream_index[j] == idx)
3273 return;
3274
3275 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3276 if(!tmp)
3277 return;
3278 program->stream_index = tmp;
3279 program->stream_index[program->nb_stream_indexes++] = idx;
3280 return;
3281 }
3282 }
3283
3284 static void print_fps(double d, const char *postfix){
3285 uint64_t v= lrintf(d*100);
3286 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3287 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3288 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3289 }
3290
3291 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3292 {
3293 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3294 AVDictionaryEntry *tag=NULL;
3295
3296 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3297 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3298 if(strcmp("language", tag->key))
3299 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
3300 }
3301 }
3302 }
3303
3304 /* "user interface" functions */
3305 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3306 {
3307 char buf[256];
3308 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3309 AVStream *st = ic->streams[i];
3310 int g = av_gcd(st->time_base.num, st->time_base.den);
3311 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3312 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3313 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
3314 /* the PID is important information, so we display it */
3315 /* XXX: add a generic system */
3316 if (flags & AVFMT_SHOW_IDS)
3317 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3318 if (lang)
3319 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3320 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3321 av_log(NULL, AV_LOG_INFO, ": %s", buf);
3322 if (st->sample_aspect_ratio.num && // default
3323 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3324 AVRational display_aspect_ratio;
3325 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3326 st->codec->width*st->sample_aspect_ratio.num,
3327 st->codec->height*st->sample_aspect_ratio.den,
3328 1024*1024);
3329 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
3330 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3331 display_aspect_ratio.num, display_aspect_ratio.den);
3332 }
3333 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3334 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3335 print_fps(av_q2d(st->avg_frame_rate), "fps");
3336 if(st->r_frame_rate.den && st->r_frame_rate.num)
3337 print_fps(av_q2d(st->r_frame_rate), "tbr");
3338 if(st->time_base.den && st->time_base.num)
3339 print_fps(1/av_q2d(st->time_base), "tbn");
3340 if(st->codec->time_base.den && st->codec->time_base.num)
3341 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3342 }
3343 if (st->disposition & AV_DISPOSITION_DEFAULT)
3344 av_log(NULL, AV_LOG_INFO, " (default)");
3345 if (st->disposition & AV_DISPOSITION_DUB)
3346 av_log(NULL, AV_LOG_INFO, " (dub)");
3347 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3348 av_log(NULL, AV_LOG_INFO, " (original)");
3349 if (st->disposition & AV_DISPOSITION_COMMENT)
3350 av_log(NULL, AV_LOG_INFO, " (comment)");
3351 if (st->disposition & AV_DISPOSITION_LYRICS)
3352 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3353 if (st->disposition & AV_DISPOSITION_KARAOKE)
3354 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3355 if (st->disposition & AV_DISPOSITION_FORCED)
3356 av_log(NULL, AV_LOG_INFO, " (forced)");
3357 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3358 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3359 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3360 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3361 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3362 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3363 av_log(NULL, AV_LOG_INFO, "\n");
3364 dump_metadata(NULL, st->metadata, " ");
3365 }
3366
3367 #if FF_API_DUMP_FORMAT
3368 void dump_format(AVFormatContext *ic,
3369 int index,
3370 const char *url,
3371 int is_output)
3372 {
3373 av_dump_format(ic, index, url, is_output);
3374 }
3375 #endif
3376
3377 void av_dump_format(AVFormatContext *ic,
3378 int index,
3379 const char *url,
3380 int is_output)
3381 {
3382 int i;
3383 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3384 if (ic->nb_streams && !printed)
3385 return;
3386
3387 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3388 is_output ? "Output" : "Input",
3389 index,
3390 is_output ? ic->oformat->name : ic->iformat->name,
3391 is_output ? "to" : "from", url);
3392 dump_metadata(NULL, ic->metadata, " ");
3393 if (!is_output) {
3394 av_log(NULL, AV_LOG_INFO, " Duration: ");
3395 if (ic->duration != AV_NOPTS_VALUE) {
3396 int hours, mins, secs, us;
3397 secs = ic->duration / AV_TIME_BASE;
3398 us = ic->duration % AV_TIME_BASE;
3399 mins = secs / 60;
3400 secs %= 60;
3401 hours = mins / 60;
3402 mins %= 60;
3403 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3404 (100 * us) / AV_TIME_BASE);
3405 } else {
3406 av_log(NULL, AV_LOG_INFO, "N/A");
3407 }
3408 if (ic->start_time != AV_NOPTS_VALUE) {
3409 int secs, us;
3410 av_log(NULL, AV_LOG_INFO, ", start: ");
3411 secs = ic->start_time / AV_TIME_BASE;
3412 us = abs(ic->start_time % AV_TIME_BASE);
3413 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3414 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3415 }
3416 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3417 if (ic->bit_rate) {
3418 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3419 } else {
3420 av_log(NULL, AV_LOG_INFO, "N/A");
3421 }
3422 av_log(NULL, AV_LOG_INFO, "\n");
3423 }
3424 for (i = 0; i < ic->nb_chapters; i++) {
3425 AVChapter *ch = ic->chapters[i];
3426 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3427 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3428 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3429
3430 dump_metadata(NULL, ch->metadata, " ");
3431 }
3432 if(ic->nb_programs) {
3433 int j, k, total = 0;
3434 for(j=0; j<ic->nb_programs; j++) {
3435 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3436 "name", NULL, 0);
3437 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3438 name ? name->value : "");
3439 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3440 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3441 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3442 printed[ic->programs[j]->stream_index[k]] = 1;
3443 }
3444 total += ic->programs[j]->nb_stream_indexes;
3445 }
3446 if (total < ic->nb_streams)
3447 av_log(NULL, AV_LOG_INFO, " No Program\n");
3448 }
3449 for(i=0;i<ic->nb_streams;i++)
3450 if (!printed[i])
3451 dump_stream_format(ic, i, index, is_output);
3452
3453 av_free(printed);
3454 }
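/* Typical call (sketch): dump the layout of an opened input context "ic" right
 * after avformat_find_stream_info(), e.g. av_dump_format(ic, 0, filename, 0). */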
3455
3456 int64_t av_gettime(void)
3457 {
3458 struct timeval tv;
3459 gettimeofday(&tv,NULL);
3460 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
3461 }
3462
3463 uint64_t ff_ntp_time(void)
3464 {
3465 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3466 }
3467
3468 #if FF_API_PARSE_DATE
3469 #include "libavutil/parseutils.h"
3470
3471 int64_t parse_date(const char *timestr, int duration)
3472 {
3473 int64_t timeval;
3474 av_parse_time(&timeval, timestr, duration);
3475 return timeval;
3476 }
3477 #endif
3478
3479 #if FF_API_FIND_INFO_TAG
3480 #include "libavutil/parseutils.h"
3481
3482 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
3483 {
3484 return av_find_info_tag(arg, arg_size, tag1, info);
3485 }
3486 #endif
3487
3488 int av_get_frame_filename(char *buf, int buf_size,
3489 const char *path, int number)
3490 {
3491 const char *p;
3492 char *q, buf1[20], c;
3493 int nd, len, percentd_found;
3494
3495 q = buf;
3496 p = path;
3497 percentd_found = 0;
3498 for(;;) {
3499 c = *p++;
3500 if (c == '\0')
3501 break;
3502 if (c == '%') {
3503 do {
3504 nd = 0;
3505 while (isdigit(*p)) {
3506 nd = nd * 10 + *p++ - '0';
3507 }
3508 c = *p++;
3509 } while (isdigit(c));
3510
3511 switch(c) {
3512 case '%':
3513 goto addchar;
3514 case 'd':
3515 if (percentd_found)
3516 goto fail;
3517 percentd_found = 1;
3518 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3519 len = strlen(buf1);
3520 if ((q - buf + len) > buf_size - 1)
3521 goto fail;
3522 memcpy(q, buf1, len);
3523 q += len;
3524 break;
3525 default:
3526 goto fail;
3527 }
3528 } else {
3529 addchar:
3530 if ((q - buf) < buf_size - 1)
3531 *q++ = c;
3532 }
3533 }
3534 if (!percentd_found)
3535 goto fail;
3536 *q = '\0';
3537 return 0;
3538 fail:
3539 *q = '\0';
3540 return -1;
3541 }
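/* Worked example: av_get_frame_filename(buf, sizeof(buf), "img%03d.jpg", 7)
 * writes "img007.jpg" into buf and returns 0; a path containing no "%d"-style
 * directive, or more than one, makes the call fail and return -1. */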
3542
3543 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3544 {