lavf: add an AVStream field for exporting stream-global side data
[libav.git] / libavformat / utils.c
1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #undef NDEBUG
23 #include <assert.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26
27 #include "config.h"
28
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
38
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
41
42 #include "audiointerleave.h"
43 #include "avformat.h"
44 #include "avio_internal.h"
45 #include "id3v2.h"
46 #include "internal.h"
47 #include "metadata.h"
48 #if CONFIG_NETWORK
49 #include "network.h"
50 #endif
51 #include "riff.h"
52 #include "url.h"
53
54 /**
55 * @file
56 * various utility functions for use within Libav
57 */
58
59 unsigned avformat_version(void)
60 {
61 return LIBAVFORMAT_VERSION_INT;
62 }
63
64 const char *avformat_configuration(void)
65 {
66 return LIBAV_CONFIGURATION;
67 }
68
69 const char *avformat_license(void)
70 {
71 #define LICENSE_PREFIX "libavformat license: "
72 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
73 }
74
75 /* an arbitrarily chosen "sane" max packet size -- 50M */
76 #define SANE_CHUNK_SIZE (50000000)
77
78 /* Read the data in sane-sized chunks and append to pkt.
79 * Return the number of bytes read or an error. */
80 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
81 {
82 int64_t chunk_size = size;
83 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
84 int orig_size = pkt->size;
85 int ret = 0;
86
87 do {
88 int prev_size = pkt->size;
89 int read_size;
90
91 /* When the caller requests a lot of data, limit it to the amount
92 * left in file or SANE_CHUNK_SIZE when it is not known. */
93 if (size > SANE_CHUNK_SIZE) {
94 int64_t filesize = avio_size(s) - avio_tell(s);
95 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
96 }
97 read_size = FFMIN(size, chunk_size);
98
99 ret = av_grow_packet(pkt, read_size);
100 if (ret < 0)
101 break;
102
103 ret = avio_read(s, pkt->data + prev_size, read_size);
104 if (ret != read_size) {
105 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
106 break;
107 }
108
109 size -= read_size;
110 } while (size > 0);
111
112 pkt->pos = orig_pos;
113 if (!pkt->size)
114 av_free_packet(pkt);
115 return pkt->size > orig_size ? pkt->size - orig_size : ret;
116 }
117
118 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
119 {
120 av_init_packet(pkt);
121 pkt->data = NULL;
122 pkt->size = 0;
123 pkt->pos = avio_tell(s);
124
125 return append_packet_chunked(s, pkt, size);
126 }
127
128 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
129 {
130 if (!pkt->size)
131 return av_get_packet(s, pkt, size);
132 return append_packet_chunked(s, pkt, size);
133 }
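/* Illustrative use of av_get_packet() from a demuxer's read_packet callback
 * (a sketch only; "pb" stands for the demuxer's AVIOContext and the fixed
 * 4096-byte block size is an arbitrary example):
 *
 *     int ret = av_get_packet(pb, pkt, 4096);
 *     if (ret < 0)
 *         return ret;          // I/O error or EOF before any data was read
 *     // pkt->size holds the bytes actually read, pkt->pos the start offset
 */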
134
135 int av_filename_number_test(const char *filename)
136 {
137 char buf[1024];
138 return filename &&
139 (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
140 }
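/* The test above succeeds for the patterns accepted by
 * av_get_frame_filename(), e.g. (illustrative only):
 *
 *     char name[1024];
 *     av_get_frame_filename(name, sizeof(name), "img-%03d.jpeg", 7);
 *     // name now contains "img-007.jpeg"
 */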
141
142 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened,
143 int *score_max)
144 {
145 AVProbeData lpd = *pd;
146 AVInputFormat *fmt1 = NULL, *fmt;
147 int score, id3 = 0;
148
149 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
150 int id3len = ff_id3v2_tag_len(lpd.buf);
151 if (lpd.buf_size > id3len + 16) {
152 lpd.buf += id3len;
153 lpd.buf_size -= id3len;
154 }
155 id3 = 1;
156 }
157
158 fmt = NULL;
159 while ((fmt1 = av_iformat_next(fmt1))) {
160 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
161 continue;
162 score = 0;
163 if (fmt1->read_probe) {
164 score = fmt1->read_probe(&lpd);
165 } else if (fmt1->extensions) {
166 if (av_match_ext(lpd.filename, fmt1->extensions))
167 score = AVPROBE_SCORE_EXTENSION;
168 }
169 if (score > *score_max) {
170 *score_max = score;
171 fmt = fmt1;
172 } else if (score == *score_max)
173 fmt = NULL;
174 }
175
176 // A hack for files with huge id3v2 tags -- try to guess by file extension.
177 if (!fmt && is_opened && *score_max < AVPROBE_SCORE_EXTENSION / 2) {
178 while ((fmt = av_iformat_next(fmt)))
179 if (fmt->extensions &&
180 av_match_ext(lpd.filename, fmt->extensions)) {
181 *score_max = AVPROBE_SCORE_EXTENSION / 2;
182 break;
183 }
184 }
185
186 if (!fmt && id3 && *score_max < AVPROBE_SCORE_EXTENSION / 2 - 1) {
187 while ((fmt = av_iformat_next(fmt)))
188 if (fmt->extensions && av_match_ext("mp3", fmt->extensions)) {
189 *score_max = AVPROBE_SCORE_EXTENSION / 2 - 1;
190 break;
191 }
192 }
193
194 return fmt;
195 }
196
197 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
198 {
199 int score = 0;
200 return av_probe_input_format2(pd, is_opened, &score);
201 }
202
203 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
204 AVProbeData *pd, int score)
205 {
206 static const struct {
207 const char *name;
208 enum AVCodecID id;
209 enum AVMediaType type;
210 } fmt_id_type[] = {
211 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
212 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
213 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
214 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
215 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
216 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
217 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
218 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
219 { 0 }
220 };
221 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
222
223 if (fmt) {
224 int i;
225 av_log(s, AV_LOG_DEBUG,
226 "Probe with size=%d, packets=%d detected %s with score=%d\n",
227 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
228 fmt->name, score);
229 for (i = 0; fmt_id_type[i].name; i++) {
230 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
231 st->codec->codec_id = fmt_id_type[i].id;
232 st->codec->codec_type = fmt_id_type[i].type;
233 break;
234 }
235 }
236 }
237 return !!fmt;
238 }
239
240 /************************************************************/
241 /* input media file */
242
243 /** size of probe buffer, for guessing file type from file contents */
244 #define PROBE_BUF_MIN 2048
245 #define PROBE_BUF_MAX (1 << 20)
246
247 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
248 const char *filename, void *logctx,
249 unsigned int offset, unsigned int max_probe_size)
250 {
251 AVProbeData pd = { filename ? filename : "" };
252 uint8_t *buf = NULL;
253 int ret = 0, probe_size;
254
255 if (!max_probe_size)
256 max_probe_size = PROBE_BUF_MAX;
257 else if (max_probe_size > PROBE_BUF_MAX)
258 max_probe_size = PROBE_BUF_MAX;
259 else if (max_probe_size < PROBE_BUF_MIN)
260 return AVERROR(EINVAL);
261
262 if (offset >= max_probe_size)
263 return AVERROR(EINVAL);
264 avio_skip(pb, offset);
265 max_probe_size -= offset;
266
267 for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt;
268 probe_size = FFMIN(probe_size << 1,
269 FFMAX(max_probe_size, probe_size + 1))) {
270 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX / 4 : 0;
271
272 /* Read probe data. */
273 if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
274 return ret;
275 if ((ret = avio_read(pb, buf + pd.buf_size,
276 probe_size - pd.buf_size)) < 0) {
277 /* Fail if the error was not end of file; otherwise lower the score. */

278 if (ret != AVERROR_EOF) {
279 av_free(buf);
280 return ret;
281 }
282 score = 0;
283 ret = 0; /* error was end of file, nothing read */
284 }
285 pd.buf_size += ret;
286 pd.buf = buf;
287
288 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
289
290 /* Guess file format. */
291 *fmt = av_probe_input_format2(&pd, 1, &score);
292 if (*fmt) {
293 /* This can only be true in the last iteration. */
294 if (score <= AVPROBE_SCORE_MAX / 4) {
295 av_log(logctx, AV_LOG_WARNING,
296 "Format detected only with low score of %d, "
297 "misdetection possible!\n", score);
298 } else
299 av_log(logctx, AV_LOG_DEBUG,
300 "Probed with size=%d and score=%d\n", probe_size, score);
301 }
302 }
303
304 if (!*fmt) {
305 av_free(buf);
306 return AVERROR_INVALIDDATA;
307 }
308
309 /* Rewind. Reuse probe buffer to avoid seeking. */
310 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
311 av_free(buf);
312
313 return ret;
314 }
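/* Sketch of probing an already opened AVIOContext by hand (normally this is
 * done internally by init_input(); "pb" and the filename are examples, and a
 * max_probe_size of 0 selects the PROBE_BUF_MAX default):
 *
 *     AVInputFormat *fmt = NULL;
 *     if (av_probe_input_buffer(pb, &fmt, "stream.ts", NULL, 0, 0) >= 0)
 *         av_log(NULL, AV_LOG_INFO, "detected format: %s\n", fmt->name);
 */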
315
316 /* Open input file and probe the format if necessary. */
317 static int init_input(AVFormatContext *s, const char *filename,
318 AVDictionary **options)
319 {
320 int ret;
321 AVProbeData pd = { filename, NULL, 0 };
322
323 if (s->pb) {
324 s->flags |= AVFMT_FLAG_CUSTOM_IO;
325 if (!s->iformat)
326 return av_probe_input_buffer(s->pb, &s->iformat, filename,
327 s, 0, s->probesize);
328 else if (s->iformat->flags & AVFMT_NOFILE)
329 return AVERROR(EINVAL);
330 return 0;
331 }
332
333 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
334 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
335 return 0;
336
337 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
338 &s->interrupt_callback, options)) < 0)
339 return ret;
340 if (s->iformat)
341 return 0;
342 return av_probe_input_buffer(s->pb, &s->iformat, filename,
343 s, 0, s->probesize);
344 }
345
346 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
347 AVPacketList **plast_pktl)
348 {
349 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
350 if (!pktl)
351 return NULL;
352
353 if (*packet_buffer)
354 (*plast_pktl)->next = pktl;
355 else
356 *packet_buffer = pktl;
357
358 /* Add the packet to the buffered packet list. */
359 *plast_pktl = pktl;
360 pktl->pkt = *pkt;
361 return &pktl->pkt;
362 }
363
364 static int queue_attached_pictures(AVFormatContext *s)
365 {
366 int i;
367 for (i = 0; i < s->nb_streams; i++)
368 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
369 s->streams[i]->discard < AVDISCARD_ALL) {
370 AVPacket copy = s->streams[i]->attached_pic;
371 copy.buf = av_buffer_ref(copy.buf);
372 if (!copy.buf)
373 return AVERROR(ENOMEM);
374
375 add_to_pktbuf(&s->raw_packet_buffer, &copy,
376 &s->raw_packet_buffer_end);
377 }
378 return 0;
379 }
380
381 int avformat_open_input(AVFormatContext **ps, const char *filename,
382 AVInputFormat *fmt, AVDictionary **options)
383 {
384 AVFormatContext *s = *ps;
385 int ret = 0;
386 AVDictionary *tmp = NULL;
387 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
388
389 if (!s && !(s = avformat_alloc_context()))
390 return AVERROR(ENOMEM);
391 if (fmt)
392 s->iformat = fmt;
393
394 if (options)
395 av_dict_copy(&tmp, *options, 0);
396
397 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
398 goto fail;
399
400 if ((ret = init_input(s, filename, &tmp)) < 0)
401 goto fail;
402
403 /* Check filename in case an image number is expected. */
404 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
405 if (!av_filename_number_test(filename)) {
406 ret = AVERROR(EINVAL);
407 goto fail;
408 }
409 }
410
411 s->duration = s->start_time = AV_NOPTS_VALUE;
412 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
413
414 /* Allocate private data. */
415 if (s->iformat->priv_data_size > 0) {
416 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
417 ret = AVERROR(ENOMEM);
418 goto fail;
419 }
420 if (s->iformat->priv_class) {
421 *(const AVClass **) s->priv_data = s->iformat->priv_class;
422 av_opt_set_defaults(s->priv_data);
423 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
424 goto fail;
425 }
426 }
427
428 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
429 if (s->pb)
430 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
431
432 if (s->iformat->read_header)
433 if ((ret = s->iformat->read_header(s)) < 0)
434 goto fail;
435
436 if (id3v2_extra_meta &&
437 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
438 goto fail;
439 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
440
441 if ((ret = queue_attached_pictures(s)) < 0)
442 goto fail;
443
444 if (s->pb && !s->data_offset)
445 s->data_offset = avio_tell(s->pb);
446
447 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
448
449 if (options) {
450 av_dict_free(options);
451 *options = tmp;
452 }
453 *ps = s;
454 return 0;
455
456 fail:
457 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
458 av_dict_free(&tmp);
459 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
460 avio_close(s->pb);
461 avformat_free_context(s);
462 *ps = NULL;
463 return ret;
464 }
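/* Typical caller-side use of avformat_open_input() (a minimal sketch; the
 * filename is an example, error handling is reduced to a bare check, and
 * av_register_all() is assumed to have been called beforehand):
 *
 *     AVFormatContext *ic = NULL;
 *     if (avformat_open_input(&ic, "input.mkv", NULL, NULL) < 0)
 *         return -1;                    // probing or header parsing failed
 *     // ... avformat_find_stream_info(ic, NULL), demuxing, ...
 *     avformat_close_input(&ic);
 */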
465
466 /*******************************************************/
467
468 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
469 {
470 if (st->codec->codec_id == AV_CODEC_ID_PROBE) {
471 AVProbeData *pd = &st->probe_data;
472 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
473 --st->probe_packets;
474
475 if (pkt) {
476 int err;
477 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
478 AVPROBE_PADDING_SIZE)) < 0)
479 return err;
480 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
481 pd->buf_size += pkt->size;
482 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
483 } else {
484 st->probe_packets = 0;
485 if (!pd->buf_size) {
486 av_log(s, AV_LOG_ERROR,
487 "nothing to probe for stream %d\n", st->index);
488 return 0;
489 }
490 }
491
492 if (!st->probe_packets ||
493 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
494 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
495 ? AVPROBE_SCORE_MAX / 4 : 0);
496 if (st->codec->codec_id != AV_CODEC_ID_PROBE) {
497 pd->buf_size = 0;
498 av_freep(&pd->buf);
499 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
500 }
501 }
502 }
503 return 0;
504 }
505
506 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
507 {
508 int ret, i, err;
509 AVStream *st;
510
511 for (;;) {
512 AVPacketList *pktl = s->raw_packet_buffer;
513
514 if (pktl) {
515 *pkt = pktl->pkt;
516 st = s->streams[pkt->stream_index];
517 if (st->codec->codec_id != AV_CODEC_ID_PROBE ||
518 !st->probe_packets ||
519 s->raw_packet_buffer_remaining_size < pkt->size) {
520 AVProbeData *pd;
521 if (st->probe_packets)
522 if ((err = probe_codec(s, st, NULL)) < 0)
523 return err;
524 pd = &st->probe_data;
525 av_freep(&pd->buf);
526 pd->buf_size = 0;
527 s->raw_packet_buffer = pktl->next;
528 s->raw_packet_buffer_remaining_size += pkt->size;
529 av_free(pktl);
530 return 0;
531 }
532 }
533
534 pkt->data = NULL;
535 pkt->size = 0;
536 av_init_packet(pkt);
537 ret = s->iformat->read_packet(s, pkt);
538 if (ret < 0) {
539 if (!pktl || ret == AVERROR(EAGAIN))
540 return ret;
541 for (i = 0; i < s->nb_streams; i++) {
542 st = s->streams[i];
543 if (st->probe_packets)
544 if ((err = probe_codec(s, st, NULL)) < 0)
545 return err;
546 }
547 continue;
548 }
549
550 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
551 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
552 av_log(s, AV_LOG_WARNING,
553 "Dropped corrupted packet (stream = %d)\n",
554 pkt->stream_index);
555 av_free_packet(pkt);
556 continue;
557 }
558
559 st = s->streams[pkt->stream_index];
560
561 switch (st->codec->codec_type) {
562 case AVMEDIA_TYPE_VIDEO:
563 if (s->video_codec_id)
564 st->codec->codec_id = s->video_codec_id;
565 break;
566 case AVMEDIA_TYPE_AUDIO:
567 if (s->audio_codec_id)
568 st->codec->codec_id = s->audio_codec_id;
569 break;
570 case AVMEDIA_TYPE_SUBTITLE:
571 if (s->subtitle_codec_id)
572 st->codec->codec_id = s->subtitle_codec_id;
573 break;
574 }
575
576 if (!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
577 !st->probe_packets))
578 return ret;
579
580 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
581 s->raw_packet_buffer_remaining_size -= pkt->size;
582
583 if ((err = probe_codec(s, st, pkt)) < 0)
584 return err;
585 }
586 }
587
588 /**********************************************************/
589
590 /**
591 * Get the number of samples of an audio frame. Return -1 on error.
592 */
593 int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
594 {
595 int frame_size;
596
597 /* give frame_size priority if demuxing */
598 if (!mux && enc->frame_size > 1)
599 return enc->frame_size;
600
601 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
602 return frame_size;
603
604 /* Fall back on using frame_size if muxing. */
605 if (enc->frame_size > 1)
606 return enc->frame_size;
607
608 return -1;
609 }
610
611 /**
612 * Compute the frame duration as the fraction *pnum / *pden, in seconds; set both to 0 if it is not available.
613 */
614 void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
615 AVCodecParserContext *pc, AVPacket *pkt)
616 {
617 int frame_size;
618
619 *pnum = 0;
620 *pden = 0;
621 switch (st->codec->codec_type) {
622 case AVMEDIA_TYPE_VIDEO:
623 if (st->avg_frame_rate.num) {
624 *pnum = st->avg_frame_rate.den;
625 *pden = st->avg_frame_rate.num;
626 } else if (st->time_base.num * 1000LL > st->time_base.den) {
627 *pnum = st->time_base.num;
628 *pden = st->time_base.den;
629 } else if (st->codec->time_base.num * 1000LL > st->codec->time_base.den) {
630 *pnum = st->codec->time_base.num;
631 *pden = st->codec->time_base.den;
632 if (pc && pc->repeat_pict) {
633 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
634 *pden /= 1 + pc->repeat_pict;
635 else
636 *pnum *= 1 + pc->repeat_pict;
637 }
638 /* If this codec can be interlaced or progressive, then we need
639 * a parser to compute the duration of a packet. Thus, if we have
640 * no parser, leave the duration undefined. */
641 if (st->codec->ticks_per_frame > 1 && !pc)
642 *pnum = *pden = 0;
643 }
644 break;
645 case AVMEDIA_TYPE_AUDIO:
646 frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
647 if (frame_size <= 0 || st->codec->sample_rate <= 0)
648 break;
649 *pnum = frame_size;
650 *pden = st->codec->sample_rate;
651 break;
652 default:
653 break;
654 }
655 }
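/* The (*pnum, *pden) pair expresses the frame duration in seconds as a
 * rational; compute_pkt_fields() turns it into a duration in stream
 * time_base units like this (shown here purely for illustration):
 *
 *     pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
 *                                       den * (int64_t) st->time_base.num,
 *                                       AV_ROUND_DOWN);
 */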
656
657 static int is_intra_only(enum AVCodecID id)
658 {
659 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
660 if (!d)
661 return 0;
662 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
663 return 0;
664 return 1;
665 }
666
667 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
668 int64_t dts, int64_t pts)
669 {
670 AVStream *st = s->streams[stream_index];
671 AVPacketList *pktl = s->packet_buffer;
672
673 if (st->first_dts != AV_NOPTS_VALUE ||
674 dts == AV_NOPTS_VALUE ||
675 st->cur_dts == AV_NOPTS_VALUE)
676 return;
677
678 st->first_dts = dts - st->cur_dts;
679 st->cur_dts = dts;
680
681 for (; pktl; pktl = pktl->next) {
682 if (pktl->pkt.stream_index != stream_index)
683 continue;
684 // FIXME: think more about this check
685 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
686 pktl->pkt.pts += st->first_dts;
687
688 if (pktl->pkt.dts != AV_NOPTS_VALUE)
689 pktl->pkt.dts += st->first_dts;
690
691 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
692 st->start_time = pktl->pkt.pts;
693 }
694 if (st->start_time == AV_NOPTS_VALUE)
695 st->start_time = pts;
696 }
697
698 static void update_initial_durations(AVFormatContext *s, AVStream *st,
699 int stream_index, int duration)
700 {
701 AVPacketList *pktl = s->packet_buffer;
702 int64_t cur_dts = 0;
703
704 if (st->first_dts != AV_NOPTS_VALUE) {
705 cur_dts = st->first_dts;
706 for (; pktl; pktl = pktl->next) {
707 if (pktl->pkt.stream_index == stream_index) {
708 if (pktl->pkt.pts != pktl->pkt.dts ||
709 pktl->pkt.dts != AV_NOPTS_VALUE ||
710 pktl->pkt.duration)
711 break;
712 cur_dts -= duration;
713 }
714 }
715 pktl = s->packet_buffer;
716 st->first_dts = cur_dts;
717 } else if (st->cur_dts)
718 return;
719
720 for (; pktl; pktl = pktl->next) {
721 if (pktl->pkt.stream_index != stream_index)
722 continue;
723 if (pktl->pkt.pts == pktl->pkt.dts &&
724 pktl->pkt.dts == AV_NOPTS_VALUE &&
725 !pktl->pkt.duration) {
726 pktl->pkt.dts = cur_dts;
727 if (!st->codec->has_b_frames)
728 pktl->pkt.pts = cur_dts;
729 cur_dts += duration;
730 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
731 pktl->pkt.duration = duration;
732 } else
733 break;
734 }
735 if (st->first_dts == AV_NOPTS_VALUE)
736 st->cur_dts = cur_dts;
737 }
738
739 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
740 AVCodecParserContext *pc, AVPacket *pkt)
741 {
742 int num, den, presentation_delayed, delay, i;
743 int64_t offset;
744
745 if (s->flags & AVFMT_FLAG_NOFILLIN)
746 return;
747
748 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
749 pkt->dts = AV_NOPTS_VALUE;
750
751 /* Do we have a video B-frame? */
752 delay = st->codec->has_b_frames;
753 presentation_delayed = 0;
754
755 /* XXX: we need has_b_frames, but cannot get it if the codec is
756 * not initialized */
757 if (delay &&
758 pc && pc->pict_type != AV_PICTURE_TYPE_B)
759 presentation_delayed = 1;
760
761 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
762 st->pts_wrap_bits < 63 &&
763 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
764 pkt->dts -= 1LL << st->pts_wrap_bits;
765 }
766
767 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
768 * We take the conservative approach and discard both.
769 * Note: If this is misbehaving for an H.264 file, then possibly
770 * presentation_delayed is not set correctly. */
771 if (delay == 1 && pkt->dts == pkt->pts &&
772 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
773 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
774 pkt->dts = pkt->pts = AV_NOPTS_VALUE;
775 }
776
777 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
778 ff_compute_frame_duration(&num, &den, st, pc, pkt);
779 if (den && num) {
780 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
781 den * (int64_t) st->time_base.num,
782 AV_ROUND_DOWN);
783
784 if (pkt->duration != 0 && s->packet_buffer)
785 update_initial_durations(s, st, pkt->stream_index,
786 pkt->duration);
787 }
788 }
789
790 /* Correct timestamps with byte offset if demuxers only have timestamps
791 * on packet boundaries */
792 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
793 /* this will estimate bitrate based on this frame's duration and size */
794 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
795 if (pkt->pts != AV_NOPTS_VALUE)
796 pkt->pts += offset;
797 if (pkt->dts != AV_NOPTS_VALUE)
798 pkt->dts += offset;
799 }
800
801 /* This may be redundant, but it should not hurt. */
802 if (pkt->dts != AV_NOPTS_VALUE &&
803 pkt->pts != AV_NOPTS_VALUE &&
804 pkt->pts > pkt->dts)
805 presentation_delayed = 1;
806
807 av_dlog(NULL,
808 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
809 "cur_dts:%"PRId64" st:%d pc:%p\n",
810 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
811 pkt->stream_index, pc);
812 /* Interpolate PTS and DTS if they are not present. We skip H.264
813 * currently because delay and has_b_frames are not reliably set. */
814 if ((delay == 0 || (delay == 1 && pc)) &&
815 st->codec->codec_id != AV_CODEC_ID_H264) {
816 if (presentation_delayed) {
817 /* DTS = decoding timestamp */
818 /* PTS = presentation timestamp */
819 if (pkt->dts == AV_NOPTS_VALUE)
820 pkt->dts = st->last_IP_pts;
821 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
822 if (pkt->dts == AV_NOPTS_VALUE)
823 pkt->dts = st->cur_dts;
824
825 /* This is tricky: the dts must be incremented by the duration
826 * of the frame we are displaying, i.e. the last I- or P-frame. */
827 if (st->last_IP_duration == 0)
828 st->last_IP_duration = pkt->duration;
829 if (pkt->dts != AV_NOPTS_VALUE)
830 st->cur_dts = pkt->dts + st->last_IP_duration;
831 st->last_IP_duration = pkt->duration;
832 st->last_IP_pts = pkt->pts;
833 /* Cannot compute PTS if not present (we can compute it only
834 * by knowing the future). */
835 } else if (pkt->pts != AV_NOPTS_VALUE ||
836 pkt->dts != AV_NOPTS_VALUE ||
837 pkt->duration ||
838 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
839 int duration = pkt->duration;
840 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
841 ff_compute_frame_duration(&num, &den, st, pc, pkt);
842 if (den && num) {
843 duration = av_rescale_rnd(1,
844 num * (int64_t) st->time_base.den,
845 den * (int64_t) st->time_base.num,
846 AV_ROUND_DOWN);
847 if (duration != 0 && s->packet_buffer)
848 update_initial_durations(s, st, pkt->stream_index,
849 duration);
850 }
851 }
852
853 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
854 duration) {
855 /* presentation is not delayed: PTS and DTS are the same */
856 if (pkt->pts == AV_NOPTS_VALUE)
857 pkt->pts = pkt->dts;
858 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
859 pkt->pts);
860 if (pkt->pts == AV_NOPTS_VALUE)
861 pkt->pts = st->cur_dts;
862 pkt->dts = pkt->pts;
863 if (pkt->pts != AV_NOPTS_VALUE)
864 st->cur_dts = pkt->pts + duration;
865 }
866 }
867 }
868
869 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
870 st->pts_buffer[0] = pkt->pts;
871 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
872 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
873 if (pkt->dts == AV_NOPTS_VALUE)
874 pkt->dts = st->pts_buffer[0];
875 // We skipped H.264 above, so handle it here.
876 if (st->codec->codec_id == AV_CODEC_ID_H264)
877 // This should happen on the first packet
878 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
879 if (pkt->dts > st->cur_dts)
880 st->cur_dts = pkt->dts;
881 }
882
883 av_dlog(NULL,
884 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
885 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
886
887 /* update flags */
888 if (is_intra_only(st->codec->codec_id))
889 pkt->flags |= AV_PKT_FLAG_KEY;
890 if (pc)
891 pkt->convergence_duration = pc->convergence_duration;
892 }
893
894 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
895 {
896 while (*pkt_buf) {
897 AVPacketList *pktl = *pkt_buf;
898 *pkt_buf = pktl->next;
899 av_free_packet(&pktl->pkt);
900 av_freep(&pktl);
901 }
902 *pkt_buf_end = NULL;
903 }
904
905 /**
906 * Parse a packet, add all split parts to parse_queue.
907 *
908 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
909 */
910 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
911 {
912 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
913 AVStream *st = s->streams[stream_index];
914 uint8_t *data = pkt ? pkt->data : NULL;
915 int size = pkt ? pkt->size : 0;
916 int ret = 0, got_output = 0;
917
918 if (!pkt) {
919 av_init_packet(&flush_pkt);
920 pkt = &flush_pkt;
921 got_output = 1;
922 }
923
924 while (size > 0 || (pkt == &flush_pkt && got_output)) {
925 int len;
926
927 av_init_packet(&out_pkt);
928 len = av_parser_parse2(st->parser, st->codec,
929 &out_pkt.data, &out_pkt.size, data, size,
930 pkt->pts, pkt->dts, pkt->pos);
931
932 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
933 /* increment read pointer */
934 data += len;
935 size -= len;
936
937 got_output = !!out_pkt.size;
938
939 if (!out_pkt.size)
940 continue;
941
942 if (pkt->side_data) {
943 out_pkt.side_data = pkt->side_data;
944 out_pkt.side_data_elems = pkt->side_data_elems;
945 pkt->side_data = NULL;
946 pkt->side_data_elems = 0;
947 }
948
949 /* set the duration */
950 out_pkt.duration = 0;
951 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
952 if (st->codec->sample_rate > 0) {
953 out_pkt.duration =
954 av_rescale_q_rnd(st->parser->duration,
955 (AVRational) { 1, st->codec->sample_rate },
956 st->time_base,
957 AV_ROUND_DOWN);
958 }
959 } else if (st->codec->time_base.num != 0 &&
960 st->codec->time_base.den != 0) {
961 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
962 st->codec->time_base,
963 st->time_base,
964 AV_ROUND_DOWN);
965 }
966
967 out_pkt.stream_index = st->index;
968 out_pkt.pts = st->parser->pts;
969 out_pkt.dts = st->parser->dts;
970 out_pkt.pos = st->parser->pos;
971
972 if (st->parser->key_frame == 1 ||
973 (st->parser->key_frame == -1 &&
974 st->parser->pict_type == AV_PICTURE_TYPE_I))
975 out_pkt.flags |= AV_PKT_FLAG_KEY;
976
977 compute_pkt_fields(s, st, st->parser, &out_pkt);
978
979 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
980 out_pkt.flags & AV_PKT_FLAG_KEY) {
981 ff_reduce_index(s, st->index);
982 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
983 0, 0, AVINDEX_KEYFRAME);
984 }
985
986 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
987 out_pkt.buf = pkt->buf;
988 pkt->buf = NULL;
989 #if FF_API_DESTRUCT_PACKET
990 FF_DISABLE_DEPRECATION_WARNINGS
991 out_pkt.destruct = pkt->destruct;
992 pkt->destruct = NULL;
993 FF_ENABLE_DEPRECATION_WARNINGS
994 #endif
995 }
996 if ((ret = av_dup_packet(&out_pkt)) < 0)
997 goto fail;
998
999 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1000 av_free_packet(&out_pkt);
1001 ret = AVERROR(ENOMEM);
1002 goto fail;
1003 }
1004 }
1005
1006 /* end of the stream => close and free the parser */
1007 if (pkt == &flush_pkt) {
1008 av_parser_close(st->parser);
1009 st->parser = NULL;
1010 }
1011
1012 fail:
1013 av_free_packet(pkt);
1014 return ret;
1015 }
1016
1017 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1018 AVPacketList **pkt_buffer_end,
1019 AVPacket *pkt)
1020 {
1021 AVPacketList *pktl;
1022 av_assert0(*pkt_buffer);
1023 pktl = *pkt_buffer;
1024 *pkt = pktl->pkt;
1025 *pkt_buffer = pktl->next;
1026 if (!pktl->next)
1027 *pkt_buffer_end = NULL;
1028 av_freep(&pktl);
1029 return 0;
1030 }
1031
1032 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1033 {
1034 int ret = 0, i, got_packet = 0;
1035
1036 av_init_packet(pkt);
1037
1038 while (!got_packet && !s->parse_queue) {
1039 AVStream *st;
1040 AVPacket cur_pkt;
1041
1042 /* read next packet */
1043 ret = ff_read_packet(s, &cur_pkt);
1044 if (ret < 0) {
1045 if (ret == AVERROR(EAGAIN))
1046 return ret;
1047 /* flush the parsers */
1048 for (i = 0; i < s->nb_streams; i++) {
1049 st = s->streams[i];
1050 if (st->parser && st->need_parsing)
1051 parse_packet(s, NULL, st->index);
1052 }
1053 /* all remaining packets are now in parse_queue =>
1054 * really terminate parsing */
1055 break;
1056 }
1057 ret = 0;
1058 st = s->streams[cur_pkt.stream_index];
1059
1060 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1061 cur_pkt.dts != AV_NOPTS_VALUE &&
1062 cur_pkt.pts < cur_pkt.dts) {
1063 av_log(s, AV_LOG_WARNING,
1064 "Invalid timestamps stream=%d, pts=%"PRId64", "
1065 "dts=%"PRId64", size=%d\n",
1066 cur_pkt.stream_index, cur_pkt.pts,
1067 cur_pkt.dts, cur_pkt.size);
1068 }
1069 if (s->debug & FF_FDEBUG_TS)
1070 av_log(s, AV_LOG_DEBUG,
1071 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
1072 "size=%d, duration=%d, flags=%d\n",
1073 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
1074 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
1075
1076 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1077 st->parser = av_parser_init(st->codec->codec_id);
1078 if (!st->parser)
1079 /* no parser available: just output the raw packets */
1080 st->need_parsing = AVSTREAM_PARSE_NONE;
1081 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
1082 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1083 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
1084 st->parser->flags |= PARSER_FLAG_ONCE;
1085 }
1086
1087 if (!st->need_parsing || !st->parser) {
1088 /* no parsing needed: we just output the packet as is */
1089 *pkt = cur_pkt;
1090 compute_pkt_fields(s, st, NULL, pkt);
1091 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1092 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1093 ff_reduce_index(s, st->index);
1094 av_add_index_entry(st, pkt->pos, pkt->dts,
1095 0, 0, AVINDEX_KEYFRAME);
1096 }
1097 got_packet = 1;
1098 } else if (st->discard < AVDISCARD_ALL) {
1099 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1100 return ret;
1101 } else {
1102 /* free packet */
1103 av_free_packet(&cur_pkt);
1104 }
1105 }
1106
1107 if (!got_packet && s->parse_queue)
1108 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1109
1110 if (s->debug & FF_FDEBUG_TS)
1111 av_log(s, AV_LOG_DEBUG,
1112 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
1113 "size=%d, duration=%d, flags=%d\n",
1114 pkt->stream_index, pkt->pts, pkt->dts,
1115 pkt->size, pkt->duration, pkt->flags);
1116
1117 return ret;
1118 }
1119
1120 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1121 {
1122 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1123 int eof = 0;
1124
1125 if (!genpts)
1126 return s->packet_buffer
1127 ? read_from_packet_buffer(&s->packet_buffer,
1128 &s->packet_buffer_end, pkt)
1129 : read_frame_internal(s, pkt);
1130
1131 for (;;) {
1132 int ret;
1133 AVPacketList *pktl = s->packet_buffer;
1134
1135 if (pktl) {
1136 AVPacket *next_pkt = &pktl->pkt;
1137
1138 if (next_pkt->dts != AV_NOPTS_VALUE) {
1139 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1140 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1141 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1142 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
1143 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
1144 // not B-frame
1145 next_pkt->pts = pktl->pkt.dts;
1146 }
1147 pktl = pktl->next;
1148 }
1149 pktl = s->packet_buffer;
1150 }
1151
1152 /* read packet from packet buffer, if there is data */
1153 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1154 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1155 return read_from_packet_buffer(&s->packet_buffer,
1156 &s->packet_buffer_end, pkt);
1157 }
1158
1159 ret = read_frame_internal(s, pkt);
1160 if (ret < 0) {
1161 if (pktl && ret != AVERROR(EAGAIN)) {
1162 eof = 1;
1163 continue;
1164 } else
1165 return ret;
1166 }
1167
1168 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1169 &s->packet_buffer_end)) < 0)
1170 return AVERROR(ENOMEM);
1171 }
1172 }
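/* Canonical demuxing loop built on av_read_frame() (a sketch; "ic" is an
 * opened AVFormatContext and packet processing is left out):
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // pkt.stream_index selects the stream; pkt.pts/pkt.dts are in
 *         // ic->streams[pkt.stream_index]->time_base units
 *         av_free_packet(&pkt);
 *     }
 */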
1173
1174 /* XXX: empty the packet queue */
1175 static void flush_packet_queue(AVFormatContext *s)
1176 {
1177 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1178 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1179 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1180
1181 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1182 }
1183
1184 /*******************************************************/
1185 /* seek support */
1186
1187 int av_find_default_stream_index(AVFormatContext *s)
1188 {
1189 int first_audio_index = -1;
1190 int i;
1191 AVStream *st;
1192
1193 if (s->nb_streams <= 0)
1194 return -1;
1195 for (i = 0; i < s->nb_streams; i++) {
1196 st = s->streams[i];
1197 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1198 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1199 return i;
1200 }
1201 if (first_audio_index < 0 &&
1202 st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1203 first_audio_index = i;
1204 }
1205 return first_audio_index >= 0 ? first_audio_index : 0;
1206 }
1207
1208 /** Flush the frame reader. */
1209 void ff_read_frame_flush(AVFormatContext *s)
1210 {
1211 AVStream *st;
1212 int i, j;
1213
1214 flush_packet_queue(s);
1215
1216 /* Reset read state for each stream. */
1217 for (i = 0; i < s->nb_streams; i++) {
1218 st = s->streams[i];
1219
1220 if (st->parser) {
1221 av_parser_close(st->parser);
1222 st->parser = NULL;
1223 }
1224 st->last_IP_pts = AV_NOPTS_VALUE;
1225 /* We set the current DTS to an unspecified origin. */
1226 st->cur_dts = AV_NOPTS_VALUE;
1227
1228 st->probe_packets = MAX_PROBE_PACKETS;
1229
1230 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1231 st->pts_buffer[j] = AV_NOPTS_VALUE;
1232 }
1233 }
1234
1235 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1236 {
1237 int i;
1238
1239 for (i = 0; i < s->nb_streams; i++) {
1240 AVStream *st = s->streams[i];
1241
1242 st->cur_dts =
1243 av_rescale(timestamp,
1244 st->time_base.den * (int64_t) ref_st->time_base.num,
1245 st->time_base.num * (int64_t) ref_st->time_base.den);
1246 }
1247 }
1248
1249 void ff_reduce_index(AVFormatContext *s, int stream_index)
1250 {
1251 AVStream *st = s->streams[stream_index];
1252 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1253
1254 if ((unsigned) st->nb_index_entries >= max_entries) {
1255 int i;
1256 for (i = 0; 2 * i < st->nb_index_entries; i++)
1257 st->index_entries[i] = st->index_entries[2 * i];
1258 st->nb_index_entries = i;
1259 }
1260 }
1261
1262 int ff_add_index_entry(AVIndexEntry **index_entries,
1263 int *nb_index_entries,
1264 unsigned int *index_entries_allocated_size,
1265 int64_t pos, int64_t timestamp,
1266 int size, int distance, int flags)
1267 {
1268 AVIndexEntry *entries, *ie;
1269 int index;
1270
1271 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1272 return -1;
1273
1274 entries = av_fast_realloc(*index_entries,
1275 index_entries_allocated_size,
1276 (*nb_index_entries + 1) *
1277 sizeof(AVIndexEntry));
1278 if (!entries)
1279 return -1;
1280
1281 *index_entries = entries;
1282
1283 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1284 timestamp, AVSEEK_FLAG_ANY);
1285
1286 if (index < 0) {
1287 index = (*nb_index_entries)++;
1288 ie = &entries[index];
1289 assert(index == 0 || ie[-1].timestamp < timestamp);
1290 } else {
1291 ie = &entries[index];
1292 if (ie->timestamp != timestamp) {
1293 if (ie->timestamp <= timestamp)
1294 return -1;
1295 memmove(entries + index + 1, entries + index,
1296 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1297 (*nb_index_entries)++;
1298 } else if (ie->pos == pos && distance < ie->min_distance)
1299 // do not reduce the distance
1300 distance = ie->min_distance;
1301 }
1302
1303 ie->pos = pos;
1304 ie->timestamp = timestamp;
1305 ie->min_distance = distance;
1306 ie->size = size;
1307 ie->flags = flags;
1308
1309 return index;
1310 }
1311
1312 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1313 int size, int distance, int flags)
1314 {
1315 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1316 &st->index_entries_allocated_size, pos,
1317 timestamp, size, distance, flags);
1318 }
1319
1320 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1321 int64_t wanted_timestamp, int flags)
1322 {
1323 int a, b, m;
1324 int64_t timestamp;
1325
1326 a = -1;
1327 b = nb_entries;
1328
1329 // Optimize appending index entries at the end.
1330 if (b && entries[b - 1].timestamp < wanted_timestamp)
1331 a = b - 1;
1332
1333 while (b - a > 1) {
1334 m = (a + b) >> 1;
1335 timestamp = entries[m].timestamp;
1336 if (timestamp >= wanted_timestamp)
1337 b = m;
1338 if (timestamp <= wanted_timestamp)
1339 a = m;
1340 }
1341 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1342
1343 if (!(flags & AVSEEK_FLAG_ANY))
1344 while (m >= 0 && m < nb_entries &&
1345 !(entries[m].flags & AVINDEX_KEYFRAME))
1346 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1347
1348 if (m == nb_entries)
1349 return -1;
1350 return m;
1351 }
1352
1353 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1354 {
1355 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1356 wanted_timestamp, flags);
1357 }
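/* The flags follow the usual seeking semantics (illustrative call; "st" and
 * "ts" are examples): with AVSEEK_FLAG_BACKWARD the last keyframe entry at
 * or before the timestamp is returned, without it the first keyframe entry
 * at or after it, and AVSEEK_FLAG_ANY drops the keyframe restriction:
 *
 *     int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
 */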
1358
1359 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1360 int64_t target_ts, int flags)
1361 {
1362 AVInputFormat *avif = s->iformat;
1363 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1364 int64_t ts_min, ts_max, ts;
1365 int index;
1366 int64_t ret;
1367 AVStream *st;
1368
1369 if (stream_index < 0)
1370 return -1;
1371
1372 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1373
1374 ts_max =
1375 ts_min = AV_NOPTS_VALUE;
1376 pos_limit = -1; // GCC falsely says it may be uninitialized.
1377
1378 st = s->streams[stream_index];
1379 if (st->index_entries) {
1380 AVIndexEntry *e;
1381
1382 /* FIXME: Whole function must be checked for non-keyframe entries in
1383 * index case, especially read_timestamp(). */
1384 index = av_index_search_timestamp(st, target_ts,
1385 flags | AVSEEK_FLAG_BACKWARD);
1386 index = FFMAX(index, 0);
1387 e = &st->index_entries[index];
1388
1389 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1390 pos_min = e->pos;
1391 ts_min = e->timestamp;
1392 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1393 pos_min, ts_min);
1394 } else {
1395 assert(index == 0);
1396 }
1397
1398 index = av_index_search_timestamp(st, target_ts,
1399 flags & ~AVSEEK_FLAG_BACKWARD);
1400 assert(index < st->nb_index_entries);
1401 if (index >= 0) {
1402 e = &st->index_entries[index];
1403 assert(e->timestamp >= target_ts);
1404 pos_max = e->pos;
1405 ts_max = e->timestamp;
1406 pos_limit = pos_max - e->min_distance;
1407 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1408 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1409 }
1410 }
1411
1412 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1413 ts_min, ts_max, flags, &ts, avif->read_timestamp);
1414 if (pos < 0)
1415 return -1;
1416
1417 /* do the seek */
1418 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1419 return ret;
1420
1421 ff_update_cur_dts(s, st, ts);
1422
1423 return 0;
1424 }
1425
1426 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1427 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1428 int64_t ts_min, int64_t ts_max,
1429 int flags, int64_t *ts_ret,
1430 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1431 int64_t *, int64_t))
1432 {
1433 int64_t pos, ts;
1434 int64_t start_pos, filesize;
1435 int no_change;
1436
1437 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1438
1439 if (ts_min == AV_NOPTS_VALUE) {
1440 pos_min = s->data_offset;
1441 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1442 if (ts_min == AV_NOPTS_VALUE)
1443 return -1;
1444 }
1445
1446 if (ts_max == AV_NOPTS_VALUE) {
1447 int step = 1024;
1448 filesize = avio_size(s->pb);
1449 pos_max = filesize - 1;
1450 do {
1451 pos_max -= step;
1452 ts_max = read_timestamp(s, stream_index, &pos_max,
1453 pos_max + step);
1454 step += step;
1455 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1456 if (ts_max == AV_NOPTS_VALUE)
1457 return -1;
1458
1459 for (;;) {
1460 int64_t tmp_pos = pos_max + 1;
1461 int64_t tmp_ts = read_timestamp(s, stream_index,
1462 &tmp_pos, INT64_MAX);
1463 if (tmp_ts == AV_NOPTS_VALUE)
1464 break;
1465 ts_max = tmp_ts;
1466 pos_max = tmp_pos;
1467 if (tmp_pos >= filesize)
1468 break;
1469 }
1470 pos_limit = pos_max;
1471 }
1472
1473 if (ts_min > ts_max)
1474 return -1;
1475 else if (ts_min == ts_max)
1476 pos_limit = pos_min;
1477
1478 no_change = 0;
1479 while (pos_min < pos_limit) {
1480 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1481 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1482 assert(pos_limit <= pos_max);
1483
1484 if (no_change == 0) {
1485 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1486 // interpolate position (better than plain bisection)
1487 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1488 ts_max - ts_min) +
1489 pos_min - approximate_keyframe_distance;
1490 } else if (no_change == 1) {
1491 // bisection if interpolation did not change min / max pos last time
1492 pos = (pos_min + pos_limit) >> 1;
1493 } else {
1494 /* Linear search if bisection failed; this can only happen if there
1495 * are very few or no keyframes between min and max. */
1496 pos = pos_min;
1497 }
1498 if (pos <= pos_min)
1499 pos = pos_min + 1;
1500 else if (pos > pos_limit)
1501 pos = pos_limit;
1502 start_pos = pos;
1503
1504 // May pass pos_limit instead of -1.
1505 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1506 if (pos == pos_max)
1507 no_change++;
1508 else
1509 no_change = 0;
1510 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1511 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1512 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1513 pos_limit, start_pos, no_change);
1514 if (ts == AV_NOPTS_VALUE) {
1515 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1516 return -1;
1517 }
1518 assert(ts != AV_NOPTS_VALUE);
1519 if (target_ts <= ts) {
1520 pos_limit = start_pos - 1;
1521 pos_max = pos;
1522 ts_max = ts;
1523 }
1524 if (target_ts >= ts) {
1525 pos_min = pos;
1526 ts_min = ts;
1527 }
1528 }
1529
1530 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1531 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1532 pos_min = pos;
1533 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1534 pos_min++;
1535 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1536 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1537 pos, ts_min, target_ts, ts_max);
1538 *ts_ret = ts;
1539 return pos;
1540 }
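/* The interpolation step above estimates the next file position from the
 * known (position, timestamp) bounds; written out, the av_rescale() call is
 * (an illustration of the existing formula, not additional code):
 *
 *     pos ~= pos_min + (target_ts - ts_min) * (pos_max - pos_min)
 *                      / (ts_max - ts_min)
 *            - approximate_keyframe_distance
 *
 * falling back to plain bisection, and finally to a linear scan from
 * pos_min, when the estimate stops making progress.
 */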
1541
1542 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1543 int64_t pos, int flags)
1544 {
1545 int64_t pos_min, pos_max;
1546
1547 pos_min = s->data_offset;
1548 pos_max = avio_size(s->pb) - 1;
1549
1550 if (pos < pos_min)
1551 pos = pos_min;
1552 else if (pos > pos_max)
1553 pos = pos_max;
1554
1555 avio_seek(s->pb, pos, SEEK_SET);
1556
1557 return 0;
1558 }
1559
1560 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1561 int64_t timestamp, int flags)
1562 {
1563 int index;
1564 int64_t ret;
1565 AVStream *st;
1566 AVIndexEntry *ie;
1567
1568 st = s->streams[stream_index];
1569
1570 index = av_index_search_timestamp(st, timestamp, flags);
1571
1572 if (index < 0 && st->nb_index_entries &&
1573 timestamp < st->index_entries[0].timestamp)
1574 return -1;
1575
1576 if (index < 0 || index == st->nb_index_entries - 1) {
1577 AVPacket pkt;
1578
1579 if (st->nb_index_entries) {
1580 assert(st->index_entries);
1581 ie = &st->index_entries[st->nb_index_entries - 1];
1582 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1583 return ret;
1584 ff_update_cur_dts(s, st, ie->timestamp);
1585 } else {
1586 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1587 return ret;
1588 }
1589 for (;;) {
1590 int read_status;
1591 do {
1592 read_status = av_read_frame(s, &pkt);
1593 } while (read_status == AVERROR(EAGAIN));
1594 if (read_status < 0)
1595 break;
1596 av_free_packet(&pkt);
1597 if (stream_index == pkt.stream_index)
1598 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1599 break;
1600 }
1601 index = av_index_search_timestamp(st, timestamp, flags);
1602 }
1603 if (index < 0)
1604 return -1;
1605
1606 ff_read_frame_flush(s);
1607 if (s->iformat->read_seek)
1608 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1609 return 0;
1610 ie = &st->index_entries[index];
1611 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1612 return ret;
1613 ff_update_cur_dts(s, st, ie->timestamp);
1614
1615 return 0;
1616 }
1617
1618 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1619 int64_t timestamp, int flags)
1620 {
1621 int ret;
1622 AVStream *st;
1623
1624 if (flags & AVSEEK_FLAG_BYTE) {
1625 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1626 return -1;
1627 ff_read_frame_flush(s);
1628 return seek_frame_byte(s, stream_index, timestamp, flags);
1629 }
1630
1631 if (stream_index < 0) {
1632 stream_index = av_find_default_stream_index(s);
1633 if (stream_index < 0)
1634 return -1;
1635
1636 st = s->streams[stream_index];
1637 /* With the default stream, the timestamp is expressed in AV_TIME_BASE units; rescale it to the stream's time base. */
1638 timestamp = av_rescale(timestamp, st->time_base.den,
1639 AV_TIME_BASE * (int64_t) st->time_base.num);
1640 }
1641
1642 /* first, we try the format specific seek */
1643 if (s->iformat->read_seek) {
1644 ff_read_frame_flush(s);
1645 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1646 } else
1647 ret = -1;
1648 if (ret >= 0)
1649 return 0;
1650
1651 if (s->iformat->read_timestamp &&
1652 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1653 ff_read_frame_flush(s);
1654 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1655 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1656 ff_read_frame_flush(s);
1657 return seek_frame_generic(s, stream_index, timestamp, flags);
1658 } else
1659 return -1;
1660 }
1661
1662 int av_seek_frame(AVFormatContext *s, int stream_index,
1663 int64_t timestamp, int flags)
1664 {
1665 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1666
1667 if (ret >= 0)
1668 ret = queue_attached_pictures(s);
1669
1670 return ret;
1671 }
1672
1673 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1674 int64_t ts, int64_t max_ts, int flags)
1675 {
1676 if (min_ts > ts || max_ts < ts)
1677 return -1;
1678
1679 if (s->iformat->read_seek2) {
1680 int ret;
1681 ff_read_frame_flush(s);
1682 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1683 ts, max_ts, flags);
1684
1685 if (ret >= 0)
1686 ret = queue_attached_pictures(s);
1687 return ret;
1688 }
1689
1690 if (s->iformat->read_timestamp) {
1691 // try to seek via read_timestamp()
1692 }
1693
1694 // Fall back on old API if new is not implemented but old is.
1695 // Note the old API has somewhat different semantics.
1696 if (s->iformat->read_seek || 1)
1697 return av_seek_frame(s, stream_index, ts,
1698 flags | ((uint64_t) ts - min_ts >
1699 (uint64_t) max_ts - ts
1700 ? AVSEEK_FLAG_BACKWARD : 0));
1701
1702 // try some generic seek like seek_frame_generic() but with new ts semantics
1703 }
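/* Caller-side sketch of the two seeking entry points ("ic" and the 10-second
 * target are examples; with stream_index == -1 the timestamp is in
 * AV_TIME_BASE units):
 *
 *     int64_t ts = 10 * AV_TIME_BASE;
 *     av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD);     // old API
 *     avformat_seek_file(ic, -1, INT64_MIN, ts, ts, 0);    // new API
 */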
1704
1705 /*******************************************************/
1706
1707 /**
1708 * Check whether the file has an accurate duration in any of its streams.
1709 *
1710 * @return TRUE if at least one stream has an accurate duration.
1711 */
1712 static int has_duration(AVFormatContext *ic)
1713 {
1714 int i;
1715 AVStream *st;
1716
1717 for (i = 0; i < ic->nb_streams; i++) {
1718 st = ic->streams[i];
1719 if (st->duration != AV_NOPTS_VALUE)
1720 return 1;
1721 }
1722 if (ic->duration != AV_NOPTS_VALUE)
1723 return 1;
1724 return 0;
1725 }
1726
1727 /**
1728 * Estimate the global stream timings from those of the individual streams.
1729 *
1730 * Also computes the global bitrate if possible.
1731 */
1732 static void update_stream_timings(AVFormatContext *ic)
1733 {
1734 int64_t start_time, start_time1, end_time, end_time1;
1735 int64_t duration, duration1, filesize;
1736 int i;
1737 AVStream *st;
1738
1739 start_time = INT64_MAX;
1740 end_time = INT64_MIN;
1741 duration = INT64_MIN;
1742 for (i = 0; i < ic->nb_streams; i++) {
1743 st = ic->streams[i];
1744 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1745 start_time1 = av_rescale_q(st->start_time, st->time_base,
1746 AV_TIME_BASE_Q);
1747 start_time = FFMIN(start_time, start_time1);
1748 if (st->duration != AV_NOPTS_VALUE) {
1749 end_time1 = start_time1 +
1750 av_rescale_q(st->duration, st->time_base,
1751 AV_TIME_BASE_Q);
1752 end_time = FFMAX(end_time, end_time1);
1753 }
1754 }
1755 if (st->duration != AV_NOPTS_VALUE) {
1756 duration1 = av_rescale_q(st->duration, st->time_base,
1757 AV_TIME_BASE_Q);
1758 duration = FFMAX(duration, duration1);
1759 }
1760 }
1761 if (start_time != INT64_MAX) {
1762 ic->start_time = start_time;
1763 if (end_time != INT64_MIN)
1764 duration = FFMAX(duration, end_time - start_time);
1765 }
1766 if (duration != INT64_MIN) {
1767 ic->duration = duration;
1768 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1769 /* compute the bitrate */
1770 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1771 (double) ic->duration;
1772 }
1773 }
1774
1775 static void fill_all_stream_timings(AVFormatContext *ic)
1776 {
1777 int i;
1778 AVStream *st;
1779
1780 update_stream_timings(ic);
1781 for (i = 0; i < ic->nb_streams; i++) {
1782 st = ic->streams[i];
1783 if (st->start_time == AV_NOPTS_VALUE) {
1784 if (ic->start_time != AV_NOPTS_VALUE)
1785 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1786 st->time_base);
1787 if (ic->duration != AV_NOPTS_VALUE)
1788 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
1789 st->time_base);
1790 }
1791 }
1792 }
1793
1794 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1795 {
1796 int64_t filesize, duration;
1797 int i;
1798 AVStream *st;
1799
1800 /* if bit_rate is already set, we believe it */
1801 if (ic->bit_rate <= 0) {
1802 int bit_rate = 0;
1803 for (i = 0; i < ic->nb_streams; i++) {
1804 st = ic->streams[i];
1805 if (st->codec->bit_rate > 0) {
1806 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1807 bit_rate = 0;
1808 break;
1809 }
1810 bit_rate += st->codec->bit_rate;
1811 }
1812 }
1813 ic->bit_rate = bit_rate;
1814 }
1815
1816 /* if duration is already set, we believe it */
1817 if (ic->duration == AV_NOPTS_VALUE &&
1818 ic->bit_rate != 0) {
1819 filesize = ic->pb ? avio_size(ic->pb) : 0;
1820 if (filesize > 0) {
1821 for (i = 0; i < ic->nb_streams; i++) {
1822 st = ic->streams[i];
1823 duration = av_rescale(8 * filesize, st->time_base.den,
1824 ic->bit_rate *
1825 (int64_t) st->time_base.num);
1826 if (st->duration == AV_NOPTS_VALUE)
1827 st->duration = duration;
1828 }
1829 }
1830 }
1831 }
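/* The av_rescale() call above is just the usual estimate
 *
 *     duration [s] ~= 8 * filesize / bit_rate
 *
 * expressed in st->time_base units, i.e.
 * (8 * filesize * time_base.den) / (bit_rate * time_base.num).
 */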
1832
1833 #define DURATION_MAX_READ_SIZE 250000
1834 #define DURATION_MAX_RETRY 3
1835
1836 /* only usable for MPEG-PS streams */
1837 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1838 {
1839 AVPacket pkt1, *pkt = &pkt1;
1840 AVStream *st;
1841 int read_size, i, ret;
1842 int64_t end_time;
1843 int64_t filesize, offset, duration;
1844 int retry = 0;
1845
1846 /* flush packet queue */
1847 flush_packet_queue(ic);
1848
1849 for (i = 0; i < ic->nb_streams; i++) {
1850 st = ic->streams[i];
1851 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1852 av_log(st->codec, AV_LOG_WARNING,
1853 "start time is not set in estimate_timings_from_pts\n");
1854
1855 if (st->parser) {
1856 av_parser_close(st->parser);
1857 st->parser = NULL;
1858 }
1859 }
1860
1861 /* estimate the end time (duration) */
1862 /* XXX: may need to support wrapping */
1863 filesize = ic->pb ? avio_size(ic->pb) : 0;
1864 end_time = AV_NOPTS_VALUE;
1865 do {
1866 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1867 if (offset < 0)
1868 offset = 0;
1869
1870 avio_seek(ic->pb, offset, SEEK_SET);
1871 read_size = 0;
1872 for (;;) {
1873 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1874 break;
1875
1876 do {
1877 ret = ff_read_packet(ic, pkt);
1878 } while (ret == AVERROR(EAGAIN));
1879 if (ret != 0)
1880 break;
1881 read_size += pkt->size;
1882 st = ic->streams[pkt->stream_index];
1883 if (pkt->pts != AV_NOPTS_VALUE &&
1884 (st->start_time != AV_NOPTS_VALUE ||
1885 st->first_dts != AV_NOPTS_VALUE)) {
1886 duration = end_time = pkt->pts;
1887 if (st->start_time != AV_NOPTS_VALUE)
1888 duration -= st->start_time;
1889 else
1890 duration -= st->first_dts;
1891 if (duration < 0)
1892 duration += 1LL << st->pts_wrap_bits;
1893 if (duration > 0) {
1894 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1895 st->duration = duration;
1896 }
1897 }
1898 av_free_packet(pkt);
1899 }
1900 } while (end_time == AV_NOPTS_VALUE &&
1901 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1902 ++retry <= DURATION_MAX_RETRY);
1903
1904 fill_all_stream_timings(ic);
1905
1906 avio_seek(ic->pb, old_offset, SEEK_SET);
1907 for (i = 0; i < ic->nb_streams; i++) {
1908 st = ic->streams[i];
1909 st->cur_dts = st->first_dts;
1910 st->last_IP_pts = AV_NOPTS_VALUE;
1911 }
1912 }
1913
1914 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1915 {
1916 int64_t file_size;
1917
1918 /* get the file size, if possible */
1919 if (ic->iformat->flags & AVFMT_NOFILE) {
1920 file_size = 0;
1921 } else {
1922 file_size = avio_size(ic->pb);
1923 file_size = FFMAX(0, file_size);
1924 }
1925
1926 if ((!strcmp(ic->iformat->name, "mpeg") ||
1927 !strcmp(ic->iformat->name, "mpegts")) &&
1928 file_size && ic->pb->seekable) {
1929 /* get accurate estimate from the PTSes */
1930 estimate_timings_from_pts(ic, old_offset);
1931 } else if (has_duration(ic)) {
1932 /* at least one component has timings - we use them for all
1933 * the components */
1934 fill_all_stream_timings(ic);
1935 } else {
1936 av_log(ic, AV_LOG_WARNING,
1937 "Estimating duration from bitrate, this may be inaccurate\n");
1938 /* less precise: use bitrate info */
1939 estimate_timings_from_bit_rate(ic);
1940 }
1941 update_stream_timings(ic);
1942
1943 {
1944 int i;
1945 AVStream av_unused *st;
1946 for (i = 0; i < ic->nb_streams; i++) {
1947 st = ic->streams[i];
1948 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
1949 (double) st->start_time / AV_TIME_BASE,
1950 (double) st->duration / AV_TIME_BASE);
1951 }
1952 av_dlog(ic,
1953 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1954 (double) ic->start_time / AV_TIME_BASE,
1955 (double) ic->duration / AV_TIME_BASE,
1956 ic->bit_rate / 1000);
1957 }
1958 }
1959
1960 static int has_codec_parameters(AVStream *st)
1961 {
1962 AVCodecContext *avctx = st->codec;
1963 int val;
1964
1965 switch (avctx->codec_type) {
1966 case AVMEDIA_TYPE_AUDIO:
1967 val = avctx->sample_rate && avctx->channels;
1968 if (st->info->found_decoder >= 0 &&
1969 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1970 return 0;
1971 break;
1972 case AVMEDIA_TYPE_VIDEO:
1973 val = avctx->width;
1974 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1975 return 0;
1976 break;
1977 default:
1978 val = 1;
1979 break;
1980 }
1981 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
1982 }
1983
1984 static int has_decode_delay_been_guessed(AVStream *st)
1985 {
1986 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1987 st->info->nb_decoded_frames >= 6;
1988 }
1989
1990 /* Returns 1 if decoded data was returned, 0 if not, or a negative error code. */
1991 static int try_decode_frame(AVStream *st, AVPacket *avpkt,
1992 AVDictionary **options)
1993 {
1994 const AVCodec *codec;
1995 int got_picture = 1, ret = 0;
1996 AVFrame *frame = av_frame_alloc();
1997 AVPacket pkt = *avpkt;
1998
1999 if (!frame)
2000 return AVERROR(ENOMEM);
2001
2002 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2003 AVDictionary *thread_opt = NULL;
2004
2005 codec = st->codec->codec ? st->codec->codec
2006 : avcodec_find_decoder(st->codec->codec_id);
2007
2008 if (!codec) {
2009 st->info->found_decoder = -1;
2010 ret = -1;
2011 goto fail;
2012 }
2013
2014 /* Force thread count to 1 since the H.264 decoder will not extract
2015 * SPS and PPS to extradata during multi-threaded decoding. */
2016 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2017 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2018 if (!options)
2019 av_dict_free(&thread_opt);
2020 if (ret < 0) {
2021 st->info->found_decoder = -1;
2022 goto fail;
2023 }
2024 st->info->found_decoder = 1;
2025 } else if (!st->info->found_decoder)
2026 st->info->found_decoder = 1;
2027
2028 if (st->info->found_decoder < 0) {
2029 ret = -1;
2030 goto fail;
2031 }
2032
2033 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2034 ret >= 0 &&
2035 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
2036 (!st->codec_info_nb_frames &&
2037 st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2038 got_picture = 0;
2039 switch (st->codec->codec_type) {
2040 case AVMEDIA_TYPE_VIDEO:
2041 ret = avcodec_decode_video2(st->codec, frame,
2042 &got_picture, &pkt);
2043 break;
2044 case AVMEDIA_TYPE_AUDIO:
2045 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
2046 break;
2047 default:
2048 break;
2049 }
2050 if (ret >= 0) {
2051 if (got_picture)
2052 st->info->nb_decoded_frames++;
2053 pkt.data += ret;
2054 pkt.size -= ret;
2055 ret = got_picture;
2056 }
2057 }
2058
2059 fail:
2060 av_frame_free(&frame);
2061 return ret;
2062 }
2063
2064 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2065 {
2066 while (tags->id != AV_CODEC_ID_NONE) {
2067 if (tags->id == id)
2068 return tags->tag;
2069 tags++;
2070 }
2071 return 0;
2072 }
2073
2074 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2075 {
2076 int i;
2077 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
2078 if (tag == tags[i].tag)
2079 return tags[i].id;
2080 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
2081 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2082 return tags[i].id;
2083 return AV_CODEC_ID_NONE;
2084 }
2085
2086 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
2087 {
2088 if (flt) {
2089 switch (bps) {
2090 case 32:
2091 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2092 case 64:
2093 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2094 default:
2095 return AV_CODEC_ID_NONE;
2096 }
2097 } else {
2098 bps >>= 3;
2099 if (sflags & (1 << (bps - 1))) {
2100 switch (bps) {
2101 case 1:
2102 return AV_CODEC_ID_PCM_S8;
2103 case 2:
2104 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2105 case 3:
2106 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2107 case 4:
2108 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2109 default:
2110 return AV_CODEC_ID_NONE;
2111 }
2112 } else {
2113 switch (bps) {
2114 case 1:
2115 return AV_CODEC_ID_PCM_U8;
2116 case 2:
2117 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2118 case 3:
2119 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2120 case 4:
2121 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2122 default:
2123 return AV_CODEC_ID_NONE;
2124 }
2125 }
2126 }
2127 }
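/* Illustrative use (hypothetical values, not a call made in this file): a
 * demuxer that found 16-bit little-endian signed PCM could map it with
 * ff_get_pcm_codec_id(16, 0, 0, 0xFFFF), which yields AV_CODEC_ID_PCM_S16LE
 * since bps >> 3 == 2 and bit 1 of sflags marks 2-byte samples as signed. */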
2128
2129 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
2130 {
2131 int i;
2132 for (i = 0; tags && tags[i]; i++) {
2133 int tag = ff_codec_get_tag(tags[i], id);
2134 if (tag)
2135 return tag;
2136 }
2137 return 0;
2138 }
2139
2140 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2141 {
2142 int i;
2143 for (i = 0; tags && tags[i]; i++) {
2144 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2145 if (id != AV_CODEC_ID_NONE)
2146 return id;
2147 }
2148 return AV_CODEC_ID_NONE;
2149 }
2150
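/* Assign an end time to chapters that lack one: use the earlier of the file's
 * end time and the start of the next chapter that begins after this one; if
 * neither is known, fall back to the chapter's own start. */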
2151 static void compute_chapters_end(AVFormatContext *s)
2152 {
2153 unsigned int i, j;
2154 int64_t max_time = s->duration +
2155 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2156
2157 for (i = 0; i < s->nb_chapters; i++)
2158 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2159 AVChapter *ch = s->chapters[i];
2160 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2161 ch->time_base)
2162 : INT64_MAX;
2163
2164 for (j = 0; j < s->nb_chapters; j++) {
2165 AVChapter *ch1 = s->chapters[j];
2166 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2167 ch->time_base);
2168 if (j != i && next_start > ch->start && next_start < end)
2169 end = next_start;
2170 }
2171 ch->end = (end == INT64_MAX) ? ch->start : end;
2172 }
2173 }
2174
2175 static int get_std_framerate(int i)
2176 {
2177 if (i < 60 * 12)
2178 return (i + 1) * 1001;
2179 else
2180 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
2181 }
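/* Illustration of the mapping (derived from the code above): combined with
 * the 12 * 1001 denominator used in avformat_find_stream_info(), indices
 * below 720 cover (i + 1) / 12 fps in 1/12 fps steps (e.g. i = 287 gives
 * 288288 / 12012 = 24 fps), while the last five entries give the NTSC-style
 * rates 24000/1001, 30000/1001, 60000/1001, 12000/1001 and 15000/1001. */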
2182
2183 /* Is the time base unreliable?
2184  * This is a heuristic that balances quick acceptance of the header values
2185  * against some extra checks.
2186  * Old DivX and Xvid files often have nonsense timebases like 1 fps or 2 fps.
2187  * MPEG-2 commonly misuses field repeat flags to store different framerates.
2188  * There are also "variable" fps files that this needs to detect. */
2189 static int tb_unreliable(AVCodecContext *c)
2190 {
2191 if (c->time_base.den >= 101L * c->time_base.num ||
2192 c->time_base.den < 5L * c->time_base.num ||
2193 // c->codec_tag == AV_RL32("DIVX") ||
2194 // c->codec_tag == AV_RL32("XVID") ||
2195 c->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
2196 c->codec_id == AV_CODEC_ID_H264)
2197 return 1;
2198 return 0;
2199 }
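/* For illustration (hypothetical inputs): a container claiming a 1/1 time
 * base ("1 fps") hits the den < 5 * num test and is treated as unreliable,
 * while a plain 1/25 time base passes both numeric tests and is trusted
 * unless the codec is MPEG-2 or H.264. */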
2200
2201 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2202 {
2203 int i, count, ret, read_size, j;
2204 AVStream *st;
2205 AVPacket pkt1, *pkt;
2206 int64_t old_offset = avio_tell(ic->pb);
2207 // new streams might appear during probing; there are no options for those
2208 int orig_nb_streams = ic->nb_streams;
2209
2210 for (i = 0; i < ic->nb_streams; i++) {
2211 const AVCodec *codec;
2212 AVDictionary *thread_opt = NULL;
2213 st = ic->streams[i];
2214
2215 // init parser here; in this function it is mainly needed for extradata splitting
2216 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2217 st->parser = av_parser_init(st->codec->codec_id);
2218 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2219 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2220 }
2221 codec = st->codec->codec ? st->codec->codec
2222 : avcodec_find_decoder(st->codec->codec_id);
2223
2224 /* Force thread count to 1 since the H.264 decoder will not extract
2225 * SPS and PPS to extradata during multi-threaded decoding. */
2226 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2227
2228 /* Ensure that subtitle_header is properly set. */
2229 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2230 && codec && !st->codec->codec)
2231 avcodec_open2(st->codec, codec,
2232 options ? &options[i] : &thread_opt);
2233
2234 // Try to just open decoders, in case this is enough to get parameters.
2235 if (!has_codec_parameters(st)) {
2236 if (codec && !st->codec->codec)
2237 avcodec_open2(st->codec, codec,
2238 options ? &options[i] : &thread_opt);
2239 }
2240 if (!options)
2241 av_dict_free(&thread_opt);
2242 }
2243
2244 for (i = 0; i < ic->nb_streams; i++) {
2245 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2246 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
2247 }
2248
2249 count = 0;
2250 read_size = 0;
2251 for (;;) {
2252 if (ff_check_interrupt(&ic->interrupt_callback)) {
2253 ret = AVERROR_EXIT;
2254 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2255 break;
2256 }
2257
2258 /* check whether any stream still needs more codec info */
2259 for (i = 0; i < ic->nb_streams; i++) {
2260 int fps_analyze_framecount = 20;
2261
2262 st = ic->streams[i];
2263 if (!has_codec_parameters(st))
2264 break;
2265 /* If the timebase is coarse (like the usual millisecond precision
2266 * of mkv), we need to analyze more frames to reliably arrive at
2267 * the correct fps. */
2268 if (av_q2d(st->time_base) > 0.0005)
2269 fps_analyze_framecount *= 2;
2270 if (ic->fps_probe_size >= 0)
2271 fps_analyze_framecount = ic->fps_probe_size;
2272 /* variable fps and no guess at the real fps */
2273 if (tb_unreliable(st->codec) && !st->avg_frame_rate.num &&
2274 st->codec_info_nb_frames < fps_analyze_framecount &&
2275 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2276 break;
2277 if (st->parser && st->parser->parser->split &&
2278 !st->codec->extradata)
2279 break;
2280 if (st->first_dts == AV_NOPTS_VALUE &&
2281 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2282 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2283 break;
2284 }
2285 if (i == ic->nb_streams) {
2286 /* NOTE: If the format has no header, then we need to read some
2287 * packets to get most of the streams, so we cannot stop here. */
2288 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2289 /* If we found the info for all the codecs, we can stop. */
2290 ret = count;
2291 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2292 break;
2293 }
2294 }
2295 /* We did not get all the codec info, but we read too much data. */
2296 if (read_size >= ic->probesize) {
2297 ret = count;
2298 av_log(ic, AV_LOG_DEBUG,
2299 "Probe buffer size limit %d reached\n", ic->probesize);
2300 break;
2301 }
2302
2303 /* NOTE: A new stream can be added here if the file has no header
2304 * (AVFMTCTX_NOHEADER). */
2305 ret = read_frame_internal(ic, &pkt1);
2306 if (ret == AVERROR(EAGAIN))
2307 continue;
2308
2309 if (ret < 0) {
2310 /* EOF or error */
2311 AVPacket empty_pkt = { 0 };
2312 int err = 0;
2313 av_init_packet(&empty_pkt);
2314
2315 /* EOF was reached before all the codec parameters were found. */
2316 ret = -1;
2317 for (i = 0; i < ic->nb_streams; i++) {
2318 st = ic->streams[i];
2319
2320 /* flush the decoders */
2321 if (st->info->found_decoder == 1) {
2322 do {
2323 err = try_decode_frame(st, &empty_pkt,
2324 (options && i < orig_nb_streams)
2325 ? &options[i] : NULL);
2326 } while (err > 0 && !has_codec_parameters(st));
2327 }
2328
2329 if (err < 0) {
2330 av_log(ic, AV_LOG_WARNING,
2331 "decoding for stream %d failed\n", st->index);
2332 } else if (!has_codec_parameters(st)) {
2333 char buf[256];
2334 avcodec_string(buf, sizeof(buf), st->codec, 0);
2335 av_log(ic, AV_LOG_WARNING,
2336 "Could not find codec parameters (%s)\n", buf);
2337 } else {
2338 ret = 0;
2339 }
2340 }
2341 break;
2342 }
2343
2344 if (ic->flags & AVFMT_FLAG_NOBUFFER) {
2345 pkt = &pkt1;
2346 } else {
2347 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2348 &ic->packet_buffer_end);
2349 if ((ret = av_dup_packet(pkt)) < 0)
2350 goto find_stream_info_err;
2351 }
2352
2353 read_size += pkt->size;
2354
2355 st = ic->streams[pkt->stream_index];
2356 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2357 /* check for non-increasing dts */
2358 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2359 st->info->fps_last_dts >= pkt->dts) {
2360 av_log(ic, AV_LOG_WARNING,
2361 "Non-increasing DTS in stream %d: packet %d with DTS "
2362 "%"PRId64", packet %d with DTS %"PRId64"\n",
2363 st->index, st->info->fps_last_dts_idx,
2364 st->info->fps_last_dts, st->codec_info_nb_frames,
2365 pkt->dts);
2366 st->info->fps_first_dts =
2367 st->info->fps_last_dts = AV_NOPTS_VALUE;
2368 }
2369 /* Check for a discontinuity in dts. If the difference in dts
2370 * is more than 1000 times the average packet duration in the
2371 * sequence, we treat it as a discontinuity. */
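/* For example (illustrative numbers): with a 90 kHz time base and an
 * average packet duration of 3003 ticks (~29.97 fps), a forward DTS jump
 * of more than roughly 3,003,000 ticks (about 33 seconds) is treated as a
 * discontinuity and the fps bookkeeping below is restarted. */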
2372 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2373 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2374 (pkt->dts - st->info->fps_last_dts) / 1000 >
2375 (st->info->fps_last_dts - st->info->fps_first_dts) /
2376 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2377 av_log(ic, AV_LOG_WARNING,
2378 "DTS discontinuity in stream %d: packet %d with DTS "
2379 "%"PRId64", packet %d with DTS %"PRId64"\n",
2380 st->index, st->info->fps_last_dts_idx,
2381 st->info->fps_last_dts, st->codec_info_nb_frames,
2382 pkt->dts);
2383 st->info->fps_first_dts =
2384 st->info->fps_last_dts = AV_NOPTS_VALUE;
2385 }
2386
2387 /* update stored dts values */
2388 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2389 st->info->fps_first_dts = pkt->dts;
2390 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2391 }
2392 st->info->fps_last_dts = pkt->dts;
2393 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2394
2395 /* check max_analyze_duration */
2396 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2397 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2398 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2399 ic->max_analyze_duration);
2400 break;
2401 }
2402 }
2403 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
2404 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
2405 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2406 st->codec->extradata_size = i;
2407 st->codec->extradata = av_malloc(st->codec->extradata_size +
2408 FF_INPUT_BUFFER_PADDING_SIZE);
2409 if (!st->codec->extradata)
2410 return AVERROR(ENOMEM);
2411 memcpy(st->codec->extradata, pkt->data,
2412 st->codec->extradata_size);
2413 memset(st->codec->extradata + i, 0,
2414 FF_INPUT_BUFFER_PADDING_SIZE);
2415 }
2416 }
2417
2418 /* If we still have no information, try to open the codec and
2419 * decode a frame. We try to avoid that in most cases as it takes
2420 * longer and uses more memory. For MPEG-4, we need to decompress
2421 * for QuickTime.
2422 *
2423 * If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2424 * least one frame of codec data; this makes sure the codec initializes
2425 * the channel configuration and does not just trust the values from
2426 * the container. */
2427 try_decode_frame(st, pkt,
2428 (options && i < orig_nb_streams) ? &options[i] : NULL);
2429
2430 st->codec_info_nb_frames++;
2431 count++;
2432 }
2433
2434 // close codecs which were opened in try_decode_frame()
2435 for (i = 0; i < ic->nb_streams; i++) {
2436 st = ic->streams[i];
2437 avcodec_close(st->codec);
2438 }
2439 for (i = 0; i < ic->nb_streams; i++) {
2440 st = ic->streams[i];
2441 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2442 /* estimate average framerate if not set by demuxer */
2443 if (!st->avg_frame_rate.num &&
2444 st->info->fps_last_dts != st->info->fps_first_dts) {
2445 int64_t delta_dts = st->info->fps_last_dts -
2446 st->info->fps_first_dts;
2447 int delta_packets = st->info->fps_last_dts_idx -
2448 st->info->fps_first_dts_idx;
2449 int best_fps = 0;
2450 double best_error = 0.01;
2451
2452 if (delta_dts >= INT64_MAX / st->time_base.num ||
2453 delta_packets >= INT64_MAX / st->time_base.den ||
2454 delta_dts < 0)
2455 continue;
2456 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2457 delta_packets * (int64_t) st->time_base.den,
2458 delta_dts * (int64_t) st->time_base.num, 60000);
2459
2460 /* Round guessed framerate to a "standard" framerate if it's
2461 * within 1% of the original estimate. */
2462 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2463 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2464 double error = fabs(av_q2d(st->avg_frame_rate) /
2465 av_q2d(std_fps) - 1);
2466
2467 if (error < best_error) {
2468 best_error = error;
2469 best_fps = std_fps.num;
2470 }
2471 }
2472 if (best_fps)
2473 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2474 best_fps, 12 * 1001, INT_MAX);
2475 }
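/* Worked example of the estimate above (illustrative numbers): 300 packets
 * spanning delta_dts = 900900 ticks in a 1/90000 time base reduce to
 * 300 * 90000 / 900900 = 30000/1001 (~29.97 fps); the rounding loop then
 * keeps it on the standard 30000/1001 rate since the error is well below
 * the 1% threshold. */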
2476 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2477 if (!st->codec->bits_per_coded_sample)
2478 st->codec->bits_per_coded_sample =
2479 av_get_bits_per_sample(st->codec->codec_id);
2480 // set stream disposition based on audio service type
2481 switch (st->codec->audio_service_type) {
2482 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2483 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2484 break;
2485 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2486 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2487 break;
2488 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2489 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2490 break;
2491 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2492 st->disposition = AV_DISPOSITION_COMMENT;
2493 break;
2494 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2495 st->disposition = AV_DISPOSITION_KARAOKE;
2496 break;
2497 }
2498 }
2499 }
2500
2501 estimate_timings(ic, old_offset);
2502
2503 compute_chapters_end(ic);
2504
2505 find_stream_info_err:
2506 for (i = 0; i < ic->nb_streams; i++) {
2507 ic->streams[i]->codec->thread_count = 0;
2508 av_freep(&ic->streams[i]->info);
2509 }
2510 return ret;
2511 }
2512
2513 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2514 {
2515 int i, j;
2516
2517 for (i = 0; i < ic->nb_programs; i++)
2518 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2519 if (ic->programs[i]->stream_index[j] == s)
2520 return ic->programs[i];
2521 return NULL;
2522 }
2523
2524 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2525 int wanted_stream_nb, int related_stream,
2526 AVCodec **decoder_ret, int flags)
2527 {
2528 int i, nb_streams = ic->nb_streams;
2529 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2530 unsigned *program = NULL;
2531 AVCodec *decoder = NULL, *best_decoder = NULL;
2532
2533 if (related_stream >= 0 && wanted_stream_nb < 0) {
2534 AVProgram *p = find_program_from_stream(ic, related_stream);
2535 if (p) {
2536 program = p->stream_index;
2537 nb_streams = p->nb_stream_indexes;
2538 }
2539 }
2540 for (i = 0; i < nb_streams; i++) {
2541 int real_stream_index = program ? program[i] : i;
2542 AVStream *st = ic->streams[real_stream_index];
2543 AVCodecContext *avctx = st->codec;
2544 if (avctx->codec_type != type)
2545 continue;
2546 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2547 continue;
2548 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2549 AV_DISPOSITION_VISUAL_IMPAIRED))
2550 continue;
2551 if (decoder_ret) {
2552 decoder = avcodec_find_decoder(st->codec->codec_id);
2553 if (!decoder) {
2554 if (ret < 0)
2555 ret = AVERROR_DECODER_NOT_FOUND;
2556 continue;
2557 }
2558 }
2559 if (best_count >= st->codec_info_nb_frames)
2560 continue;
2561 best_count = st->codec_info_nb_frames;
2562 ret = real_stream_index;
2563 best_decoder = decoder;
2564 if (program && i == nb_streams - 1 && ret < 0) {
2565 program = NULL;
2566 nb_streams = ic->nb_streams;
2567 /* no related stream found, try again with everything */
2568 i = 0;
2569 }
2570 }
2571 if (decoder_ret)
2572 *decoder_ret = best_decoder;
2573 return ret;
2574 }
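/* Typical caller-side usage (a sketch with hypothetical variable names, not
 * code from this file):
 *
 *     AVCodec *dec;
 *     int idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (idx < 0)
 *         return idx; // AVERROR_STREAM_NOT_FOUND or AVERROR_DECODER_NOT_FOUND
 *
 * This picks the video stream with the most frames counted during probing and
 * returns a matching decoder through dec. */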
2575
2576 /*******************************************************/
2577
2578 int av_read_play(AVFormatContext *s)
2579 {
2580 if (s->iformat->read_play)
2581 return s->iformat->read_play(s);
2582 if (s->pb)
2583 return avio_pause(s->pb, 0);
2584 return AVERROR(ENOSYS);
2585 }
2586
2587 int av_read_pause(AVFormatContext *s)
2588 {
2589 if (s->iformat->read_pause)
2590 return s->iformat->read_pause(s);
2591 if (s->pb)
2592 return avio_pause(s->pb, 1);
2593 return AVERROR(ENOSYS);
2594 }
2595
2596 void avformat_free_context(AVFormatContext *s)
2597 {
2598 int i, j;
2599 AVStream *st;
2600
2601 av_opt_free(s);
2602 if (s->iformat && s->iformat->priv_class && s->priv_data)
2603 av_opt_free(s->priv_data);
2604
2605 for (i = 0; i < s->nb_streams; i++) {
2606 /* free all data in a stream component */
2607 st = s->streams[i];
2608
2609 for (j = 0; j < st->nb_side_data; j++)
2610 av_freep(&st->side_data[j].data);
2611 av_freep(&st->side_data);
2612 st->nb_side_data = 0;
2613
2614 if (st->parser) {
2615 av_parser_close(st->parser);
2616 }
2617 if (st->attached_pic.data)
2618 av_free_packet(&st->attached_pic);
2619 av_dict_free(&st->metadata);
2620 av_freep(&st->probe_data.buf);
2621 av_free(st->index_entries);
2622 av_free(st->codec->extradata);
2623 av_free(st->codec->subtitle_header);
2624 av_free(st->codec);
2625 av_free(st->priv_data);
2626 av_free(st->info);
2627 av_free(st);
2628 }
2629 for (i = s->nb_programs - 1; i >= 0; i--) {
2630 av_dict_free(&s->programs[i]->metadata);
2631 av_freep(&s->programs[i]->stream_index);
2632 av_freep(&s->programs[i]);
2633 }
2634 av_freep(&s->programs);
2635 av_freep(&s->priv_data);
2636 while (s->nb_chapters--) {
2637 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2638 av_free(s->chapters[s->nb_chapters]);
2639 }
2640 av_freep(&s->chapters);
2641 av_dict_free(&s->metadata);
2642 av_freep(&s->streams);
2643 av_freep(&s->internal);
2644 av_free(s);
2645 }
2646
2647 void avformat_close_input(AVFormatContext **ps)
2648 {
2649 AVFormatContext *s = *ps;
2650 AVIOContext *pb = s->pb;
2651
2652 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2653 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2654 pb = NULL;
2655
2656 flush_packet_queue(s);
2657
2658 if (s->iformat)
2659 if (s->iformat->read_close)
2660 s->iformat->read_close(s);
2661
2662 avformat_free_context(s);
2663
2664 *ps = NULL;
2665
2666 avio_close(pb);
2667 }
2668
2669 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
2670 {
2671 AVStream *st;
2672 int i;
2673
2674 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2675 sizeof(*s->streams)) < 0) {
2676 s->nb_streams = 0;
2677 return NULL;
2678 }
2679
2680 st = av_mallocz(sizeof(AVStream));
2681 if (!st)
2682 return NULL;
2683 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2684 av_free(st);
2685 return NULL;
2686 }
2687
2688 st->codec = avcodec_alloc_context3(c);
2689 if (s->iformat)
2690 /* no default bitrate if decoding */
2691 st->codec->bit_rate = 0;
2692 st->index = s->nb_streams;
2693 st->start_time = AV_NOPTS_VALUE;
2694 st->duration = AV_NOPTS_VALUE;
2695 /* We set the current DTS to 0 so that formats without any timestamps
2696 * but with durations still get some timestamps; formats with some unknown
2697 * timestamps have their first few packets buffered and the
2698 * timestamps corrected before they are returned to the user. */
2699 st->cur_dts = 0;
2700 st->first_dts = AV_NOPTS_VALUE;
2701 st->probe_packets = MAX_PROBE_PACKETS;
2702
2703 /* default pts setting is MPEG-like */
2704 avpriv_set_pts_info(st, 33, 1, 90000);
2705 st->last_IP_pts = AV_NOPTS_VALUE;
2706 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2707 st->pts_buffer[i] = AV_NOPTS_VALUE;
2708
2709 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2710
2711 st->info->fps_first_dts = AV_NOPTS_VALUE;
2712 st->info->fps_last_dts = AV_NOPTS_VALUE;
2713
2714 s->streams[s->nb_streams++] = st;
2715 return st;
2716 }
2717
2718 AVProgram *av_new_program(AVFormatContext *ac, int id)
2719 {
2720 AVProgram *program = NULL;
2721 int i;
2722
2723 av_dlog(ac, "new_program: id=0x%04x\n", id);
2724
2725 for (i = 0; i < ac->nb_programs; i++)
2726 if (ac->programs[i]->id == id)
2727 program = ac->programs[i];
2728
2729 if (!program) {
2730 program = av_mallocz(sizeof(AVProgram));
2731 if (!program)
2732 return NULL;
2733 dynarray_add(&ac->programs, &ac->nb_programs, program);
2734 program->discard = AVDISCARD_NONE;
2735 }
2736 program->id = id;
2737
2738 return program;
2739 }
2740
2741 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2742 int64_t start, int64_t end, const char *title)
2743 {
2744 AVChapter *chapter = NULL;
2745 int i;
2746
2747 for (i = 0; i < s->nb_chapters; i++)
2748 if (s->chapters[i]->id == id)
2749 chapter = s->chapters[i];
2750
2751 if (!chapter) {
2752 chapter = av_mallocz(sizeof(AVChapter));
2753 if (!chapter)
2754 return NULL;
2755 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2756 }
2757 av_dict_set(&chapter->metadata, "title", title, 0);
2758 chapter->id = id;
2759 chapter->time_base = time_base;
2760 chapter->start = start;
2761 chapter->end = end;
2762
2763 return chapter;
2764 }
2765
2766 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2767 {
2768 int i, j;
2769 AVProgram *program = NULL;
2770
2771 if (idx >= ac->nb_streams) {
2772 av_log(ac, AV_LOG_ERROR, "stream index %u is not valid\n", idx);
2773 return;
2774 }
2775
2776 for (i = 0; i < ac->nb_programs; i++) {
2777 if (ac->programs[i]->id != progid)
2778 continue;
2779 program = ac->programs[i];
2780 for (j = 0; j < program->nb_stream_indexes; j++)
2781 if (program->stream_index[j] == idx)
2782 return;
2783
2784 if (av_reallocp_array(&program->stream_index,
2785 program->nb_stream_indexes + 1,
2786 sizeof(*program->stream_index)) < 0) {
2787 program->nb_stream_indexes = 0;
2788 return;
2789 }
2790 program->stream_index[program->nb_stream_indexes++] = idx;
2791 return;
2792 }
2793 }
2794
2795 static void print_fps(double d, const char *postfix)
2796 {
2797 uint64_t v = lrintf(d * 100);
2798 if (v % 100)
2799 av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2800 else if (v % (100 * 1000))
2801 av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2802 else
2803 av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d / 1000, postfix);
2804 }
2805
2806 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
2807 {
2808 if (m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))) {
2809 AVDictionaryEntry *tag = NULL;
2810
2811 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
2812 while ((tag = av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX)))
2813 if (strcmp("language", tag->key))
2814 av_log(ctx, AV_LOG_INFO,
2815 "%s %-16s: %s\n", indent, tag->key, tag->value);
2816 }
2817 }
2818
2819 /* "user interface" functions */
2820 static void dump_stream_format(AVFormatContext *ic, int i,
2821 int index, int is_output)
2822 {
2823 char buf[256];
2824 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2825 AVStream *st = ic->streams[i];
2826 int g = av_gcd(st->time_base.num, st->time_base.den);
2827 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
2828 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2829 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2830 /* the PID is important information, so we display it */
2831 /* XXX: add a generic system */
2832 if (flags & AVFMT_SHOW_IDS)
2833 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2834 if (lang)
2835 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
2836 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames,
2837 st->time_base.num / g, st->time_base.den / g);
2838 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2839 if (st->sample_aspect_ratio.num && // default
2840 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2841 AVRational display_aspect_ratio;
2842 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2843 st->codec->width * st->sample_aspect_ratio.num,
2844 st->codec->height * st->sample_aspect_ratio.den,
2845 1024 * 1024);
2846 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2847 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2848 display_aspect_ratio.num, display_aspect_ratio.den);
2849 }
2850 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2851 if (st->avg_frame_rate.den && st->avg_frame_rate.num)
2852 print_fps(av_q2d(st->avg_frame_rate), "fps");
2853 if (st->time_base.den && st->time_base.num)
2854 print_fps(1 / av_q2d(st->time_base), "tbn");
2855 if (st->codec->time_base.den && st->codec->time_base.num)
2856 print_fps(1 / av_q2d(st->codec->time_base), "tbc");
2857 }
2858 if (st->disposition & AV_DISPOSITION_DEFAULT)
2859 av_log(NULL, AV_LOG_INFO, " (default)");
2860 if (st->disposition & AV_DISPOSITION_DUB)
2861 av_log(NULL, AV_LOG_INFO, " (dub)");
2862 if (st->disposition & AV_DISPOSITION_ORIGINAL)
2863 av_log(NULL, AV_LOG_INFO, " (original)");
2864 if (st->disposition & AV_DISPOSITION_COMMENT)
2865 av_log(NULL, AV_LOG_INFO, " (comment)");
2866 if (st->disposition & AV_DISPOSITION_LYRICS)
2867 av_log(NULL, AV_LOG_INFO, " (lyrics)");
2868 if (st->disposition & AV_DISPOSITION_KARAOKE)
2869 av_log(NULL, AV_LOG_INFO, " (karaoke)");
2870 if (st->disposition & AV_DISPOSITION_FORCED)
2871 av_log(NULL, AV_LOG_INFO, " (forced)");
2872 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
2873 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
2874 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
2875 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
2876 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
2877 av_log(NULL, AV_LOG_INFO, " (clean effects)");
2878 av_log(NULL, AV_LOG_INFO, "\n");
2879 dump_metadata(NULL, st->metadata, " ");
2880 }
2881
2882 void av_dump_format(AVFormatContext *ic, int index,
2883 const char *url, int is_output)
2884 {
2885 int i;
2886 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
2887 if (ic->nb_streams && !printed)
2888 return;
2889
2890 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2891 is_output ? "Output" : "Input",
2892 index,
2893 is_output ? ic->oformat->name : ic->iformat->name,
2894 is_output ? "to" : "from", url);
2895 dump_metadata(NULL, ic->metadata, " ");
2896 if (!is_output) {
2897 av_log(NULL, AV_LOG_INFO, " Duration: ");
2898 if (ic->duration != AV_NOPTS_VALUE) {
2899 int hours, mins, secs, us;
2900 secs = ic->duration / AV_TIME_BASE;
2901 us = ic->duration % AV_TIME_BASE;
2902 mins = secs / 60;
2903 secs %= 60;
2904 hours = mins / 60;
2905 mins %= 60;
2906 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2907 (100 * us) / AV_TIME_BASE);
2908 } else {
2909 av_log(NULL, AV_LOG_INFO, "N/A");
2910 }
2911 if (ic->start_time != AV_NOPTS_VALUE) {
2912 int secs, us;
2913 av_log(NULL, AV_LOG_INFO, ", start: ");
2914 secs = ic->start_time / AV_TIME_BASE;
2915 us = abs(ic->start_time % AV_TIME_BASE);
2916 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2917 secs, (int) av_rescale(us, 1000000, AV_TIME_BASE));
2918 }
2919 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2920 if (ic->bit_rate)
2921 av_log(NULL, AV_LOG_INFO, "%d kb/s", ic->bit_rate / 1000);
2922 else
2923 av_log(NULL, AV_LOG_INFO, "N/A");
2924 av_log(NULL, AV_LOG_INFO, "\n");
2925 }
2926 for (i = 0; i < ic->nb_chapters; i++) {
2927 AVChapter *ch = ic->chapters[i];
2928 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
2929 av_log(NULL, AV_LOG_INFO,
2930 "start %f, ", ch->start * av_q2d(ch->time_base));
2931 av_log(NULL, AV_LOG_INFO,
2932 "end %f\n", ch->end * av_q2d(ch->time_base));
2933
2934 dump_metadata(NULL, ch->metadata, " ");
2935 }
2936 if (ic->nb_programs) {
2937 int j, k, total = 0;
2938 for (j = 0; j < ic->nb_programs; j++) {
2939 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
2940 "name", NULL, 0);
2941 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2942 name ? name->value : "");
2943 dump_metadata(NULL, ic->programs[j]->metadata, " ");
2944 for (k = 0; k < ic->programs[j]->nb_stream_indexes; k++) {
2945 dump_stream_format(ic, ic->programs[j]->stream_index[k],
2946 index, is_output);
2947 printed[ic->programs[j]->stream_index[k]] = 1;
2948 }
2949 total += ic->programs[j]->nb_stream_indexes;
2950 }
2951 if (total < ic->nb_streams)
2952 av_log(NULL, AV_LOG_INFO, " No Program\n");
2953 }
2954 for (i = 0; i < ic->nb_streams; i++)
2955 if (!printed[i])
2956 dump_stream_format(ic, i, index, is_output);
2957
2958 av_free(printed);
2959 }
2960
2961 uint64_t ff_ntp_time(void)
2962 {
2963 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
2964 }
2965
2966 int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
2967 {
2968 const char *p;
2969 char *q, buf1[20], c;
2970 int nd, len, percentd_found;
2971
2972 q = buf;
2973 p = path;
2974 percentd_found = 0;
2975 for (;;) {
2976 c = *p++;
2977 if (c == '\0')
2978 break;
2979 if (c == '%') {
2980 do {
2981 nd = 0;
2982 while (av_isdigit(*p))
2983 nd = nd * 10 + *p++ - '0';
2984 c = *p++;
2985 } while (av_isdigit(c));
2986
2987 switch (c) {
2988 case '%':
2989 goto addchar;
2990 case 'd':
2991 if (percentd_found)
2992 goto fail;
2993 percentd_found = 1;
2994 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2995 len = strlen(buf1);
2996 if ((q - buf + len) > buf_size - 1)
2997 goto fail;
2998 memcpy(q, buf1, len);
2999 q += len;
3000 break;
3001 default:
3002 goto fail;
3003 }
3004 } else {
3005 addchar:
3006 if ((q - buf) < buf_size - 1)
3007 *q++ = c;
3008 }
3009 }
3010 if (!percentd_found)
3011 goto fail;
3012 *q = '\0';
3013 return 0;
3014 fail:
3015 *q = '\0';
3016 return -1;
3017 }
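/* Illustrative use (hypothetical buffer, not a call made in this file):
 *
 *     char name[64];
 *     av_get_frame_filename(name, sizeof(name), "frame%04d.png", 7);
 *
 * writes "frame0007.png" into name and returns 0; a pattern containing no %d,
 * or more than one, makes the function fail and return -1. */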
3018
3019 #define HEXDUMP_PRINT(...) \
3020 do { \
3021 if (!f) \
3022 av_log(avcl, level, __VA_ARGS__); \
3023 else \
3024 fprintf(f, __VA_ARGS__); \
3025 } while (0)
3026
3027 static void hex_dump_internal(void *avcl, FILE *f, int level,
3028 const uint8_t *buf, int size)
3029 {
3030 int len, i, j, c;
3031
3032 for (i = 0; i < size; i += 16) {
3033 len = size - i;
3034 if (len > 16)
3035 len = 16;
3036 HEXDUMP_PRINT("%08x ", i);
3037 for (j = 0; j < 16; j++) {
3038 if (j < len)
3039 HEXDUMP_PRINT(" %02x", buf[i + j]);
3040 else
3041 HEXDUMP_PRINT(" ");
3042 }
3043 HEXDUMP_PRINT(" ");
3044 for (j = 0; j < len; j++) {
3045 c = buf[i + j];
3046 if (c < ' ' || c > '~')
3047 c = '.';
3048 HEXDUMP_PRINT("%c", c);
3049 }
3050 HEXDUMP_PRINT("\n");
3051 }
3052 }
3053
3054 void av_hex_dump(FILE *f, const uint8_t *buf, int size)
3055 {
3056 hex_dump_internal(NULL, f, 0, buf, size);
3057 }
3058
3059 void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size)
3060 {
3061 hex_dump_internal(avcl, NULL, level, buf, size);
3062 }
3063
3064 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt,
3065 int dump_payload, AVRational time_base)
3066 {
3067 HEXDUMP_PRINT("stream #%d:\n", pkt->stream_index);
3068 HEXDUMP_PRINT(" keyframe=%d\n", (pkt->flags & AV_PKT_FLAG_KEY) != 0);
3069 HEXDUMP_PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3070 /* DTS is _always_ valid after av_read_frame() */
3071 HEXDUMP_PRINT(" dts=");
3072 if (pkt->dts == AV_NOPTS_VALUE)
3073 HEXDUMP_PRINT("N/A");
3074 else
3075 HEXDUMP_PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3076 /* PTS may not be known if B-frames are present. */
3077 HEXDUMP_PRINT(" pts=");
3078 if (pkt->pts == AV_NOPTS_VALUE)
3079 HEXDUMP_PRINT("N/A");
3080 else
3081 HEXDUMP_PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3082 HEXDUMP_PRINT("\n");
3083 HEXDUMP_PRINT(" size=%d\n", pkt->size);
3084 if (dump_payload)
3085 av_hex_dump(f, pkt->data, pkt->size);
3086 }
3087
3088 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3089 {
3090 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3091 }
3092
3093 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3094 AVStream *st)
3095 {
3096 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
3097 }
3098
3099 void av_url_split(char *proto, int proto_size,
3100 char *authorization, int authorization_size,
3101 char *hostname, int hostname_size,
3102 int *port_ptr, char *path, int path_size, const char *url)
3103 {
3104 const char *p, *ls, *at, *col, *brk;
3105
3106 if (port_ptr)
3107 *port_ptr = -1;
3108 if (proto_size > 0)
3109 proto[0] = 0;
3110 if (authorization_size > 0