1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #undef NDEBUG
23 #include <assert.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26
27 #include "config.h"
28
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
38
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
41
42 #include "audiointerleave.h"
43 #include "avformat.h"
44 #include "avio_internal.h"
45 #include "id3v2.h"
46 #include "internal.h"
47 #include "metadata.h"
48 #if CONFIG_NETWORK
49 #include "network.h"
50 #endif
51 #include "riff.h"
52 #include "url.h"
53
54 /**
55 * @file
56 * various utility functions for use within Libav
57 */
58
59 unsigned avformat_version(void)
60 {
61 return LIBAVFORMAT_VERSION_INT;
62 }
63
64 const char *avformat_configuration(void)
65 {
66 return LIBAV_CONFIGURATION;
67 }
68
69 const char *avformat_license(void)
70 {
71 #define LICENSE_PREFIX "libavformat license: "
72 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
73 }
74
75 /* an arbitrarily chosen "sane" max packet size -- 50M */
76 #define SANE_CHUNK_SIZE (50000000)
77
78 /* Read the data in sane-sized chunks and append to pkt.
79 * Return the number of bytes read or an error. */
80 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
81 {
82 int64_t chunk_size = size;
83 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
84 int orig_size = pkt->size;
85 int ret = 0;
86
87 do {
88 int prev_size = pkt->size;
89 int read_size;
90
91 /* When the caller requests a lot of data, limit it to the amount
92 * left in file or SANE_CHUNK_SIZE when it is not known. */
93 if (size > SANE_CHUNK_SIZE) {
94 int64_t filesize = avio_size(s) - avio_tell(s);
95 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
96 }
97 read_size = FFMIN(size, chunk_size);
98
99 ret = av_grow_packet(pkt, read_size);
100 if (ret < 0)
101 break;
102
103 ret = avio_read(s, pkt->data + prev_size, read_size);
104 if (ret != read_size) {
105 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
106 break;
107 }
108
109 size -= read_size;
110 } while (size > 0);
111
112 pkt->pos = orig_pos;
113 if (!pkt->size)
114 av_free_packet(pkt);
115 return pkt->size > orig_size ? pkt->size - orig_size : ret;
116 }
117
118 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
119 {
120 av_init_packet(pkt);
121 pkt->data = NULL;
122 pkt->size = 0;
123 pkt->pos = avio_tell(s);
124
125 return append_packet_chunked(s, pkt, size);
126 }
127
128 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
129 {
130 if (!pkt->size)
131 return av_get_packet(s, pkt, size);
132 return append_packet_chunked(s, pkt, size);
133 }
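
/* Illustrative usage sketch, not part of Libav: pull a fixed-size blob from an
 * AVIOContext with av_get_packet().  The "file:sample.bin" URL and the helper
 * name exist only for this example. */
static int example_read_blob(void)
{
    AVIOContext *io = NULL;
    AVPacket pkt;
    int ret = avio_open(&io, "file:sample.bin", AVIO_FLAG_READ);
    if (ret < 0)
        return ret;

    /* av_get_packet() allocates pkt.data and reads at most 4096 bytes;
     * it returns the number of bytes actually read or an error code. */
    ret = av_get_packet(io, &pkt, 4096);
    if (ret >= 0) {
        av_log(NULL, AV_LOG_INFO, "read %d bytes at offset %"PRId64"\n",
               pkt.size, pkt.pos);
        av_free_packet(&pkt);
    }
    avio_close(io);
    return ret < 0 ? ret : 0;
}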
134
135 int av_filename_number_test(const char *filename)
136 {
137 char buf[1024];
138 return filename &&
139 (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
140 }
141
142 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened,
143 int *score_max)
144 {
145 AVProbeData lpd = *pd;
146 AVInputFormat *fmt1 = NULL, *fmt;
147 int score, id3 = 0;
148
149 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
150 int id3len = ff_id3v2_tag_len(lpd.buf);
151 if (lpd.buf_size > id3len + 16) {
152 lpd.buf += id3len;
153 lpd.buf_size -= id3len;
154 }
155 id3 = 1;
156 }
157
158 fmt = NULL;
159 while ((fmt1 = av_iformat_next(fmt1))) {
160 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
161 continue;
162 score = 0;
163 if (fmt1->read_probe) {
164 score = fmt1->read_probe(&lpd);
165 } else if (fmt1->extensions) {
166 if (av_match_ext(lpd.filename, fmt1->extensions))
167 score = AVPROBE_SCORE_EXTENSION;
168 }
169 if (score > *score_max) {
170 *score_max = score;
171 fmt = fmt1;
172 } else if (score == *score_max)
173 fmt = NULL;
174 }
175
176 // A hack for files with huge id3v2 tags -- try to guess by file extension.
177 if (!fmt && is_opened && *score_max < AVPROBE_SCORE_EXTENSION / 2) {
178 while ((fmt = av_iformat_next(fmt)))
179 if (fmt->extensions &&
180 av_match_ext(lpd.filename, fmt->extensions)) {
181 *score_max = AVPROBE_SCORE_EXTENSION / 2;
182 break;
183 }
184 }
185
186 if (!fmt && id3 && *score_max < AVPROBE_SCORE_EXTENSION / 2 - 1) {
187 while ((fmt = av_iformat_next(fmt)))
188 if (fmt->extensions && av_match_ext("mp3", fmt->extensions)) {
189 *score_max = AVPROBE_SCORE_EXTENSION / 2 - 1;
190 break;
191 }
192 }
193
194 return fmt;
195 }
196
197 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
198 {
199 int score = 0;
200 return av_probe_input_format2(pd, is_opened, &score);
201 }
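
/* Illustrative sketch, not part of Libav: probe an in-memory buffer.  As with
 * av_probe_input_buffer() below, the buffer should be followed by
 * AVPROBE_PADDING_SIZE zeroed bytes so demuxer probe functions may over-read
 * safely. */
static AVInputFormat *example_probe_memory(uint8_t *data, int size,
                                           const char *filename)
{
    AVProbeData pd = { filename ? filename : "", data, size };
    int score = 0;
    AVInputFormat *fmt = av_probe_input_format2(&pd, 1, &score);

    if (fmt)
        av_log(NULL, AV_LOG_INFO, "guessed '%s' with score %d\n",
               fmt->name, score);
    return fmt;
}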
202
203 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
204 AVProbeData *pd, int score)
205 {
206 static const struct {
207 const char *name;
208 enum AVCodecID id;
209 enum AVMediaType type;
210 } fmt_id_type[] = {
211 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
212 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
213 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
214 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
215 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
216 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
217 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
218 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
219 { 0 }
220 };
221 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
222
223 if (fmt) {
224 int i;
225 av_log(s, AV_LOG_DEBUG,
226 "Probe with size=%d, packets=%d detected %s with score=%d\n",
227 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
228 fmt->name, score);
229 for (i = 0; fmt_id_type[i].name; i++) {
230 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
231 st->codec->codec_id = fmt_id_type[i].id;
232 st->codec->codec_type = fmt_id_type[i].type;
233 break;
234 }
235 }
236 }
237 return !!fmt;
238 }
239
240 /************************************************************/
241 /* input media file */
242
243 /** size of probe buffer, for guessing file type from file contents */
244 #define PROBE_BUF_MIN 2048
245 #define PROBE_BUF_MAX (1 << 20)
246
247 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
248 const char *filename, void *logctx,
249 unsigned int offset, unsigned int max_probe_size)
250 {
251 AVProbeData pd = { filename ? filename : "" };
252 uint8_t *buf = NULL;
253 int ret = 0, probe_size;
254
255 if (!max_probe_size)
256 max_probe_size = PROBE_BUF_MAX;
257 else if (max_probe_size > PROBE_BUF_MAX)
258 max_probe_size = PROBE_BUF_MAX;
259 else if (max_probe_size < PROBE_BUF_MIN)
260 return AVERROR(EINVAL);
261
262 if (offset >= max_probe_size)
263 return AVERROR(EINVAL);
264 avio_skip(pb, offset);
265 max_probe_size -= offset;
266
267 for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt;
268 probe_size = FFMIN(probe_size << 1,
269 FFMAX(max_probe_size, probe_size + 1))) {
270 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX / 4 : 0;
271
272 /* Read probe data. */
273 if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
274 return ret;
275 if ((ret = avio_read(pb, buf + pd.buf_size,
276 probe_size - pd.buf_size)) < 0) {
277 /* Fail if the error was not end of file; otherwise, lower the score. */
278 if (ret != AVERROR_EOF) {
279 av_free(buf);
280 return ret;
281 }
282 score = 0;
283 ret = 0; /* error was end of file, nothing read */
284 }
285 pd.buf_size += ret;
286 pd.buf = buf;
287
288 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
289
290 /* Guess file format. */
291 *fmt = av_probe_input_format2(&pd, 1, &score);
292 if (*fmt) {
293 /* This can only be true in the last iteration. */
294 if (score <= AVPROBE_SCORE_MAX / 4) {
295 av_log(logctx, AV_LOG_WARNING,
296 "Format detected only with low score of %d, "
297 "misdetection possible!\n", score);
298 } else
299 av_log(logctx, AV_LOG_DEBUG,
300 "Probed with size=%d and score=%d\n", probe_size, score);
301 }
302 }
303
304 if (!*fmt) {
305 av_free(buf);
306 return AVERROR_INVALIDDATA;
307 }
308
309 /* Rewind. Reuse probe buffer to avoid seeking. */
310 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
311 av_free(buf);
312
313 return ret;
314 }
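
/* Illustrative sketch, not part of Libav: let av_probe_input_buffer() read
 * from an already opened AVIOContext and guess the demuxer.  Passing 0 as
 * max_probe_size selects the PROBE_BUF_MAX default; the probe data is pushed
 * back into the context, so it can still be handed to avformat_open_input(). */
static int example_probe_url(const char *url)
{
    AVIOContext *io = NULL;
    AVInputFormat *fmt = NULL;
    int ret = avio_open(&io, url, AVIO_FLAG_READ);
    if (ret < 0)
        return ret;

    ret = av_probe_input_buffer(io, &fmt, url, NULL, 0, 0);
    if (ret >= 0)
        av_log(NULL, AV_LOG_INFO, "detected format: %s\n", fmt->long_name);
    avio_close(io);
    return ret;
}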
315
316 /* Open input file and probe the format if necessary. */
317 static int init_input(AVFormatContext *s, const char *filename,
318 AVDictionary **options)
319 {
320 int ret;
321 AVProbeData pd = { filename, NULL, 0 };
322
323 if (s->pb) {
324 s->flags |= AVFMT_FLAG_CUSTOM_IO;
325 if (!s->iformat)
326 return av_probe_input_buffer(s->pb, &s->iformat, filename,
327 s, 0, s->probesize);
328 else if (s->iformat->flags & AVFMT_NOFILE)
329 return AVERROR(EINVAL);
330 return 0;
331 }
332
333 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
334 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
335 return 0;
336
337 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
338 &s->interrupt_callback, options)) < 0)
339 return ret;
340 if (s->iformat)
341 return 0;
342 return av_probe_input_buffer(s->pb, &s->iformat, filename,
343 s, 0, s->probesize);
344 }
345
346 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
347 AVPacketList **plast_pktl)
348 {
349 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
350 if (!pktl)
351 return NULL;
352
353 if (*packet_buffer)
354 (*plast_pktl)->next = pktl;
355 else
356 *packet_buffer = pktl;
357
358 /* Add the packet to the buffered packet list. */
359 *plast_pktl = pktl;
360 pktl->pkt = *pkt;
361 return &pktl->pkt;
362 }
363
364 static int queue_attached_pictures(AVFormatContext *s)
365 {
366 int i;
367 for (i = 0; i < s->nb_streams; i++)
368 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
369 s->streams[i]->discard < AVDISCARD_ALL) {
370 AVPacket copy = s->streams[i]->attached_pic;
371 copy.buf = av_buffer_ref(copy.buf);
372 if (!copy.buf)
373 return AVERROR(ENOMEM);
374
375 add_to_pktbuf(&s->raw_packet_buffer, &copy,
376 &s->raw_packet_buffer_end);
377 }
378 return 0;
379 }
380
381 int avformat_open_input(AVFormatContext **ps, const char *filename,
382 AVInputFormat *fmt, AVDictionary **options)
383 {
384 AVFormatContext *s = *ps;
385 int ret = 0;
386 AVDictionary *tmp = NULL;
387 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
388
389 if (!s && !(s = avformat_alloc_context()))
390 return AVERROR(ENOMEM);
391 if (fmt)
392 s->iformat = fmt;
393
394 if (options)
395 av_dict_copy(&tmp, *options, 0);
396
397 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
398 goto fail;
399
400 if ((ret = init_input(s, filename, &tmp)) < 0)
401 goto fail;
402
403 /* Check filename in case an image number is expected. */
404 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
405 if (!av_filename_number_test(filename)) {
406 ret = AVERROR(EINVAL);
407 goto fail;
408 }
409 }
410
411 s->duration = s->start_time = AV_NOPTS_VALUE;
412 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
413
414 /* Allocate private data. */
415 if (s->iformat->priv_data_size > 0) {
416 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
417 ret = AVERROR(ENOMEM);
418 goto fail;
419 }
420 if (s->iformat->priv_class) {
421 *(const AVClass **) s->priv_data = s->iformat->priv_class;
422 av_opt_set_defaults(s->priv_data);
423 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
424 goto fail;
425 }
426 }
427
428 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
429 if (s->pb)
430 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
431
432 if (s->iformat->read_header)
433 if ((ret = s->iformat->read_header(s)) < 0)
434 goto fail;
435
436 if (id3v2_extra_meta &&
437 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
438 goto fail;
439 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
440
441 if ((ret = queue_attached_pictures(s)) < 0)
442 goto fail;
443
444 if (s->pb && !s->data_offset)
445 s->data_offset = avio_tell(s->pb);
446
447 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
448
449 if (options) {
450 av_dict_free(options);
451 *options = tmp;
452 }
453 *ps = s;
454 return 0;
455
456 fail:
457 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
458 av_dict_free(&tmp);
459 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
460 avio_close(s->pb);
461 avformat_free_context(s);
462 *ps = NULL;
463 return ret;
464 }
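
/* Illustrative sketch, not part of Libav: the usual open sequence built on
 * avformat_open_input() above.  "probesize" is just one example option key;
 * whatever the demuxer does not consume is returned through the dictionary. */
static int example_open(const char *url)
{
    AVFormatContext *ic = NULL;
    AVDictionary *opts = NULL;
    int ret;

    av_register_all();                      /* register demuxers once */
    av_dict_set(&opts, "probesize", "5000000", 0);

    if ((ret = avformat_open_input(&ic, url, NULL, &opts)) < 0)
        goto done;
    if ((ret = avformat_find_stream_info(ic, NULL)) >= 0)
        av_log(NULL, AV_LOG_INFO, "%s: %u streams, duration %"PRId64" us\n",
               ic->iformat->name, ic->nb_streams, ic->duration);

done:
    av_dict_free(&opts);                    /* drop unconsumed options */
    if (ic)
        avformat_close_input(&ic);
    return ret;
}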
465
466 /*******************************************************/
467
468 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
469 {
470 if (st->codec->codec_id == AV_CODEC_ID_PROBE) {
471 AVProbeData *pd = &st->probe_data;
472 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
473 --st->probe_packets;
474
475 if (pkt) {
476 int err;
477 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
478 AVPROBE_PADDING_SIZE)) < 0)
479 return err;
480 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
481 pd->buf_size += pkt->size;
482 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
483 } else {
484 st->probe_packets = 0;
485 if (!pd->buf_size) {
486 av_log(s, AV_LOG_ERROR,
487 "nothing to probe for stream %d\n", st->index);
488 return 0;
489 }
490 }
491
492 if (!st->probe_packets ||
493 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
494 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
495 ? AVPROBE_SCORE_MAX / 4 : 0);
496 if (st->codec->codec_id != AV_CODEC_ID_PROBE) {
497 pd->buf_size = 0;
498 av_freep(&pd->buf);
499 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
500 }
501 }
502 }
503 return 0;
504 }
505
506 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
507 {
508 int ret, i, err;
509 AVStream *st;
510
511 for (;;) {
512 AVPacketList *pktl = s->raw_packet_buffer;
513
514 if (pktl) {
515 *pkt = pktl->pkt;
516 st = s->streams[pkt->stream_index];
517 if (st->codec->codec_id != AV_CODEC_ID_PROBE ||
518 !st->probe_packets ||
519 s->raw_packet_buffer_remaining_size < pkt->size) {
520 AVProbeData *pd;
521 if (st->probe_packets)
522 if ((err = probe_codec(s, st, NULL)) < 0)
523 return err;
524 pd = &st->probe_data;
525 av_freep(&pd->buf);
526 pd->buf_size = 0;
527 s->raw_packet_buffer = pktl->next;
528 s->raw_packet_buffer_remaining_size += pkt->size;
529 av_free(pktl);
530 return 0;
531 }
532 }
533
534 pkt->data = NULL;
535 pkt->size = 0;
536 av_init_packet(pkt);
537 ret = s->iformat->read_packet(s, pkt);
538 if (ret < 0) {
539 if (!pktl || ret == AVERROR(EAGAIN))
540 return ret;
541 for (i = 0; i < s->nb_streams; i++) {
542 st = s->streams[i];
543 if (st->probe_packets)
544 if ((err = probe_codec(s, st, NULL)) < 0)
545 return err;
546 }
547 continue;
548 }
549
550 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
551 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
552 av_log(s, AV_LOG_WARNING,
553 "Dropped corrupted packet (stream = %d)\n",
554 pkt->stream_index);
555 av_free_packet(pkt);
556 continue;
557 }
558
559 st = s->streams[pkt->stream_index];
560
561 switch (st->codec->codec_type) {
562 case AVMEDIA_TYPE_VIDEO:
563 if (s->video_codec_id)
564 st->codec->codec_id = s->video_codec_id;
565 break;
566 case AVMEDIA_TYPE_AUDIO:
567 if (s->audio_codec_id)
568 st->codec->codec_id = s->audio_codec_id;
569 break;
570 case AVMEDIA_TYPE_SUBTITLE:
571 if (s->subtitle_codec_id)
572 st->codec->codec_id = s->subtitle_codec_id;
573 break;
574 }
575
576 if (!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
577 !st->probe_packets))
578 return ret;
579
580 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
581 s->raw_packet_buffer_remaining_size -= pkt->size;
582
583 if ((err = probe_codec(s, st, pkt)) < 0)
584 return err;
585 }
586 }
587
588 /**********************************************************/
589
590 /**
591 * Get the number of samples of an audio frame. Return -1 on error.
592 */
593 int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
594 {
595 int frame_size;
596
597 /* give frame_size priority if demuxing */
598 if (!mux && enc->frame_size > 1)
599 return enc->frame_size;
600
601 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
602 return frame_size;
603
604 /* Fall back on using frame_size if muxing. */
605 if (enc->frame_size > 1)
606 return enc->frame_size;
607
608 return -1;
609 }
610
611 /**
612 * Compute the frame duration as the fraction *pnum / *pden of a second. Set both to 0 if it is not available.
613 */
614 void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
615 AVCodecParserContext *pc, AVPacket *pkt)
616 {
617 int frame_size;
618
619 *pnum = 0;
620 *pden = 0;
621 switch (st->codec->codec_type) {
622 case AVMEDIA_TYPE_VIDEO:
623 if (st->avg_frame_rate.num) {
624 *pnum = st->avg_frame_rate.den;
625 *pden = st->avg_frame_rate.num;
626 } else if (st->time_base.num * 1000LL > st->time_base.den) {
627 *pnum = st->time_base.num;
628 *pden = st->time_base.den;
629 } else if (st->codec->time_base.num * 1000LL > st->codec->time_base.den) {
630 *pnum = st->codec->time_base.num;
631 *pden = st->codec->time_base.den;
632 if (pc && pc->repeat_pict) {
633 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
634 *pden /= 1 + pc->repeat_pict;
635 else
636 *pnum *= 1 + pc->repeat_pict;
637 }
638 /* If this codec can be interlaced or progressive then we need
639 * a parser to compute the duration of a packet. Thus, if we have
640 * no parser in such a case, leave the duration undefined. */
641 if (st->codec->ticks_per_frame > 1 && !pc)
642 *pnum = *pden = 0;
643 }
644 break;
645 case AVMEDIA_TYPE_AUDIO:
646 frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
647 if (frame_size <= 0 || st->codec->sample_rate <= 0)
648 break;
649 *pnum = frame_size;
650 *pden = st->codec->sample_rate;
651 break;
652 default:
653 break;
654 }
655 }
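
/* Worked example, illustrative only: turning the *pnum / *pden fraction that
 * ff_compute_frame_duration() fills in into a packet duration expressed in the
 * stream time base, mirroring what compute_pkt_fields() does below.
 * For 25 fps video with time_base = 1/90000:
 *     num = 1, den = 25
 *     duration = 1 * 90000 / (25 * 1) = 3600 ticks per frame */
static int64_t example_duration_in_timebase(AVStream *st, int num, int den)
{
    if (!num || !den)
        return 0; /* duration unknown */
    return av_rescale_rnd(1, num * (int64_t) st->time_base.den,
                          den * (int64_t) st->time_base.num, AV_ROUND_DOWN);
}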
656
657 static int is_intra_only(enum AVCodecID id)
658 {
659 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
660 if (!d)
661 return 0;
662 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
663 return 0;
664 return 1;
665 }
666
667 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
668 int64_t dts, int64_t pts)
669 {
670 AVStream *st = s->streams[stream_index];
671 AVPacketList *pktl = s->packet_buffer;
672
673 if (st->first_dts != AV_NOPTS_VALUE ||
674 dts == AV_NOPTS_VALUE ||
675 st->cur_dts == AV_NOPTS_VALUE)
676 return;
677
678 st->first_dts = dts - st->cur_dts;
679 st->cur_dts = dts;
680
681 for (; pktl; pktl = pktl->next) {
682 if (pktl->pkt.stream_index != stream_index)
683 continue;
684 // FIXME: think more about this check
685 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
686 pktl->pkt.pts += st->first_dts;
687
688 if (pktl->pkt.dts != AV_NOPTS_VALUE)
689 pktl->pkt.dts += st->first_dts;
690
691 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
692 st->start_time = pktl->pkt.pts;
693 }
694 if (st->start_time == AV_NOPTS_VALUE)
695 st->start_time = pts;
696 }
697
698 static void update_initial_durations(AVFormatContext *s, AVStream *st,
699 int stream_index, int duration)
700 {
701 AVPacketList *pktl = s->packet_buffer;
702 int64_t cur_dts = 0;
703
704 if (st->first_dts != AV_NOPTS_VALUE) {
705 cur_dts = st->first_dts;
706 for (; pktl; pktl = pktl->next) {
707 if (pktl->pkt.stream_index == stream_index) {
708 if (pktl->pkt.pts != pktl->pkt.dts ||
709 pktl->pkt.dts != AV_NOPTS_VALUE ||
710 pktl->pkt.duration)
711 break;
712 cur_dts -= duration;
713 }
714 }
715 pktl = s->packet_buffer;
716 st->first_dts = cur_dts;
717 } else if (st->cur_dts)
718 return;
719
720 for (; pktl; pktl = pktl->next) {
721 if (pktl->pkt.stream_index != stream_index)
722 continue;
723 if (pktl->pkt.pts == pktl->pkt.dts &&
724 pktl->pkt.dts == AV_NOPTS_VALUE &&
725 !pktl->pkt.duration) {
726 pktl->pkt.dts = cur_dts;
727 if (!st->codec->has_b_frames)
728 pktl->pkt.pts = cur_dts;
729 cur_dts += duration;
730 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
731 pktl->pkt.duration = duration;
732 } else
733 break;
734 }
735 if (st->first_dts == AV_NOPTS_VALUE)
736 st->cur_dts = cur_dts;
737 }
738
739 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
740 AVCodecParserContext *pc, AVPacket *pkt)
741 {
742 int num, den, presentation_delayed, delay, i;
743 int64_t offset;
744
745 if (s->flags & AVFMT_FLAG_NOFILLIN)
746 return;
747
748 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
749 pkt->dts = AV_NOPTS_VALUE;
750
751 /* do we have a video B-frame? */
752 delay = st->codec->has_b_frames;
753 presentation_delayed = 0;
754
755 /* XXX: need has_b_frame, but cannot get it if the codec is
756 * not initialized */
757 if (delay &&
758 pc && pc->pict_type != AV_PICTURE_TYPE_B)
759 presentation_delayed = 1;
760
761 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
762 st->pts_wrap_bits < 63 &&
763 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
764 pkt->dts -= 1LL << st->pts_wrap_bits;
765 }
766
767 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
768 * We take the conservative approach and discard both.
769 * Note: If this is misbehaving for an H.264 file, then possibly
770 * presentation_delayed is not set correctly. */
771 if (delay == 1 && pkt->dts == pkt->pts &&
772 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
773 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
774 pkt->dts = pkt->pts = AV_NOPTS_VALUE;
775 }
776
777 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
778 ff_compute_frame_duration(&num, &den, st, pc, pkt);
779 if (den && num) {
780 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
781 den * (int64_t) st->time_base.num,
782 AV_ROUND_DOWN);
783
784 if (pkt->duration != 0 && s->packet_buffer)
785 update_initial_durations(s, st, pkt->stream_index,
786 pkt->duration);
787 }
788 }
789
790 /* Correct timestamps with byte offset if demuxers only have timestamps
791 * on packet boundaries */
792 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
793 /* this will estimate bitrate based on this frame's duration and size */
794 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
795 if (pkt->pts != AV_NOPTS_VALUE)
796 pkt->pts += offset;
797 if (pkt->dts != AV_NOPTS_VALUE)
798 pkt->dts += offset;
799 }
800
801 /* This may be redundant, but it should not hurt. */
802 if (pkt->dts != AV_NOPTS_VALUE &&
803 pkt->pts != AV_NOPTS_VALUE &&
804 pkt->pts > pkt->dts)
805 presentation_delayed = 1;
806
807 av_dlog(NULL,
808 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
809 "cur_dts:%"PRId64" st:%d pc:%p\n",
810 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
811 pkt->stream_index, pc);
812 /* Interpolate PTS and DTS if they are not present. We skip H.264
813 * currently because delay and has_b_frames are not reliably set. */
814 if ((delay == 0 || (delay == 1 && pc)) &&
815 st->codec->codec_id != AV_CODEC_ID_H264) {
816 if (presentation_delayed) {
817 /* DTS = decompression timestamp */
818 /* PTS = presentation timestamp */
819 if (pkt->dts == AV_NOPTS_VALUE)
820 pkt->dts = st->last_IP_pts;
821 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
822 if (pkt->dts == AV_NOPTS_VALUE)
823 pkt->dts = st->cur_dts;
824
825 /* This is tricky: the dts must be incremented by the duration
826 * of the frame we are displaying, i.e. the last I- or P-frame. */
827 if (st->last_IP_duration == 0)
828 st->last_IP_duration = pkt->duration;
829 if (pkt->dts != AV_NOPTS_VALUE)
830 st->cur_dts = pkt->dts + st->last_IP_duration;
831 st->last_IP_duration = pkt->duration;
832 st->last_IP_pts = pkt->pts;
833 /* Cannot compute PTS if not present (we can compute it only
834 * by knowing the future). */
835 } else if (pkt->pts != AV_NOPTS_VALUE ||
836 pkt->dts != AV_NOPTS_VALUE ||
837 pkt->duration ||
838 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
839 int duration = pkt->duration;
840 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
841 ff_compute_frame_duration(&num, &den, st, pc, pkt);
842 if (den && num) {
843 duration = av_rescale_rnd(1,
844 num * (int64_t) st->time_base.den,
845 den * (int64_t) st->time_base.num,
846 AV_ROUND_DOWN);
847 if (duration != 0 && s->packet_buffer)
848 update_initial_durations(s, st, pkt->stream_index,
849 duration);
850 }
851 }
852
853 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
854 duration) {
855 /* presentation is not delayed: PTS and DTS are the same */
856 if (pkt->pts == AV_NOPTS_VALUE)
857 pkt->pts = pkt->dts;
858 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
859 pkt->pts);
860 if (pkt->pts == AV_NOPTS_VALUE)
861 pkt->pts = st->cur_dts;
862 pkt->dts = pkt->pts;
863 if (pkt->pts != AV_NOPTS_VALUE)
864 st->cur_dts = pkt->pts + duration;
865 }
866 }
867 }
868
869 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
870 st->pts_buffer[0] = pkt->pts;
871 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
872 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
873 if (pkt->dts == AV_NOPTS_VALUE)
874 pkt->dts = st->pts_buffer[0];
875 // We skipped it above so we try here.
876 if (st->codec->codec_id == AV_CODEC_ID_H264)
877 // This should happen on the first packet
878 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
879 if (pkt->dts > st->cur_dts)
880 st->cur_dts = pkt->dts;
881 }
882
883 av_dlog(NULL,
884 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
885 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
886
887 /* update flags */
888 if (is_intra_only(st->codec->codec_id))
889 pkt->flags |= AV_PKT_FLAG_KEY;
890 if (pc)
891 pkt->convergence_duration = pc->convergence_duration;
892 }
893
894 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
895 {
896 while (*pkt_buf) {
897 AVPacketList *pktl = *pkt_buf;
898 *pkt_buf = pktl->next;
899 av_free_packet(&pktl->pkt);
900 av_freep(&pktl);
901 }
902 *pkt_buf_end = NULL;
903 }
904
905 /**
906 * Parse a packet, add all split parts to parse_queue.
907 *
908 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
909 */
910 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
911 {
912 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
913 AVStream *st = s->streams[stream_index];
914 uint8_t *data = pkt ? pkt->data : NULL;
915 int size = pkt ? pkt->size : 0;
916 int ret = 0, got_output = 0;
917
918 if (!pkt) {
919 av_init_packet(&flush_pkt);
920 pkt = &flush_pkt;
921 got_output = 1;
922 }
923
924 while (size > 0 || (pkt == &flush_pkt && got_output)) {
925 int len;
926
927 av_init_packet(&out_pkt);
928 len = av_parser_parse2(st->parser, st->codec,
929 &out_pkt.data, &out_pkt.size, data, size,
930 pkt->pts, pkt->dts, pkt->pos);
931
932 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
933 /* increment read pointer */
934 data += len;
935 size -= len;
936
937 got_output = !!out_pkt.size;
938
939 if (!out_pkt.size)
940 continue;
941
942 if (pkt->side_data) {
943 out_pkt.side_data = pkt->side_data;
944 out_pkt.side_data_elems = pkt->side_data_elems;
945 pkt->side_data = NULL;
946 pkt->side_data_elems = 0;
947 }
948
949 /* set the duration */
950 out_pkt.duration = 0;
951 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
952 if (st->codec->sample_rate > 0) {
953 out_pkt.duration =
954 av_rescale_q_rnd(st->parser->duration,
955 (AVRational) { 1, st->codec->sample_rate },
956 st->time_base,
957 AV_ROUND_DOWN);
958 }
959 } else if (st->codec->time_base.num != 0 &&
960 st->codec->time_base.den != 0) {
961 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
962 st->codec->time_base,
963 st->time_base,
964 AV_ROUND_DOWN);
965 }
966
967 out_pkt.stream_index = st->index;
968 out_pkt.pts = st->parser->pts;
969 out_pkt.dts = st->parser->dts;
970 out_pkt.pos = st->parser->pos;
971
972 if (st->parser->key_frame == 1 ||
973 (st->parser->key_frame == -1 &&
974 st->parser->pict_type == AV_PICTURE_TYPE_I))
975 out_pkt.flags |= AV_PKT_FLAG_KEY;
976
977 compute_pkt_fields(s, st, st->parser, &out_pkt);
978
979 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
980 out_pkt.flags & AV_PKT_FLAG_KEY) {
981 ff_reduce_index(s, st->index);
982 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
983 0, 0, AVINDEX_KEYFRAME);
984 }
985
986 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
987 out_pkt.buf = pkt->buf;
988 pkt->buf = NULL;
989 #if FF_API_DESTRUCT_PACKET
990 FF_DISABLE_DEPRECATION_WARNINGS
991 out_pkt.destruct = pkt->destruct;
992 pkt->destruct = NULL;
993 FF_ENABLE_DEPRECATION_WARNINGS
994 #endif
995 }
996 if ((ret = av_dup_packet(&out_pkt)) < 0)
997 goto fail;
998
999 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1000 av_free_packet(&out_pkt);
1001 ret = AVERROR(ENOMEM);
1002 goto fail;
1003 }
1004 }
1005
1006 /* end of the stream => close and free the parser */
1007 if (pkt == &flush_pkt) {
1008 av_parser_close(st->parser);
1009 st->parser = NULL;
1010 }
1011
1012 fail:
1013 av_free_packet(pkt);
1014 return ret;
1015 }
1016
1017 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1018 AVPacketList **pkt_buffer_end,
1019 AVPacket *pkt)
1020 {
1021 AVPacketList *pktl;
1022 av_assert0(*pkt_buffer);
1023 pktl = *pkt_buffer;
1024 *pkt = pktl->pkt;
1025 *pkt_buffer = pktl->next;
1026 if (!pktl->next)
1027 *pkt_buffer_end = NULL;
1028 av_freep(&pktl);
1029 return 0;
1030 }
1031
1032 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1033 {
1034 int ret = 0, i, got_packet = 0;
1035
1036 av_init_packet(pkt);
1037
1038 while (!got_packet && !s->parse_queue) {
1039 AVStream *st;
1040 AVPacket cur_pkt;
1041
1042 /* read next packet */
1043 ret = ff_read_packet(s, &cur_pkt);
1044 if (ret < 0) {
1045 if (ret == AVERROR(EAGAIN))
1046 return ret;
1047 /* flush the parsers */
1048 for (i = 0; i < s->nb_streams; i++) {
1049 st = s->streams[i];
1050 if (st->parser && st->need_parsing)
1051 parse_packet(s, NULL, st->index);
1052 }
1053 /* all remaining packets are now in parse_queue =>
1054 * really terminate parsing */
1055 break;
1056 }
1057 ret = 0;
1058 st = s->streams[cur_pkt.stream_index];
1059
1060 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1061 cur_pkt.dts != AV_NOPTS_VALUE &&
1062 cur_pkt.pts < cur_pkt.dts) {
1063 av_log(s, AV_LOG_WARNING,
1064 "Invalid timestamps stream=%d, pts=%"PRId64", "
1065 "dts=%"PRId64", size=%d\n",
1066 cur_pkt.stream_index, cur_pkt.pts,
1067 cur_pkt.dts, cur_pkt.size);
1068 }
1069 if (s->debug & FF_FDEBUG_TS)
1070 av_log(s, AV_LOG_DEBUG,
1071 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
1072 "size=%d, duration=%d, flags=%d\n",
1073 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
1074 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
1075
1076 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1077 st->parser = av_parser_init(st->codec->codec_id);
1078 if (!st->parser)
1079 /* no parser available: just output the raw packets */
1080 st->need_parsing = AVSTREAM_PARSE_NONE;
1081 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
1082 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1083 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
1084 st->parser->flags |= PARSER_FLAG_ONCE;
1085 }
1086
1087 if (!st->need_parsing || !st->parser) {
1088 /* no parsing needed: we just output the packet as is */
1089 *pkt = cur_pkt;
1090 compute_pkt_fields(s, st, NULL, pkt);
1091 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1092 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1093 ff_reduce_index(s, st->index);
1094 av_add_index_entry(st, pkt->pos, pkt->dts,
1095 0, 0, AVINDEX_KEYFRAME);
1096 }
1097 got_packet = 1;
1098 } else if (st->discard < AVDISCARD_ALL) {
1099 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1100 return ret;
1101 } else {
1102 /* free packet */
1103 av_free_packet(&cur_pkt);
1104 }
1105 }
1106
1107 if (!got_packet && s->parse_queue)
1108 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1109
1110 if (s->debug & FF_FDEBUG_TS)
1111 av_log(s, AV_LOG_DEBUG,
1112 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
1113 "size=%d, duration=%d, flags=%d\n",
1114 pkt->stream_index, pkt->pts, pkt->dts,
1115 pkt->size, pkt->duration, pkt->flags);
1116
1117 return ret;
1118 }
1119
1120 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1121 {
1122 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1123 int eof = 0;
1124
1125 if (!genpts)
1126 return s->packet_buffer
1127 ? read_from_packet_buffer(&s->packet_buffer,
1128 &s->packet_buffer_end, pkt)
1129 : read_frame_internal(s, pkt);
1130
1131 for (;;) {
1132 int ret;
1133 AVPacketList *pktl = s->packet_buffer;
1134
1135 if (pktl) {
1136 AVPacket *next_pkt = &pktl->pkt;
1137
1138 if (next_pkt->dts != AV_NOPTS_VALUE) {
1139 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1140 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1141 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1142 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
1143 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
1144 // not B-frame
1145 next_pkt->pts = pktl->pkt.dts;
1146 }
1147 pktl = pktl->next;
1148 }
1149 pktl = s->packet_buffer;
1150 }
1151
1152 /* read packet from packet buffer, if there is data */
1153 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1154 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1155 return read_from_packet_buffer(&s->packet_buffer,
1156 &s->packet_buffer_end, pkt);
1157 }
1158
1159 ret = read_frame_internal(s, pkt);
1160 if (ret < 0) {
1161 if (pktl && ret != AVERROR(EAGAIN)) {
1162 eof = 1;
1163 continue;
1164 } else
1165 return ret;
1166 }
1167
1168 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1169 &s->packet_buffer_end)) < 0)
1170 return AVERROR(ENOMEM);
1171 }
1172 }
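
/* Illustrative sketch, not part of Libav: the canonical demuxing loop on top
 * of av_read_frame().  Packet timestamps are in the owning stream's time_base
 * and are converted to seconds here only for logging. */
static void example_demux_loop(AVFormatContext *ic)
{
    AVPacket pkt;

    while (av_read_frame(ic, &pkt) >= 0) {
        AVStream *st = ic->streams[pkt.stream_index];
        double t = pkt.pts == AV_NOPTS_VALUE ? -1.0
                                             : pkt.pts * av_q2d(st->time_base);
        av_log(NULL, AV_LOG_DEBUG, "stream %d, pts %.3f s, %d bytes\n",
               pkt.stream_index, t, pkt.size);
        av_free_packet(&pkt);   /* the packet owns its payload until freed */
    }
    /* av_read_frame() returns AVERROR_EOF at end of stream or another
     * negative error code on failure. */
}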
1173
1174 /* XXX: empty the packet queue */
1175 static void flush_packet_queue(AVFormatContext *s)
1176 {
1177 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1178 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1179 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1180
1181 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1182 }
1183
1184 /*******************************************************/
1185 /* seek support */
1186
1187 int av_find_default_stream_index(AVFormatContext *s)
1188 {
1189 int first_audio_index = -1;
1190 int i;
1191 AVStream *st;
1192
1193 if (s->nb_streams <= 0)
1194 return -1;
1195 for (i = 0; i < s->nb_streams; i++) {
1196 st = s->streams[i];
1197 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1198 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1199 return i;
1200 }
1201 if (first_audio_index < 0 &&
1202 st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1203 first_audio_index = i;
1204 }
1205 return first_audio_index >= 0 ? first_audio_index : 0;
1206 }
1207
1208 /** Flush the frame reader. */
1209 void ff_read_frame_flush(AVFormatContext *s)
1210 {
1211 AVStream *st;
1212 int i, j;
1213
1214 flush_packet_queue(s);
1215
1216 /* Reset read state for each stream. */
1217 for (i = 0; i < s->nb_streams; i++) {
1218 st = s->streams[i];
1219
1220 if (st->parser) {
1221 av_parser_close(st->parser);
1222 st->parser = NULL;
1223 }
1224 st->last_IP_pts = AV_NOPTS_VALUE;
1225 /* We set the current DTS to an unspecified origin. */
1226 st->cur_dts = AV_NOPTS_VALUE;
1227
1228 st->probe_packets = MAX_PROBE_PACKETS;
1229
1230 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1231 st->pts_buffer[j] = AV_NOPTS_VALUE;
1232 }
1233 }
1234
1235 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1236 {
1237 int i;
1238
1239 for (i = 0; i < s->nb_streams; i++) {
1240 AVStream *st = s->streams[i];
1241
1242 st->cur_dts =
1243 av_rescale(timestamp,
1244 st->time_base.den * (int64_t) ref_st->time_base.num,
1245 st->time_base.num * (int64_t) ref_st->time_base.den);
1246 }
1247 }
1248
1249 void ff_reduce_index(AVFormatContext *s, int stream_index)
1250 {
1251 AVStream *st = s->streams[stream_index];
1252 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1253
1254 if ((unsigned) st->nb_index_entries >= max_entries) {
1255 int i;
1256 for (i = 0; 2 * i < st->nb_index_entries; i++)
1257 st->index_entries[i] = st->index_entries[2 * i];
1258 st->nb_index_entries = i;
1259 }
1260 }
1261
1262 int ff_add_index_entry(AVIndexEntry **index_entries,
1263 int *nb_index_entries,
1264 unsigned int *index_entries_allocated_size,
1265 int64_t pos, int64_t timestamp,
1266 int size, int distance, int flags)
1267 {
1268 AVIndexEntry *entries, *ie;
1269 int index;
1270
1271 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1272 return -1;
1273
1274 entries = av_fast_realloc(*index_entries,
1275 index_entries_allocated_size,
1276 (*nb_index_entries + 1) *
1277 sizeof(AVIndexEntry));
1278 if (!entries)
1279 return -1;
1280
1281 *index_entries = entries;
1282
1283 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1284 timestamp, AVSEEK_FLAG_ANY);
1285
1286 if (index < 0) {
1287 index = (*nb_index_entries)++;
1288 ie = &entries[index];
1289 assert(index == 0 || ie[-1].timestamp < timestamp);
1290 } else {
1291 ie = &entries[index];
1292 if (ie->timestamp != timestamp) {
1293 if (ie->timestamp <= timestamp)
1294 return -1;
1295 memmove(entries + index + 1, entries + index,
1296 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1297 (*nb_index_entries)++;
1298 } else if (ie->pos == pos && distance < ie->min_distance)
1299 // do not reduce the distance
1300 distance = ie->min_distance;
1301 }
1302
1303 ie->pos = pos;
1304 ie->timestamp = timestamp;
1305 ie->min_distance = distance;
1306 ie->size = size;
1307 ie->flags = flags;
1308
1309 return index;
1310 }
1311
1312 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1313 int size, int distance, int flags)
1314 {
1315 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1316 &st->index_entries_allocated_size, pos,
1317 timestamp, size, distance, flags);
1318 }
1319
1320 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1321 int64_t wanted_timestamp, int flags)
1322 {
1323 int a, b, m;
1324 int64_t timestamp;
1325
1326 a = -1;
1327 b = nb_entries;
1328
1329 // Optimize appending index entries at the end.
1330 if (b && entries[b - 1].timestamp < wanted_timestamp)
1331 a = b - 1;
1332
1333 while (b - a > 1) {
1334 m = (a + b) >> 1;
1335 timestamp = entries[m].timestamp;
1336 if (timestamp >= wanted_timestamp)
1337 b = m;
1338 if (timestamp <= wanted_timestamp)
1339 a = m;
1340 }
1341 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1342
1343 if (!(flags & AVSEEK_FLAG_ANY))
1344 while (m >= 0 && m < nb_entries &&
1345 !(entries[m].flags & AVINDEX_KEYFRAME))
1346 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1347
1348 if (m == nb_entries)
1349 return -1;
1350 return m;
1351 }
1352
1353 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1354 {
1355 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1356 wanted_timestamp, flags);
1357 }
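
/* Illustrative sketch, not part of Libav: how the flags steer
 * av_index_search_timestamp().  With AVSEEK_FLAG_BACKWARD the entry at or
 * before the wanted timestamp is returned, otherwise the one at or after it;
 * AVSEEK_FLAG_ANY also accepts non-keyframe entries. */
static int64_t example_keyframe_pos_before(AVStream *st, int64_t ts)
{
    int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
    return idx < 0 ? -1 : st->index_entries[idx].pos;
}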
1358
1359 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1360 int64_t target_ts, int flags)
1361 {
1362 AVInputFormat *avif = s->iformat;
1363 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1364 int64_t ts_min, ts_max, ts;
1365 int index;
1366 int64_t ret;
1367 AVStream *st;
1368
1369 if (stream_index < 0)
1370 return -1;
1371
1372 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1373
1374 ts_max =
1375 ts_min = AV_NOPTS_VALUE;
1376 pos_limit = -1; // GCC falsely says it may be uninitialized.
1377
1378 st = s->streams[stream_index];
1379 if (st->index_entries) {
1380 AVIndexEntry *e;
1381
1382 /* FIXME: Whole function must be checked for non-keyframe entries in
1383 * index case, especially read_timestamp(). */
1384 index = av_index_search_timestamp(st, target_ts,
1385 flags | AVSEEK_FLAG_BACKWARD);
1386 index = FFMAX(index, 0);
1387 e = &st->index_entries[index];
1388
1389 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1390 pos_min = e->pos;
1391 ts_min = e->timestamp;
1392 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1393 pos_min, ts_min);
1394 } else {
1395 assert(index == 0);
1396 }
1397
1398 index = av_index_search_timestamp(st, target_ts,
1399 flags & ~AVSEEK_FLAG_BACKWARD);
1400 assert(index < st->nb_index_entries);
1401 if (index >= 0) {
1402 e = &st->index_entries[index];
1403 assert(e->timestamp >= target_ts);
1404 pos_max = e->pos;
1405 ts_max = e->timestamp;
1406 pos_limit = pos_max - e->min_distance;
1407 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1408 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1409 }
1410 }
1411
1412 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1413 ts_min, ts_max, flags, &ts, avif->read_timestamp);
1414 if (pos < 0)
1415 return -1;
1416
1417 /* do the seek */
1418 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1419 return ret;
1420
1421 ff_update_cur_dts(s, st, ts);
1422
1423 return 0;
1424 }
1425
1426 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1427 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1428 int64_t ts_min, int64_t ts_max,
1429 int flags, int64_t *ts_ret,
1430 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1431 int64_t *, int64_t))
1432 {
1433 int64_t pos, ts;
1434 int64_t start_pos, filesize;
1435 int no_change;
1436
1437 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1438
1439 if (ts_min == AV_NOPTS_VALUE) {
1440 pos_min = s->data_offset;
1441 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1442 if (ts_min == AV_NOPTS_VALUE)
1443 return -1;
1444 }
1445
1446 if (ts_max == AV_NOPTS_VALUE) {
1447 int step = 1024;
1448 filesize = avio_size(s->pb);
1449 pos_max = filesize - 1;
1450 do {
1451 pos_max -= step;
1452 ts_max = read_timestamp(s, stream_index, &pos_max,
1453 pos_max + step);
1454 step += step;
1455 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1456 if (ts_max == AV_NOPTS_VALUE)
1457 return -1;
1458
1459 for (;;) {
1460 int64_t tmp_pos = pos_max + 1;
1461 int64_t tmp_ts = read_timestamp(s, stream_index,
1462 &tmp_pos, INT64_MAX);
1463 if (tmp_ts == AV_NOPTS_VALUE)
1464 break;
1465 ts_max = tmp_ts;
1466 pos_max = tmp_pos;
1467 if (tmp_pos >= filesize)
1468 break;
1469 }
1470 pos_limit = pos_max;
1471 }
1472
1473 if (ts_min > ts_max)
1474 return -1;
1475 else if (ts_min == ts_max)
1476 pos_limit = pos_min;
1477
1478 no_change = 0;
1479 while (pos_min < pos_limit) {
1480 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1481 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1482 assert(pos_limit <= pos_max);
1483
1484 if (no_change == 0) {
1485 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1486 // interpolate position (better than plain bisection)
1487 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1488 ts_max - ts_min) +
1489 pos_min - approximate_keyframe_distance;
1490 } else if (no_change == 1) {
1491 // bisection if interpolation did not change min / max pos last time
1492 pos = (pos_min + pos_limit) >> 1;
1493 } else {
1494 /* linear search if bisection failed, can only happen if there
1495 * are very few or no keyframes between min/max */
1496 pos = pos_min;
1497 }
1498 if (pos <= pos_min)
1499 pos = pos_min + 1;
1500 else if (pos > pos_limit)
1501 pos = pos_limit;
1502 start_pos = pos;
1503
1504 // May pass pos_limit instead of -1.
1505 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1506 if (pos == pos_max)
1507 no_change++;
1508 else
1509 no_change = 0;
1510 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1511 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1512 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1513 pos_limit, start_pos, no_change);
1514 if (ts == AV_NOPTS_VALUE) {
1515 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1516 return -1;
1517 }
1518 assert(ts != AV_NOPTS_VALUE);
1519 if (target_ts <= ts) {
1520 pos_limit = start_pos - 1;
1521 pos_max = pos;
1522 ts_max = ts;
1523 }
1524 if (target_ts >= ts) {
1525 pos_min = pos;
1526 ts_min = ts;
1527 }
1528 }
1529
1530 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1531 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1532 pos_min = pos;
1533 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1534 pos_min++;
1535 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1536 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1537 pos, ts_min, target_ts, ts_max);
1538 *ts_ret = ts;
1539 return pos;
1540 }
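
/* Worked example, illustrative only: the interpolation step used by
 * ff_gen_search() above.  With ts_min = 0 at pos_min = 0, ts_max = 90000 at
 * pos_max = 1000000 and target_ts = 45000, the proportional guess is
 * 45000 * 1000000 / 90000 = 500000, i.e. the middle of the byte range,
 * before the keyframe-distance correction is subtracted. */
static int64_t example_interpolate_pos(int64_t target_ts,
                                       int64_t ts_min, int64_t ts_max,
                                       int64_t pos_min, int64_t pos_max)
{
    if (ts_max <= ts_min)
        return pos_min;
    return pos_min + av_rescale(target_ts - ts_min,
                                pos_max - pos_min, ts_max - ts_min);
}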
1541
1542 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1543 int64_t pos, int flags)
1544 {
1545 int64_t pos_min, pos_max;
1546
1547 pos_min = s->data_offset;
1548 pos_max = avio_size(s->pb) - 1;
1549
1550 if (pos < pos_min)
1551 pos = pos_min;
1552 else if (pos > pos_max)
1553 pos = pos_max;
1554
1555 avio_seek(s->pb, pos, SEEK_SET);
1556
1557 return 0;
1558 }
1559
1560 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1561 int64_t timestamp, int flags)
1562 {
1563 int index;
1564 int64_t ret;
1565 AVStream *st;
1566 AVIndexEntry *ie;
1567
1568 st = s->streams[stream_index];
1569
1570 index = av_index_search_timestamp(st, timestamp, flags);
1571
1572 if (index < 0 && st->nb_index_entries &&
1573 timestamp < st->index_entries[0].timestamp)
1574 return -1;
1575
1576 if (index < 0 || index == st->nb_index_entries - 1) {
1577 AVPacket pkt;
1578
1579 if (st->nb_index_entries) {
1580 assert(st->index_entries);
1581 ie = &st->index_entries[st->nb_index_entries - 1];
1582 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1583 return ret;
1584 ff_update_cur_dts(s, st, ie->timestamp);
1585 } else {
1586 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1587 return ret;
1588 }
1589 for (;;) {
1590 int read_status;
1591 do {
1592 read_status = av_read_frame(s, &pkt);
1593 } while (read_status == AVERROR(EAGAIN));
1594 if (read_status < 0)
1595 break;
1596 av_free_packet(&pkt);
1597 if (stream_index == pkt.stream_index)
1598 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1599 break;
1600 }
1601 index = av_index_search_timestamp(st, timestamp, flags);
1602 }
1603 if (index < 0)
1604 return -1;
1605
1606 ff_read_frame_flush(s);
1607 if (s->iformat->read_seek)
1608 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1609 return 0;
1610 ie = &st->index_entries[index];
1611 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1612 return ret;
1613 ff_update_cur_dts(s, st, ie->timestamp);
1614
1615 return 0;
1616 }
1617
1618 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1619 int64_t timestamp, int flags)
1620 {
1621 int ret;
1622 AVStream *st;
1623
1624 if (flags & AVSEEK_FLAG_BYTE) {
1625 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1626 return -1;
1627 ff_read_frame_flush(s);
1628 return seek_frame_byte(s, stream_index, timestamp, flags);
1629 }
1630
1631 if (stream_index < 0) {
1632 stream_index = av_find_default_stream_index(s);
1633 if (stream_index < 0)
1634 return -1;
1635
1636 st = s->streams[stream_index];
1637 /* timestamp for the default stream must be expressed in AV_TIME_BASE units */
1638 timestamp = av_rescale(timestamp, st->time_base.den,
1639 AV_TIME_BASE * (int64_t) st->time_base.num);
1640 }
1641
1642 /* first, we try the format specific seek */
1643 if (s->iformat->read_seek) {
1644 ff_read_frame_flush(s);
1645 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1646 } else
1647 ret = -1;
1648 if (ret >= 0)
1649 return 0;
1650
1651 if (s->iformat->read_timestamp &&
1652 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1653 ff_read_frame_flush(s);
1654 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1655 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1656 ff_read_frame_flush(s);
1657 return seek_frame_generic(s, stream_index, timestamp, flags);
1658 } else
1659 return -1;
1660 }
1661
1662 int av_seek_frame(AVFormatContext *s, int stream_index,
1663 int64_t timestamp, int flags)
1664 {
1665 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1666
1667 if (ret >= 0)
1668 ret = queue_attached_pictures(s);
1669
1670 return ret;
1671 }
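
/* Illustrative sketch, not part of Libav: seek one stream to a time given in
 * seconds.  The timestamp must be expressed in that stream's time_base;
 * AVSEEK_FLAG_BACKWARD requests the keyframe at or before it. */
static int example_seek_seconds(AVFormatContext *ic, int stream_index,
                                double seconds)
{
    AVStream *st = ic->streams[stream_index];
    int64_t ts = av_rescale((int64_t)(seconds * AV_TIME_BASE),
                            st->time_base.den,
                            AV_TIME_BASE * (int64_t) st->time_base.num);
    return av_seek_frame(ic, stream_index, ts, AVSEEK_FLAG_BACKWARD);
}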
1672
1673 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1674 int64_t ts, int64_t max_ts, int flags)
1675 {
1676 if (min_ts > ts || max_ts < ts)
1677 return -1;
1678
1679 if (s->iformat->read_seek2) {
1680 int ret;
1681 ff_read_frame_flush(s);
1682 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1683 ts, max_ts, flags);
1684
1685 if (ret >= 0)
1686 ret = queue_attached_pictures(s);
1687 return ret;
1688 }
1689
1690 if (s->iformat->read_timestamp) {
1691 // try to seek via read_timestamp()
1692 }
1693
1694 // Fall back on old API if new is not implemented but old is.
1695 // Note the old API has somewhat different semantics.
1696 if (s->iformat->read_seek || 1)
1697 return av_seek_frame(s, stream_index, ts,
1698 flags | ((uint64_t) ts - min_ts >
1699 (uint64_t) max_ts - ts
1700 ? AVSEEK_FLAG_BACKWARD : 0));
1701
1702 // try some generic seek like seek_frame_generic() but with new ts semantics
1703 }
1704
1705 /*******************************************************/
1706
1707 /**
1708 * Return TRUE if any stream in the given context has an accurate duration.
1709 *
1710 * @return TRUE if at least one stream has an accurate duration.
1711 */
1712 static int has_duration(AVFormatContext *ic)
1713 {
1714 int i;
1715 AVStream *st;
1716
1717 for (i = 0; i < ic->nb_streams; i++) {
1718 st = ic->streams[i];
1719 if (st->duration != AV_NOPTS_VALUE)
1720 return 1;
1721 }
1722 if (ic->duration != AV_NOPTS_VALUE)
1723 return 1;
1724 return 0;
1725 }
1726
1727 /**
1728 * Estimate the stream timings from those of the individual components.
1729 *
1730 * Also computes the global bitrate if possible.
1731 */
1732 static void update_stream_timings(AVFormatContext *ic)
1733 {
1734 int64_t start_time, start_time1, end_time, end_time1;
1735 int64_t duration, duration1, filesize;
1736 int i;
1737 AVStream *st;
1738
1739 start_time = INT64_MAX;
1740 end_time = INT64_MIN;
1741 duration = INT64_MIN;
1742 for (i = 0; i < ic->nb_streams; i++) {
1743 st = ic->streams[i];
1744 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1745 start_time1 = av_rescale_q(st->start_time, st->time_base,
1746 AV_TIME_BASE_Q);
1747 start_time = FFMIN(start_time, start_time1);
1748 if (st->duration != AV_NOPTS_VALUE) {
1749 end_time1 = start_time1 +
1750 av_rescale_q(st->duration, st->time_base,
1751 AV_TIME_BASE_Q);
1752 end_time = FFMAX(end_time, end_time1);
1753 }
1754 }
1755 if (st->duration != AV_NOPTS_VALUE) {
1756 duration1 = av_rescale_q(st->duration, st->time_base,
1757 AV_TIME_BASE_Q);
1758 duration = FFMAX(duration, duration1);
1759 }
1760 }
1761 if (start_time != INT64_MAX) {
1762 ic->start_time = start_time;
1763 if (end_time != INT64_MIN)
1764 duration = FFMAX(duration, end_time - start_time);
1765 }
1766 if (duration != INT64_MIN) {
1767 ic->duration = duration;
1768 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1769 /* compute the bitrate */
1770 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1771 (double) ic->duration;
1772 }
1773 }
1774
1775 static void fill_all_stream_timings(AVFormatContext *ic)
1776 {
1777 int i;
1778 AVStream *st;
1779
1780 update_stream_timings(ic);
1781 for (i = 0; i < ic->nb_streams; i++) {
1782 st = ic->streams[i];
1783 if (st->start_time == AV_NOPTS_VALUE) {
1784 if (ic->start_time != AV_NOPTS_VALUE)
1785 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1786 st->time_base);
1787 if (ic->duration != AV_NOPTS_VALUE)
1788 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
1789 st->time_base);
1790 }
1791 }
1792 }
1793
1794 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1795 {
1796 int64_t filesize, duration;
1797 int i;
1798 AVStream *st;
1799
1800 /* if bit_rate is already set, we believe it */
1801 if (ic->bit_rate <= 0) {
1802 int bit_rate = 0;
1803 for (i = 0; i < ic->nb_streams; i++) {
1804 st = ic->streams[i];
1805 if (st->codec->bit_rate > 0) {
1806 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1807 bit_rate = 0;
1808 break;
1809 }
1810 bit_rate += st->codec->bit_rate;
1811 }
1812 }
1813 ic->bit_rate = bit_rate;
1814 }
1815
1816 /* if duration is already set, we believe it */
1817 if (ic->duration == AV_NOPTS_VALUE &&
1818 ic->bit_rate != 0) {
1819 filesize = ic->pb ? avio_size(ic->pb) : 0;
1820 if (filesize > 0) {
1821 for (i = 0; i < ic->nb_streams; i++) {
1822 st = ic->streams[i];
1823 duration = av_rescale(8 * filesize, st->time_base.den,
1824 ic->bit_rate *
1825 (int64_t) st->time_base.num);
1826 if (st->duration == AV_NOPTS_VALUE)
1827 st->duration = duration;
1828 }
1829 }
1830 }
1831 }
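
/* Worked example, illustrative only: the arithmetic used above.  For a 4 MiB
 * file (filesize = 4194304 bytes) at bit_rate = 256000 b/s with a stream
 * time_base of 1/90000, the estimate is
 *     duration = 8 * 4194304 * 90000 / 256000 = 11796480 ticks,
 * i.e. 11796480 / 90000 = roughly 131 seconds. */
static int64_t example_duration_from_bitrate(int64_t filesize, int bit_rate,
                                             AVRational time_base)
{
    return av_rescale(8 * filesize, time_base.den,
                      bit_rate * (int64_t) time_base.num);
}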
1832
1833 #define DURATION_MAX_READ_SIZE 250000
1834 #define DURATION_MAX_RETRY 3
1835
1836 /* only usable for MPEG-PS streams */
1837 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1838 {
1839 AVPacket pkt1, *pkt = &pkt1;
1840 AVStream *st;
1841 int read_size, i, ret;
1842 int64_t end_time;
1843 int64_t filesize, offset, duration;
1844 int retry = 0;
1845
1846 /* flush packet queue */
1847 flush_packet_queue(ic);
1848
1849 for (i = 0; i < ic->nb_streams; i++) {
1850 st = ic->streams[i];
1851 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1852 av_log(st->codec, AV_LOG_WARNING,
1853 "start time is not set in estimate_timings_from_pts\n");
1854
1855 if (st->parser) {
1856 av_parser_close(st->parser);
1857 st->parser = NULL;
1858 }
1859 }
1860
1861 /* estimate the end time (duration) */
1862 /* XXX: may need to support wrapping */
1863 filesize = ic->pb ? avio_size(ic->pb) : 0;
1864 end_time = AV_NOPTS_VALUE;
1865 do {
1866 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1867 if (offset < 0)
1868 offset = 0;
1869
1870 avio_seek(ic->pb, offset, SEEK_SET);
1871 read_size = 0;
1872 for (;;) {
1873 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1874 break;
1875
1876 do {
1877 ret = ff_read_packet(ic, pkt);
1878 } while (ret == AVERROR(EAGAIN));
1879 if (ret != 0)
1880 break;
1881 read_size += pkt->size;
1882 st = ic->streams[pkt->stream_index];
1883 if (pkt->pts != AV_NOPTS_VALUE &&
1884 (st->start_time != AV_NOPTS_VALUE ||
1885 st->first_dts != AV_NOPTS_VALUE)) {
1886 duration = end_time = pkt->pts;
1887 if (st->start_time != AV_NOPTS_VALUE)
1888 duration -= st->start_time;
1889 else
1890 duration -= st->first_dts;
1891 if (duration < 0)
1892 duration += 1LL << st->pts_wrap_bits;
1893 if (duration > 0) {
1894 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1895 st->duration = duration;
1896 }
1897 }
1898 av_free_packet(pkt);
1899 }
1900 } while (end_time == AV_NOPTS_VALUE &&
1901 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1902 ++retry <= DURATION_MAX_RETRY);
1903
1904 fill_all_stream_timings(ic);
1905
1906 avio_seek(ic->pb, old_offset, SEEK_SET);
1907 for (i = 0; i < ic->nb_streams; i++) {
1908 st = ic->streams[i];
1909 st->cur_dts = st->first_dts;
1910 st->last_IP_pts = AV_NOPTS_VALUE;
1911 }
1912 }
1913
1914 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1915 {
1916 int64_t file_size;
1917
1918 /* get the file size, if possible */
1919 if (ic->iformat->flags & AVFMT_NOFILE) {
1920 file_size = 0;
1921 } else {
1922 file_size = avio_size(ic->pb);
1923 file_size = FFMAX(0, file_size);
1924 }
1925
1926 if ((!strcmp(ic->iformat->name, "mpeg") ||
1927 !strcmp(ic->iformat->name, "mpegts")) &&
1928 file_size && ic->pb->seekable) {
1929 /* get accurate estimate from the PTSes */
1930 estimate_timings_from_pts(ic, old_offset);
1931 } else if (has_duration(ic)) {
1932 /* at least one component has timings - we use them for all
1933 * the components */
1934 fill_all_stream_timings(ic);
1935 } else {
1936 av_log(ic, AV_LOG_WARNING,
1937 "Estimating duration from bitrate, this may be inaccurate\n");
1938 /* less precise: use bitrate info */
1939 estimate_timings_from_bit_rate(ic);
1940 }
1941 update_stream_timings(ic);
1942
1943 {
1944 int i;
1945 AVStream av_unused *st;
1946 for (i = 0; i < ic->nb_streams; i++) {
1947 st = ic->streams[i];
1948 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
1949 (double) st->start_time / AV_TIME_BASE,
1950 (double) st->duration / AV_TIME_BASE);
1951 }
1952 av_dlog(ic,
1953 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1954 (double) ic->start_time / AV_TIME_BASE,
1955 (double) ic->duration / AV_TIME_BASE,
1956 ic->bit_rate / 1000);
1957 }
1958 }
1959
1960 static int has_codec_parameters(AVStream *st)
1961 {
1962 AVCodecContext *avctx = st->codec;
1963 int val;
1964
1965 switch (avctx->codec_type) {
1966 case AVMEDIA_TYPE_AUDIO:
1967 val = avctx->sample_rate && avctx->channels;
1968 if (st->info->found_decoder >= 0 &&
1969 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1970 return 0;
1971 break;
1972 case AVMEDIA_TYPE_VIDEO:
1973 val = avctx->width;
1974 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1975 return 0;
1976 break;
1977 default:
1978 val = 1;
1979 break;
1980 }
1981 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
1982 }
1983
1984 static int has_decode_delay_been_guessed(AVStream *st)
1985 {
1986 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1987 st->info->nb_decoded_frames >= 6;
1988 }
1989
1990 /* Returns 1 if decoded data was returned, 0 if not, or a negative error. */
1991 static int try_decode_frame(AVStream *st, AVPacket *avpkt,
1992 AVDictionary **options)
1993 {
1994 const AVCodec *codec;
1995 int got_picture = 1, ret = 0;
1996 AVFrame *frame = av_frame_alloc();
1997 AVPacket pkt = *avpkt;
1998
1999 if (!frame)
2000 return AVERROR(ENOMEM);
2001
2002 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2003 AVDictionary *thread_opt = NULL;
2004
2005 codec = st->codec->codec ? st->codec->codec
2006 : avcodec_find_decoder(st->codec->codec_id);
2007
2008 if (!codec) {
2009 st->info->found_decoder = -1;
2010 ret = -1;
2011 goto fail;
2012 }
2013
2014 /* Force thread count to 1 since the H.264 decoder will not extract
2015 * SPS and PPS to extradata during multi-threaded decoding. */
2016 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2017 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2018 if (!options)
2019 av_dict_free(&thread_opt);
2020 if (ret < 0) {
2021 st->info->found_decoder = -1;
2022 goto fail;
2023 }
2024 st->info->found_decoder = 1;
2025 } else if (!st->info->found_decoder)
2026 st->info->found_decoder = 1;
2027
2028 if (st->info->found_decoder < 0) {
2029 ret = -1;
2030 goto fail;
2031 }
2032
2033 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2034 ret >= 0 &&
2035 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
2036 (!st->codec_info_nb_frames &&
2037 st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2038 got_picture = 0;
2039 switch (st->codec->codec_type) {
2040 case AVMEDIA_TYPE_VIDEO:
2041 ret = avcodec_decode_video2(st->codec, frame,
2042 &got_picture, &pkt);
2043 break;
2044 case AVMEDIA_TYPE_AUDIO:
2045 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
2046 break;
2047 default:
2048 break;
2049 }
2050 if (ret >= 0) {
2051 if (got_picture)
2052 st->info->nb_decoded_frames++;
2053 pkt.data += ret;
2054 pkt.size -= ret;
2055 ret = got_picture;
2056 }
2057 }
2058
2059 fail:
2060 av_frame_free(&frame);
2061 return ret;
2062 }
2063
2064 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2065 {
2066 while (tags->id != AV_CODEC_ID_NONE) {
2067 if (tags->id == id)
2068 return tags->tag;
2069 tags++;
2070 }
2071 return 0;
2072 }
2073
2074 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2075 {
2076 int i;
2077 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
2078 if (tag == tags[i].tag)
2079 return tags[i].id;
2080 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
2081 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2082 return tags[i].id;
2083 return AV_CODEC_ID_NONE;
2084 }
2085
2086 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
2087 {
2088 if (flt) {
2089 switch (bps) {
2090 case 32:
2091 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2092 case 64:
2093 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2094 default:
2095 return AV_CODEC_ID_NONE;
2096 }
2097 } else {
2098 bps >>= 3;
2099 if (sflags & (1 << (bps - 1))) {
2100 switch (bps) {
2101 case 1:
2102 return AV_CODEC_ID_PCM_S8;
2103 case 2:
2104 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2105 case 3:
2106 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2107 case 4:
2108 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2109 default:
2110 return AV_CODEC_ID_NONE;
2111 }
2112 } else {
2113 switch (bps) {
2114 case 1:
2115 return AV_CODEC_ID_PCM_U8;
2116 case 2:
2117 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2118 case 3:
2119 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2120 case 4:
2121 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2122 default:
2123 return AV_CODEC_ID_NONE;
2124 }
2125 }
2126 }
2127 }
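
/* Illustrative sketch (kept under #if 0 so it is not built): how a demuxer
 * might map raw PCM parameters to a codec id with ff_get_pcm_codec_id().
 * The parameter values and the example_* name are placeholders. */
#if 0
static enum AVCodecID example_pcm_codec_id(void)
{
    /* 16 bits per sample, integer, little-endian, all sizes signed -> S16LE */
    return ff_get_pcm_codec_id(16, 0, 0, 0xffff);
}
#endif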
2128
2129 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
2130 {
2131 int i;
2132 for (i = 0; tags && tags[i]; i++) {
2133 int tag = ff_codec_get_tag(tags[i], id);
2134 if (tag)
2135 return tag;
2136 }
2137 return 0;
2138 }
2139
2140 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2141 {
2142 int i;
2143 for (i = 0; tags && tags[i]; i++) {
2144 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2145 if (id != AV_CODEC_ID_NONE)
2146 return id;
2147 }
2148 return AV_CODEC_ID_NONE;
2149 }
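
/* Illustrative sketch (not built): looking up a codec id from a fourcc and
 * back through a muxer's codec_tag tables. The fourcc "H264" and the
 * example_* name are placeholders, not taken from any particular muxer. */
#if 0
static void example_codec_tag_lookup(const AVOutputFormat *ofmt)
{
    enum AVCodecID id  = av_codec_get_id(ofmt->codec_tag,
                                         MKTAG('H', '2', '6', '4'));
    unsigned int   tag = av_codec_get_tag(ofmt->codec_tag, AV_CODEC_ID_H264);
    (void) id;
    (void) tag;
}
#endif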
2150
2151 static void compute_chapters_end(AVFormatContext *s)
2152 {
2153 unsigned int i, j;
2154 int64_t max_time = s->duration +
2155 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2156
2157 for (i = 0; i < s->nb_chapters; i++)
2158 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2159 AVChapter *ch = s->chapters[i];
2160 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2161 ch->time_base)
2162 : INT64_MAX;
2163
2164 for (j = 0; j < s->nb_chapters; j++) {
2165 AVChapter *ch1 = s->chapters[j];
2166 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2167 ch->time_base);
2168 if (j != i && next_start > ch->start && next_start < end)
2169 end = next_start;
2170 }
2171 ch->end = (end == INT64_MAX) ? ch->start : end;
2172 }
2173 }
2174
2175 static int get_std_framerate(int i)
2176 {
2177 if (i < 60 * 12)
2178 return (i + 1) * 1001;
2179 else
2180 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
2181 }
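
/* For reference: combined with the AVRational { get_std_framerate(j), 12 * 1001 }
 * used further below, indices 0..60*12-1 cover framerates in 1/12 fps steps up
 * to 60 fps, and the five tail entries give the NTSC-style rates 23.976, 29.97,
 * 59.94, 11.988 and 14.985 fps. A minimal sketch (not built): */
#if 0
static AVRational example_std_fps(int j)
{
    return (AVRational) { get_std_framerate(j), 12 * 1001 };
}
#endif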
2182
2183 /* Is the time base unreliable?
2184  * This is a heuristic that balances quick acceptance of the values in
2185  * the headers against some extra sanity checks.
2186  * Old DivX and Xvid files often carry nonsense timebases like 1 fps or 2 fps.
2187  * MPEG-2 commonly misuses field repeat flags to store different framerates.
2188  * There are also "variable" fps files that this needs to detect. */
2189 static int tb_unreliable(AVCodecContext *c)
2190 {
2191 if (c->time_base.den >= 101L * c->time_base.num ||
2192 c->time_base.den < 5L * c->time_base.num ||
2193 // c->codec_tag == AV_RL32("DIVX") ||
2194 // c->codec_tag == AV_RL32("XVID") ||
2195 c->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
2196 c->codec_id == AV_CODEC_ID_H264)
2197 return 1;
2198 return 0;
2199 }
2200
2201 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2202 {
2203 int i, count, ret, read_size, j;
2204 AVStream *st;
2205 AVPacket pkt1, *pkt;
2206 int64_t old_offset = avio_tell(ic->pb);
2207     // New streams might appear during probing; there are no caller options for those.
2208 int orig_nb_streams = ic->nb_streams;
2209
2210 for (i = 0; i < ic->nb_streams; i++) {
2211 const AVCodec *codec;
2212 AVDictionary *thread_opt = NULL;
2213 st = ic->streams[i];
2214
2215         // Only needed for the split() calls below (extradata extraction).
2216 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2217 st->parser = av_parser_init(st->codec->codec_id);
2218 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2219 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2220 }
2221 codec = st->codec->codec ? st->codec->codec
2222 : avcodec_find_decoder(st->codec->codec_id);
2223
2224 /* Force thread count to 1 since the H.264 decoder will not extract
2225 * SPS and PPS to extradata during multi-threaded decoding. */
2226 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2227
2228 /* Ensure that subtitle_header is properly set. */
2229 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2230 && codec && !st->codec->codec)
2231 avcodec_open2(st->codec, codec,
2232 options ? &options[i] : &thread_opt);
2233
2234 // Try to just open decoders, in case this is enough to get parameters.
2235 if (!has_codec_parameters(st)) {
2236 if (codec && !st->codec->codec)
2237 avcodec_open2(st->codec, codec,
2238 options ? &options[i] : &thread_opt);
2239 }
2240 if (!options)
2241 av_dict_free(&thread_opt);
2242 }
2243
2244 for (i = 0; i < ic->nb_streams; i++) {
2245 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2246 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
2247 }
2248
2249 count = 0;
2250 read_size = 0;
2251 for (;;) {
2252 if (ff_check_interrupt(&ic->interrupt_callback)) {
2253 ret = AVERROR_EXIT;
2254 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2255 break;
2256 }
2257
2258 /* check if one codec still needs to be handled */
2259 for (i = 0; i < ic->nb_streams; i++) {
2260 int fps_analyze_framecount = 20;
2261
2262 st = ic->streams[i];
2263 if (!has_codec_parameters(st))
2264 break;
2265 /* If the timebase is coarse (like the usual millisecond precision
2266 * of mkv), we need to analyze more frames to reliably arrive at
2267 * the correct fps. */
2268 if (av_q2d(st->time_base) > 0.0005)
2269 fps_analyze_framecount *= 2;
2270 if (ic->fps_probe_size >= 0)
2271 fps_analyze_framecount = ic->fps_probe_size;
2272 /* variable fps and no guess at the real fps */
2273 if (tb_unreliable(st->codec) && !st->avg_frame_rate.num &&
2274 st->codec_info_nb_frames < fps_analyze_framecount &&
2275 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2276 break;
2277 if (st->parser && st->parser->parser->split &&
2278 !st->codec->extradata)
2279 break;
2280 if (st->first_dts == AV_NOPTS_VALUE &&
2281 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2282 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2283 break;
2284 }
2285 if (i == ic->nb_streams) {
2286 /* NOTE: If the format has no header, then we need to read some
2287 * packets to get most of the streams, so we cannot stop here. */
2288 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2289 /* If we found the info for all the codecs, we can stop. */
2290 ret = count;
2291 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2292 break;
2293 }
2294 }
2295 /* We did not get all the codec info, but we read too much data. */
2296 if (read_size >= ic->probesize) {
2297 ret = count;
2298 av_log(ic, AV_LOG_DEBUG,
2299 "Probe buffer size limit %d reached\n", ic->probesize);
2300 break;
2301 }
2302
2303         /* NOTE: A new stream can be added here if the file has no header
2304          * (AVFMTCTX_NOHEADER). */
2305 ret = read_frame_internal(ic, &pkt1);
2306 if (ret == AVERROR(EAGAIN))
2307 continue;
2308
2309 if (ret < 0) {
2310             /* EOF or error */
2311 AVPacket empty_pkt = { 0 };
2312 int err = 0;
2313 av_init_packet(&empty_pkt);
2314
2315             /* We reached EOF without having found all the codec parameters. */
2316 ret = -1;
2317 for (i = 0; i < ic->nb_streams; i++) {
2318 st = ic->streams[i];
2319
2320 /* flush the decoders */
2321 if (st->info->found_decoder == 1) {
2322 do {
2323 err = try_decode_frame(st, &empty_pkt,
2324 (options && i < orig_nb_streams)
2325 ? &options[i] : NULL);
2326 } while (err > 0 && !has_codec_parameters(st));
2327 }
2328
2329 if (err < 0) {
2330 av_log(ic, AV_LOG_WARNING,
2331 "decoding for stream %d failed\n", st->index);
2332 } else if (!has_codec_parameters(st)) {
2333 char buf[256];
2334 avcodec_string(buf, sizeof(buf), st->codec, 0);
2335 av_log(ic, AV_LOG_WARNING,
2336 "Could not find codec parameters (%s)\n", buf);
2337 } else {
2338 ret = 0;
2339 }
2340 }
2341 break;
2342 }
2343
2344 if (ic->flags & AVFMT_FLAG_NOBUFFER) {
2345 pkt = &pkt1;
2346 } else {
2347 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2348 &ic->packet_buffer_end);
2349 if ((ret = av_dup_packet(pkt)) < 0)
2350 goto find_stream_info_err;
2351 }
2352
2353 read_size += pkt->size;
2354
2355 st = ic->streams[pkt->stream_index];
2356 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2357 /* check for non-increasing dts */
2358 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2359 st->info->fps_last_dts >= pkt->dts) {
2360 av_log(ic, AV_LOG_WARNING,
2361 "Non-increasing DTS in stream %d: packet %d with DTS "
2362 "%"PRId64", packet %d with DTS %"PRId64"\n",
2363 st->index, st->info->fps_last_dts_idx,
2364 st->info->fps_last_dts, st->codec_info_nb_frames,
2365 pkt->dts);
2366 st->info->fps_first_dts =
2367 st->info->fps_last_dts = AV_NOPTS_VALUE;
2368 }
2369 /* Check for a discontinuity in dts. If the difference in dts
2370 * is more than 1000 times the average packet duration in the
2371 * sequence, we treat it as a discontinuity. */
2372 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2373 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2374 (pkt->dts - st->info->fps_last_dts) / 1000 >
2375 (st->info->fps_last_dts - st->info->fps_first_dts) /
2376 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2377 av_log(ic, AV_LOG_WARNING,
2378 "DTS discontinuity in stream %d: packet %d with DTS "
2379 "%"PRId64", packet %d with DTS %"PRId64"\n",
2380 st->index, st->info->fps_last_dts_idx,
2381 st->info->fps_last_dts, st->codec_info_nb_frames,
2382 pkt->dts);
2383 st->info->fps_first_dts =
2384 st->info->fps_last_dts = AV_NOPTS_VALUE;
2385 }
2386
2387 /* update stored dts values */
2388 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2389 st->info->fps_first_dts = pkt->dts;
2390 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2391 }
2392 st->info->fps_last_dts = pkt->dts;
2393 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2394
2395 /* check max_analyze_duration */
2396 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2397 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2398 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2399 ic->max_analyze_duration);
2400 break;
2401 }
2402 }
2403 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
2404 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
2405 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2406 st->codec->extradata_size = i;
2407 st->codec->extradata = av_malloc(st->codec->extradata_size +
2408 FF_INPUT_BUFFER_PADDING_SIZE);
2409                 if (!st->codec->extradata) {
2410                     ret = AVERROR(ENOMEM);
                         goto find_stream_info_err;
                     }
2411 memcpy(st->codec->extradata, pkt->data,
2412 st->codec->extradata_size);
2413 memset(st->codec->extradata + i, 0,
2414 FF_INPUT_BUFFER_PADDING_SIZE);
2415 }
2416 }
2417
2418         /* If there is still no information, try to open the codec and
2419          * decompress one frame. We try to avoid that in most cases, as
2420          * it takes longer and uses more memory. For MPEG-4, we need to
2421          * decompress for QuickTime.
2422          *
2423          * If CODEC_CAP_CHANNEL_CONF is set, this forces decoding of at
2424          * least one frame of codec data; this makes sure the codec
2425          * initializes the channel configuration and does not trust only
2426          * the values from the container. */
2427 try_decode_frame(st, pkt,
2428 (options && i < orig_nb_streams) ? &options[i] : NULL);
2429
2430 st->codec_info_nb_frames++;
2431 count++;
2432 }
2433
2434 // close codecs which were opened in try_decode_frame()
2435 for (i = 0; i < ic->nb_streams; i++) {
2436 st = ic->streams[i];
2437 avcodec_close(st->codec);
2438 }
2439 for (i = 0; i < ic->nb_streams; i++) {
2440 st = ic->streams[i];
2441 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2442 /* estimate average framerate if not set by demuxer */
2443 if (!st->avg_frame_rate.num &&
2444 st->info->fps_last_dts != st->info->fps_first_dts) {
2445 int64_t delta_dts = st->info->fps_last_dts -
2446 st->info->fps_first_dts;
2447 int delta_packets = st->info->fps_last_dts_idx -
2448 st->info->fps_first_dts_idx;
2449 int best_fps = 0;
2450 double best_error = 0.01;
2451
2452 if (delta_dts >= INT64_MAX / st->time_base.num ||
2453 delta_packets >= INT64_MAX / st->time_base.den ||
2454 delta_dts < 0)
2455 continue;
2456 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2457 delta_packets * (int64_t) st->time_base.den,
2458 delta_dts * (int64_t) st->time_base.num, 60000);
2459
2460 /* Round guessed framerate to a "standard" framerate if it's
2461 * within 1% of the original estimate. */
2462 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2463 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2464 double error = fabs(av_q2d(st->avg_frame_rate) /
2465 av_q2d(std_fps) - 1);
2466
2467 if (error < best_error) {
2468 best_error = error;
2469 best_fps = std_fps.num;
2470 }
2471 }
2472 if (best_fps)
2473 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2474 best_fps, 12 * 1001, INT_MAX);
2475 }
2476 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2477 if (!st->codec->bits_per_coded_sample)
2478 st->codec->bits_per_coded_sample =
2479 av_get_bits_per_sample(st->codec->codec_id);
2480 // set stream disposition based on audio service type
2481 switch (st->codec->audio_service_type) {
2482 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2483 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2484 break;
2485 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2486 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2487 break;
2488 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2489 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2490 break;
2491 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2492 st->disposition = AV_DISPOSITION_COMMENT;
2493 break;
2494 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2495 st->disposition = AV_DISPOSITION_KARAOKE;
2496 break;
2497 }
2498 }
2499 }
2500
2501 estimate_timings(ic, old_offset);
2502
2503 compute_chapters_end(ic);
2504
2505 find_stream_info_err:
2506 for (i = 0; i < ic->nb_streams; i++) {
2507 ic->streams[i]->codec->thread_count = 0;
2508 av_freep(&ic->streams[i]->info);
2509 }
2510 return ret;
2511 }
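
/* Usage sketch (illustrative only, not built): the typical probing sequence a
 * caller runs before reading packets. "input.mkv" and the example_* name are
 * placeholders; error handling is minimal. */
#if 0
static int example_probe_file(void)
{
    AVFormatContext *ic = NULL;
    int ret = avformat_open_input(&ic, "input.mkv", NULL, NULL);
    if (ret < 0)
        return ret;
    ret = avformat_find_stream_info(ic, NULL);
    if (ret >= 0)
        av_dump_format(ic, 0, "input.mkv", 0);
    avformat_close_input(&ic);
    return ret;
}
#endif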
2512
2513 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2514 {
2515 int i, j;
2516
2517 for (i = 0; i < ic->nb_programs; i++)
2518 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2519 if (ic->programs[i]->stream_index[j] == s)
2520 return ic->programs[i];
2521 return NULL;
2522 }
2523
2524 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2525 int wanted_stream_nb, int related_stream,
2526 AVCodec **decoder_ret, int flags)
2527 {
2528 int i, nb_streams = ic->nb_streams;
2529 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2530 unsigned *program = NULL;
2531 AVCodec *decoder = NULL, *best_decoder = NULL;
2532
2533 if (related_stream >= 0 && wanted_stream_nb < 0) {
2534 AVProgram *p = find_program_from_stream(ic, related_stream);
2535 if (p) {
2536 program = p->stream_index;
2537 nb_streams = p->nb_stream_indexes;
2538 }
2539 }
2540 for (i = 0; i < nb_streams; i++) {
2541 int real_stream_index = program ? program[i] : i;
2542 AVStream *st = ic->streams[real_stream_index];
2543 AVCodecContext *avctx = st->codec;
2544 if (avctx->codec_type != type)
2545 continue;
2546 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2547 continue;
2548 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2549 AV_DISPOSITION_VISUAL_IMPAIRED))
2550 continue;
2551 if (decoder_ret) {
2552 decoder = avcodec_find_decoder(st->codec->codec_id);
2553 if (!decoder) {
2554 if (ret < 0)
2555 ret = AVERROR_DECODER_NOT_FOUND;
2556 continue;
2557 }
2558 }
2559 if (best_count >= st->codec_info_nb_frames)
2560 continue;
2561 best_count = st->codec_info_nb_frames;
2562 ret = real_stream_index;
2563 best_decoder = decoder;
2564 if (program && i == nb_streams - 1 && ret < 0) {
2565 program = NULL;
2566 nb_streams = ic->nb_streams;
2567 /* no related stream found, try again with everything */
2568 i = 0;
2569 }
2570 }
2571 if (decoder_ret)
2572 *decoder_ret = best_decoder;
2573 return ret;
2574 }
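
/* Usage sketch (illustrative only, not built): selecting the "best" audio
 * stream together with a decoder and opening it. The example_* name is a
 * placeholder; error handling is minimal. */
#if 0
static int example_open_best_audio(AVFormatContext *ic)
{
    AVCodec *dec = NULL;
    int idx = av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
    if (idx < 0)
        return idx; /* AVERROR_STREAM_NOT_FOUND or AVERROR_DECODER_NOT_FOUND */
    return avcodec_open2(ic->streams[idx]->codec, dec, NULL);
}
#endif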
2575
2576 /*******************************************************/
2577
2578 int av_read_play(AVFormatContext *s)
2579 {
2580 if (s->iformat->read_play)
2581 return s->iformat->read_play(s);
2582 if (s->pb)
2583 return avio_pause(s->pb, 0);
2584 return AVERROR(ENOSYS);
2585 }
2586
2587 int av_read_pause(AVFormatContext *s)
2588 {
2589 if (s->iformat->read_pause)
2590 return s->iformat->read_pause(s);
2591 if (s->pb)
2592 return avio_pause(s->pb, 1);
2593 return AVERROR(ENOSYS);
2594 }
2595
2596 void avformat_free_context(AVFormatContext *s)
2597 {
2598 int i;
2599 AVStream *st;
2600
2601 av_opt_free(s);
2602 if (s->iformat && s->iformat->priv_class && s->priv_data)
2603 av_opt_free(s->priv_data);
2604
2605 for (i = 0; i < s->nb_streams; i++) {
2606 /* free all data in a stream component */
2607 st = s->streams[i];
2608 if (st->parser) {
2609 av_parser_close(st->parser);
2610 }
2611 if (st->attached_pic.data)
2612 av_free_packet(&st->attached_pic);
2613 av_dict_free(&st->metadata);
2614 av_freep(&st->probe_data.buf);
2615 av_free(st->index_entries);
2616 av_free(st->codec->extradata);
2617 av_free(st->codec->subtitle_header);
2618 av_free(st->codec);
2619 av_free(st->priv_data);
2620 av_free(st->info);
2621 av_free(st);
2622 }
2623 for (i = s->nb_programs - 1; i >= 0; i--) {
2624 av_dict_free(&s->programs[i]->metadata);
2625 av_freep(&s->programs[i]->stream_index);
2626 av_freep(&s->programs[i]);
2627 }
2628 av_freep(&s->programs);
2629 av_freep(&s->priv_data);
2630 while (s->nb_chapters--) {
2631 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2632 av_free(s->chapters[s->nb_chapters]);
2633 }
2634 av_freep(&s->chapters);
2635 av_dict_free(&s->metadata);
2636 av_freep(&s->streams);
2637 av_freep(&s->internal);
2638 av_free(s);
2639 }
2640
2641 void avformat_close_input(AVFormatContext **ps)
2642 {
2643 AVFormatContext *s = *ps;
2644 AVIOContext *pb = s->pb;
2645
2646 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2647 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2648 pb = NULL;
2649
2650 flush_packet_queue(s);
2651
2652 if (s->iformat)
2653 if (s->iformat->read_close)
2654 s->iformat->read_close(s);
2655
2656 avformat_free_context(s);
2657
2658 *ps = NULL;
2659
2660 avio_close(pb);
2661 }
2662
2663 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
2664 {
2665 AVStream *st;
2666 int i;
2667
2668 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2669 sizeof(*s->streams)) < 0) {
2670 s->nb_streams = 0;
2671 return NULL;
2672 }
2673
2674 st = av_mallocz(sizeof(AVStream));
2675 if (!st)
2676 return NULL;
2677 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2678 av_free(st);
2679 return NULL;
2680 }
2681
2682 st->codec = avcodec_alloc_context3(c);
2683 if (s->iformat)
2684 /* no default bitrate if decoding */
2685 st->codec->bit_rate = 0;
2686 st->index = s->nb_streams;
2687 st->start_time = AV_NOPTS_VALUE;
2688 st->duration = AV_NOPTS_VALUE;
2689     /* We set the current DTS to 0 so that formats without any timestamps
2690      * but with durations still get some timestamps. Formats with partially
2691      * unknown timestamps have their first few packets buffered and the
2692      * timestamps corrected before they are returned to the user. */
2693 st->cur_dts = 0;
2694 st->first_dts = AV_NOPTS_VALUE;
2695 st->probe_packets = MAX_PROBE_PACKETS;
2696
2697 /* default pts setting is MPEG-like */
2698 avpriv_set_pts_info(st, 33, 1, 90000);
2699 st->last_IP_pts = AV_NOPTS_VALUE;
2700 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2701 st->pts_buffer[i] = AV_NOPTS_VALUE;
2702
2703 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2704
2705 st->info->fps_first_dts = AV_NOPTS_VALUE;
2706 st->info->fps_last_dts = AV_NOPTS_VALUE;
2707
2708 s->streams[s->nb_streams++] = st;
2709 return st;
2710 }
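
/* Usage sketch (illustrative only, not built): a muxing application adding a
 * video stream to an output context. The codec parameters are example values;
 * a real encoder setup would fill in many more fields. */
#if 0
static AVStream *example_add_video_stream(AVFormatContext *oc)
{
    AVStream *st = avformat_new_stream(oc, NULL);
    if (!st)
        return NULL;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_MPEG4;
    st->codec->width      = 640;
    st->codec->height     = 480;
    st->codec->time_base  = (AVRational) { 1, 25 };
    return st;
}
#endif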
2711
2712 AVProgram *av_new_program(AVFormatContext *ac, int id)
2713 {
2714 AVProgram *program = NULL;
2715 int i;
2716
2717 av_dlog(ac, "new_program: id=0x%04x\n", id);
2718
2719 for (i = 0; i < ac->nb_programs; i++)
2720 if (ac->programs[i]->id == id)
2721 program = ac->programs[i];
2722
2723 if (!program) {
2724 program = av_mallocz(sizeof(AVProgram));
2725 if (!program)
2726 return NULL;
2727 dynarray_add(&ac->programs, &ac->nb_programs, program);
2728 program->discard = AVDISCARD_NONE;
2729 }
2730 program->id = id;
2731
2732 return program;
2733 }
2734
2735 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2736 int64_t start, int64_t end, const char *title)
2737 {
2738 AVChapter *chapter = NULL;
2739 int i;
2740
2741 for (i = 0; i < s->nb_chapters; i++)
2742 if (s->chapters[i]->id == id)
2743 chapter = s->chapters[i];
2744
2745 if (!chapter) {
2746 chapter = av_mallocz(sizeof(AVChapter));
2747 if (!chapter)
2748 return NULL;
2749 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2750 }
2751 av_dict_set(&chapter->metadata, "title", title, 0);
2752 chapter->id = id;
2753 chapter->time_base = time_base;
2754 chapter->start = start;
2755 chapter->end = end;
2756
2757 return chapter;
2758 }
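
/* Usage sketch (illustrative only, not built): a demuxer registering a chapter
 * running from 0 to 60 seconds in a 1/1000 time base. The id, title and
 * example_* name are placeholders. */
#if 0
static void example_add_chapter(AVFormatContext *s)
{
    avpriv_new_chapter(s, 1, (AVRational) { 1, 1000 }, 0, 60000, "Chapter 1");
}
#endif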
2759
2760 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2761 {
2762 int i, j;
2763 AVProgram *program = NULL;
2764
2765 if (idx >= ac->nb_streams) {
2766         av_log(ac, AV_LOG_ERROR, "stream index %u is not valid\n", idx);
2767 return;
2768 }
2769
2770 for (i = 0; i < ac->nb_programs; i++) {
2771 if (ac->programs[i]->id != progid)
2772 continue;
2773 program = ac->programs[i];
2774 for (j = 0; j < program->nb_stream_indexes; j++)
2775 if (program->stream_index[j] == idx)
2776 return;
2777
2778 if (av_reallocp_array(&program->stream_index,
2779 program->nb_stream_indexes + 1,
2780 sizeof(*program->stream_index)) < 0) {
2781 program->nb_stream_indexes = 0;
2782 return;
2783 }
2784 program->stream_index[program->nb_stream_indexes++] = idx;
2785 return;
2786 }
2787 }
2788
2789 static void print_fps(double d, const char *postfix)
2790 {
2791 uint64_t v = lrintf(d * 100);
2792 if (v % 100)
2793 av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2794 else if (v % (100 * 1000))
2795 av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2796 else
2797 av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d / 1000, postfix);
2798 }
2799
2800 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
2801 {
2802 if (m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))) {
2803 AVDictionaryEntry *tag = NULL;
2804
2805 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
2806 while ((tag = av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX)))
2807 if (strcmp("language", tag->key))
2808 av_log(ctx, AV_LOG_INFO,
2809 "%s %-16s: %s\n", indent, tag->key, tag->value);
2810 }
2811 }
2812
2813 /* "user interface" functions */
2814 static void dump_stream_format(AVFormatContext *ic, int i,
2815 int index, int is_output)
2816 {
2817 char buf[256];
2818 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2819 AVStream *st = ic->streams[i];
2820 int g = av_gcd(st->time_base.num, st->time_base.den);
2821 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
2822 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2823 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2824     /* The PID is an important piece of information, so we display it. */
2825 /* XXX: add a generic system */
2826 if (flags & AVFMT_SHOW_IDS)
2827 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2828 if (lang)
2829 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
2830 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames,
2831 st->time_base.num / g, st->time_base.den / g);
2832 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2833 if (st->sample_aspect_ratio.num && // default
2834 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2835 AVRational display_aspect_ratio;
2836 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2837 st->codec->width * st->sample_aspect_ratio.num,
2838 st->codec->height * st->sample_aspect_ratio.den,
2839 1024 * 1024);
2840 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2841 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2842 display_aspect_ratio.num, display_aspect_ratio.den);
2843 }
2844 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2845 if (st->avg_frame_rate.den && st->avg_frame_rate.num)
2846 print_fps(av_q2d(st->avg_frame_rate), "fps");
2847 if (st->time_base.den && st->time_base.num)
2848 print_fps(1 / av_q2d(st->time_base), "tbn");
2849 if (st->codec->time_base.den && st->codec->time_base.num)
2850 print_fps(1 / av_q2d(st->codec->time_base), "tbc");
2851 }
2852 if (st->disposition & AV_DISPOSITION_DEFAULT)
2853 av_log(NULL, AV_LOG_INFO, " (default)");
2854 if (st->disposition & AV_DISPOSITION_DUB)
2855 av_log(NULL, AV_LOG_INFO, " (dub)");
2856 if (st->disposition & AV_DISPOSITION_ORIGINAL)
2857 av_log(NULL, AV_LOG_INFO, " (original)");
2858 if (st->disposition & AV_DISPOSITION_COMMENT)
2859 av_log(NULL, AV_LOG_INFO, " (comment)");
2860 if (st->disposition & AV_DISPOSITION_LYRICS)
2861 av_log(NULL, AV_LOG_INFO, " (lyrics)");
2862 if (st->disposition & AV_DISPOSITION_KARAOKE)
2863 av_log(NULL, AV_LOG_INFO, " (karaoke)");
2864 if (st->disposition & AV_DISPOSITION_FORCED)
2865 av_log(NULL, AV_LOG_INFO, " (forced)");
2866 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
2867 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
2868 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
2869 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
2870 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
2871 av_log(NULL, AV_LOG_INFO, " (clean effects)");
2872 av_log(NULL, AV_LOG_INFO, "\n");
2873 dump_metadata(NULL, st->metadata, " ");
2874 }
2875
2876 void av_dump_format(AVFormatContext *ic, int index,
2877 const char *url, int is_output)
2878 {
2879 int i;
2880 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
2881 if (ic->nb_streams && !printed)
2882 return;
2883
2884 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2885 is_output ? "Output" : "Input",
2886 index,
2887 is_output ? ic->oformat->name : ic->iformat->name,
2888 is_output ? "to" : "from", url);
2889 dump_metadata(NULL, ic->metadata, " ");
2890 if (!is_output) {
2891 av_log(NULL, AV_LOG_INFO, " Duration: ");
2892 if (ic->duration != AV_NOPTS_VALUE) {
2893 int hours, mins, secs, us;
2894 secs = ic->duration / AV_TIME_BASE;
2895 us = ic->duration % AV_TIME_BASE;
2896 mins = secs / 60;
2897 secs %= 60;
2898 hours = mins / 60;
2899 mins %= 60;
2900 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2901 (100 * us) / AV_TIME_BASE);
2902 } else {
2903 av_log(NULL, AV_LOG_INFO, "N/A");
2904 }
2905 if (ic->start_time != AV_NOPTS_VALUE) {
2906 int secs, us;
2907 av_log(NULL, AV_LOG_INFO, ", start: ");
2908 secs = ic->start_time / AV_TIME_BASE;
2909 us = abs(ic->start_time % AV_TIME_BASE);
2910 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2911 secs, (int) av_rescale(us, 1000000, AV_TIME_BASE));
2912 }
2913 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2914 if (ic->bit_rate)
2915 av_log(NULL, AV_LOG_INFO, "%d kb/s", ic->bit_rate / 1000);
2916 else
2917 av_log(NULL, AV_LOG_INFO, "N/A");
2918 av_log(NULL, AV_LOG_INFO, "\n");
2919 }
2920 for (i = 0; i < ic->nb_chapters; i++) {
2921 AVChapter *ch = ic->chapters[i];
2922 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
2923 av_log(NULL, AV_LOG_INFO,
2924 "start %f, ", ch->start * av_q2d(ch->time_base));
2925 av_log(NULL, AV_LOG_INFO,
2926 "end %f\n", ch->end * av_q2d(ch->time_base));
2927
2928 dump_metadata(NULL, ch->metadata, " ");
2929 }
2930 if (ic->nb_programs) {
2931 int j, k, total = 0;
2932 for (j = 0; j < ic->nb_programs; j++) {
2933 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
2934 "name", NULL, 0);
2935 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2936 name ? name->value : "");
2937 dump_metadata(NULL, ic->programs[j]->metadata, " ");
2938 for (k = 0; k < ic->programs[j]->nb_stream_indexes; k++) {
2939 dump_stream_format(ic, ic->programs[j]->stream_index[k],
2940 index, is_output);
2941 printed[ic->programs[j]->stream_index[k]] = 1;
2942 }
2943 total += ic->programs[j]->nb_stream_indexes;
2944 }
2945 if (total < ic->nb_streams)
2946 av_log(NULL, AV_LOG_INFO, " No Program\n");
2947 }
2948 for (i = 0; i < ic->nb_streams; i++)
2949 if (!printed[i])
2950 dump_stream_format(ic, i, index, is_output);
2951
2952 av_free(printed);
2953 }
2954
2955 uint64_t ff_ntp_time(void)
2956 {
2957 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
2958 }
2959
2960 int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
2961 {
2962 const char *p;
2963 char *q, buf1[20], c;
2964 int nd, len, percentd_found;
2965
2966 q = buf;
2967 p = path;
2968 percentd_found = 0;
2969 for (;;) {
2970 c = *p++;
2971 if (c == '\0')
2972 break;
2973 if (c == '%') {
2974 do {
2975 nd = 0;
2976 while (av_isdigit(*p))
2977 nd = nd * 10 + *p++ - '0';
2978 c = *p++;
2979 } while (av_isdigit(c));
2980
2981 switch (c) {
2982 case '%':
2983 goto addchar;
2984 case 'd':
2985 if (percentd_found)
2986 goto fail;
2987 percentd_found = 1;
2988 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2989 len = strlen(buf1);
2990 if ((q - buf + len) > buf_size - 1)
2991 goto fail;
2992 memcpy(q, buf1, len);
2993 q += len;
2994 break;
2995 default:
2996 goto fail;
2997 }
2998 } else {
2999 addchar:
3000 if ((q - buf) < buf_size - 1)
3001 *q++ = c;
3002 }
3003 }
3004 if (!percentd_found)
3005 goto fail;
3006 *q = '\0';
3007 return 0;
3008 fail:
3009 *q = '\0';
3010 return -1;
3011 }
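
/* Usage sketch (illustrative only, not built): expanding an image-sequence
 * pattern. With the placeholder inputs below the buffer receives
 * "frame0042.png"; the function returns -1 if the pattern has no %d. */
#if 0
static void example_frame_filename(void)
{
    char name[64];
    av_get_frame_filename(name, sizeof(name), "frame%04d.png", 42);
}
#endif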
3012
3013 #define HEXDUMP_PRINT(...) \
3014 do { \
3015 if (!f) \
3016 av_log(avcl, level, __VA_ARGS__); \
3017 else \
3018 fprintf(f, __VA_ARGS__); \
3019 } while (0)
3020
3021 static void hex_dump_internal(void *avcl, FILE *f, int level,
3022 const uint8_t *buf, int size)
3023 {
3024 int len, i, j, c;
3025
3026 for (i = 0; i < size; i += 16) {
3027 len = size - i;
3028 if (len > 16)
3029 len = 16;
3030 HEXDUMP_PRINT("%08x ", i);
3031 for (j = 0; j < 16; j++) {
3032 if (j < len)
3033 HEXDUMP_PRINT(" %02x", buf[i + j]);
3034 else
3035 HEXDUMP_PRINT(" ");
3036 }
3037 HEXDUMP_PRINT(" ");
3038 for (j = 0; j < len; j++) {
3039 c = buf[i + j];
3040 if (c < ' ' || c > '~')
3041 c = '.';
3042 HEXDUMP_PRINT("%c", c);
3043 }
3044 HEXDUMP_PRINT("\n");
3045 }
3046 }
3047
3048 void av_hex_dump(FILE *f, const uint8_t *buf, int size)
3049 {
3050 hex_dump_internal(NULL, f, 0, buf, size);
3051 }
3052
3053 void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size)
3054 {
3055 hex_dump_internal(avcl, NULL, level, buf, size);
3056 }
3057
3058 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt,
3059 int dump_payload, AVRational time_base)
3060 {
3061 HEXDUMP_PRINT("stream #%d:\n", pkt->stream_index);
3062 HEXDUMP_PRINT(" keyframe=%d\n", (pkt->flags & AV_PKT_FLAG_KEY) != 0);
3063 HEXDUMP_PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3064 /* DTS is _always_ valid after av_read_frame() */
3065 HEXDUMP_PRINT(" dts=");
3066 if (pkt->dts == AV_NOPTS_VALUE)
3067 HEXDUMP_PRINT("N/A");
3068 else
3069 HEXDUMP_PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3070 /* PTS may not be known if B-frames are present. */
3071 HEXDUMP_PRINT(" pts=");
3072 if (pkt->pts == AV_NOPTS_VALUE)
3073 HEXDUMP_PRINT("N/A");
3074 else
3075 HEXDUMP_PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3076 HEXDUMP_PRINT("\n");
3077 HEXDUMP_PRINT(" size=%d\n", pkt->size);
3078 if (dump_payload)
3079 av_hex_dump(f, pkt->data, pkt->size);
3080 }
3081
3082 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3083 {
3084 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3085 }
3086
3087 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3088 AVStream *st)
3089 {
3090 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
3091 }
3092
3093 void av_url_split(char *proto, int proto_size,
3094 char *authorization, int authorization_size,
3095 char *hostname, int hostname_size,
3096 int *port_ptr, char *path, int path_size, const char *url)
3097 {
3098 const char *p, *ls, *at, *col, *brk;
3099
3100 if (port_ptr)
3101 *port_ptr = -1;
3102 if (proto_size > 0)
3103 proto[0] = 0;
3104 if (authorization_size > 0)
3105 authorization[0] = 0;
3106 if (hostname_size > 0)
3107 hostname[0] = 0;
3108 if (path_size > 0)
3109 path[0] = 0;
3110
3111 /* parse protocol */
3112 if ((p = strchr(url, ':'))) {
3113 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3114 p++; /* skip ':' */
3115 if (*p == '/')
3116 p++;
3117 if (*p == '/')
3118 p++;
3119 } else {
3120 /* no protocol means plain filename */
3121 av_strlcpy(path, url, path_size);
3122 return;
3123 }
3124
3125 /* separate path from hostname */
3126 ls = strchr(p, '/');
3127 if (!ls)
3128 ls = strchr(p, '?');
3129 if (ls)
3130 av_strlcpy(path, ls, path_size);
3131 else
3132 ls = &p[strlen(p)]; // XXX
3133
3134 /* the rest is hostname, use that to parse auth/port */
3135 if (ls != p) {
3136 /* authorization (user[:pass]@hostname) */
3137 if ((at = strchr(p, '@')) && at < ls) {
3138 av_strlcpy(authorization, p,
3139 FFMIN(authorization_size, at + 1 - p));
3140 p = at + 1; /* skip '@' */
3141 }
3142
3143 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3144 /* [host]:port */
3145 av_strlcpy(hostname, p + 1,
3146 FFMIN(hostname_size, brk - p));
3147 if (brk[1] == ':' && port_ptr)
3148 *port_ptr = atoi(brk + 2);
3149 } else if ((col = strchr(p, ':')) && col < ls) {
3150 av_strlcpy(hostname, p,
3151 FFMIN(col + 1 - p, hostname_size));
3152 if (port_ptr)
3153 *port_ptr = atoi(col + 1);
3154 } else
3155 av_strlcpy(hostname, p,
3156 FFMIN(ls + 1 - p, hostname_size));
3157 }
3158 }
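
/* Usage sketch (illustrative only, not built): splitting an RTSP URL. With the
 * placeholder URL below, proto becomes "rtsp", auth "user:pass", host
 * "example.com", port 554 and path "/stream?x=1". */
#if 0
static void example_url_split(void)
{
    char proto[16], auth[128], host[128], path[256];
    int port;

    av_url_split(proto, sizeof(proto), auth, sizeof(auth),
                 host,  sizeof(host),  &port, path, sizeof(path),
                 "rtsp://user:pass@example.com:554/stream?x=1");
}
#endif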
3159
3160 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
3161 {
3162 int i;
3163 static const char hex_table_uc[16] = { '0', '1', '2', '3',
3164 '4', '5', '6', '7',
3165 '8', '9', 'A', 'B',
3166 'C', 'D', 'E', 'F' };
3167 static const char hex_table_lc[16] = { '0', '1', '2', '3',
3168 '4', '5', '6', '7',
3169 '8', '9', 'a', 'b',
3170 'c', 'd', 'e', 'f' };
3171 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
3172
3173 for (i = 0; i < s; i++) {
3174 buff[i * 2] = hex_table[src[i] >> 4];
3175 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3176 }
3177
3178 return buff;
3179 }
3180
3181 int ff_hex_to_data(uint8_t *data, const char *p)
3182 {
3183 int c, len, v;
3184
3185 len = 0;
3186 v = 1;
3187 for (;;) {
3188 p += strspn(p, SPACE_CHARS);
3189 if (*p == '\0')
3190 break;
3191 c = av_toupper((unsigned char) *p++);
3192 if (c >= '0' && c <= '9')
3193 c = c - '0';
3194 else if (c >= 'A' && c <= 'F')
3195 c = c - 'A' + 10;
3196 else
3197 break;
3198 v = (v << 4) | c;
3199 if (v & 0x100) {
3200 if (data)
3201 data[len] = v;
3202 len++;
3203 v = 1;
3204 }
3205 }
3206 return len;
3207 }
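
/* Usage sketch (illustrative only, not built): converting binary data to a hex
 * string and back with the two helpers above. Note that ff_data_to_hex() does
 * not NUL-terminate its output, so the caller does that here. */
#if 0
static void example_hex_roundtrip(void)
{
    static const uint8_t bin[4] = { 0xde, 0xad, 0xbe, 0xef };
    char    hex[2 * sizeof(bin) + 1];
    uint8_t out[sizeof(bin)];

    ff_data_to_hex(hex, bin, sizeof(bin), 1); /* "deadbeef" */
    hex[2 * sizeof(bin)] = '\0';
    ff_hex_to_data(out, hex);                 /* returns 4, out == bin again */
}
#endif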
3208
3209 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3210 unsigned int pts_num, unsigned int pts_den)
3211 {
3212 AVRational new_tb;
3213 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
3214 if (new_tb.num != pts_num)
3215 av_log(NULL, AV_LOG_DEBUG,
3216 "st:%d removing common factor %d from timebase\n",
3217 s->index, pts_num / new_tb.num);
3218 } else
3219 av_log(NULL, AV_LOG_WARNING,
3220 "st:%d has too large timebase, reducing\n", s->index);
3221
3222 if (new_tb.num <= 0 || new_tb.den <= 0) {
3223 av_log(NULL, AV_LOG_ERROR,
3224 "Ignoring attempt to set invalid timebase for st:%d\n",
3225 s->index);
3226 return;
3227 }
3228 s->time_base = new_tb;
3229 s->pts_wrap_bits = pts_wrap_bits;
3230 }
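
/* Usage sketch (illustrative only, not built): a demuxer declaring millisecond
 * timestamps with a 64-bit wrap, as e.g. Matroska-style containers would. */
#if 0
static void example_set_pts_info(AVStream *st)
{
    avpriv_set_pts_info(st, 64, 1, 1000); /* time_base = 1/1000 */
}
#endif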
3231
3232 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3233 void *context)
3234 {
3235 const char *ptr = str;
3236
3237 /* Parse key=value pairs. */
3238 for (;;) {
3239 const char *key;
3240 char *dest = NULL, *dest_end;
3241 int key_len, dest_len = 0;
3242
3243 /* Skip whitespace and potential commas. */
3244 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3245 ptr++;
3246 if (!*ptr)
3247 break;
3248
3249 key = ptr;
3250
3251 if (!(ptr = strchr(key, '=')))
3252 break;
3253 ptr++;
3254 key_len = ptr - key;
3255
3256 callback_get_buf(context, key, key_len, &dest, &dest_len);
3257 dest_end = dest + dest_len - 1;
3258
3259 if (*ptr == '\"') {
3260 ptr++;
3261 while (*ptr && *ptr != '\"') {
3262 if (*ptr == '\\') {
3263 if (!ptr[1])
3264 break;
3265 if (dest && dest < dest_end)
3266 *dest++ = ptr[1];
3267 ptr += 2;
3268 } else {
3269 if (dest && dest < dest_end)
3270 *dest++ = *ptr;
3271 ptr++;
3272 }
3273 }
3274 if (*ptr == '\"')
3275 ptr++;
3276 } else {
3277 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3278 if (dest && dest < dest_end)
3279 *dest++ = *ptr;
3280 }
3281 if (dest)
3282 *dest = 0;
3283 }
3284 }
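
/* Usage sketch (illustrative only, not built): a callback that captures only
 * the value of "realm=..." from a key=value list. The key passed to the
 * callback still includes the trailing '='. The struct and example_* names
 * are made up for this example. */
#if 0
struct example_kv { char realm[128]; };

static void example_kv_cb(void *context, const char *key, int key_len,
                          char **dest, int *dest_len)
{
    struct example_kv *kv = context;
    if (key_len == 6 && !strncmp(key, "realm=", 6)) {
        *dest     = kv->realm;
        *dest_len = sizeof(kv->realm);
    }
}

static void example_parse_key_value(void)
{
    struct example_kv kv = { { 0 } };
    ff_parse_key_value("realm=\"test\", nonce=\"abc\"", example_kv_cb, &kv);
    /* kv.realm now holds "test" */
}
#endif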
3285
3286 int ff_find_stream_index(AVFormatContext *s, int id)
3287 {
3288 int i;
3289 for (i = 0; i < s->nb_streams; i++)
3290 if (s->streams[i]->id == id)
3291 return i;
3292 return -1;
3293 }
3294
3295 int64_t ff_iso8601_to_unix_time(const char *datestr)
3296 {
3297 #if HAVE_STRPTIME
3298 struct tm time1 = { 0 }, time2 = { 0 };
3299 char *ret1, *ret2;
3300     ret1 = strptime(datestr, "%Y-%m-%d %T", &time1);
3301     ret2 = strptime(datestr, "%Y-%m-%dT%T", &time2);
3302 if (ret2 && !ret1)
3303 return av_timegm(&time2);
3304 else
3305 return av_timegm(&time1);
3306 #else
3307 av_log(NULL, AV_LOG_WARNING,
3308 "strptime() unavailable on this system, cannot convert "
3309 "the date string.\n");
3310 return 0;
3311 #endif
3312 }
3313
3314 int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id,
3315 int std_compliance)
3316 {
3317 if (ofmt) {
3318 if (ofmt->query_codec)
3319 return ofmt->query_codec(codec_id, std_compliance);
3320 else if (ofmt->codec_tag)
3321 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
3322 else if (codec_id == ofmt->video_codec ||
3323 codec_id == ofmt->audio_codec ||
3324 codec_id == ofmt->subtitle_codec)
3325 return 1;
3326 }
3327 return AVERROR_PATCHWELCOME;
3328 }
3329
3330 int avformat_network_init(void)
3331 {
3332 #if CONFIG_NETWORK
3333 int ret;
3334 ff_network_inited_globally = 1;
3335 if ((ret = ff_network_init()) < 0)
3336 return ret;
3337 ff_tls_init();
3338 #endif
3339 return 0;
3340 }
3341
3342 int avformat_network_deinit(void)
3343 {
3344 #if CONFIG_NETWORK
3345 ff_network_close();
3346 ff_tls_deinit();
3347 #endif
3348 return 0;
3349 }
3350
3351 int ff_add_param_change(AVPacket *pkt, int32_t channels,
3352 uint64_t channel_layout, int32_t sample_rate,
3353 int32_t width, int32_t height)
3354 {
3355 uint32_t flags = 0;
3356 int size = 4;
3357 uint8_t *data;
3358 if (!pkt)
3359 return AVERROR(EINVAL);
3360 if (channels) {
3361 size += 4;
3362 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
3363 }
3364 if (channel_layout) {
3365 size += 8;
3366 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
3367 }
3368 if (sample_rate) {
3369 size += 4;
3370 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
3371 }
3372 if (width || height) {
3373 size += 8;
3374 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
3375 }
3376 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
3377 if (!data)
3378 return AVERROR(ENOMEM);
3379 bytestream_put_le32(&data, flags);
3380 if (channels)
3381 bytestream_put_le32(&data, channels);
3382 if (channel_layout)
3383 bytestream_put_le64(&data, channel_layout);
3384 if (sample_rate)
3385 bytestream_put_le32(&data, sample_rate);
3386 if (width || height) {
3387 bytestream_put_le32(&data, width);
3388 bytestream_put_le32(&data, height);
3389 }
3390 return 0;
3391 }
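
/* Usage sketch (illustrative only, not built): a demuxer signalling a sample
 * rate switch to 48 kHz on the packet it is about to return. The example_*
 * name is a placeholder. */
#if 0
static int example_signal_rate_change(AVPacket *pkt)
{
    return ff_add_param_change(pkt, 0, 0, 48000, 0, 0);
}
#endif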
3392
3393 int ff_generate_avci_extradata(AVStream *st)
3394 {
3395 static const uint8_t avci100_1080p_extradata[] = {
3396 // SPS
3397 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3398 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3399 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3400 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3401 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3402 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3403 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3404 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3405 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3406 // PPS
3407 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3408 0xd0
3409 };
3410 static const uint8_t avci100_1080i_extradata[] = {
3411 // SPS
3412 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3413 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3414 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3415 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3416 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3417 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3418 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3419 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3420 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3421 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3422 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
3423 // PPS
3424 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3425 0xd0
3426 };
3427 static const uint8_t avci50_1080i_extradata[] = {
3428 // SPS
3429 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3430 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3431 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3432 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3433 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3434 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3435 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3436 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3437 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3438 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3439 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
3440 // PPS
3441 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
3442 0x11
3443 };
3444 static const uint8_t avci100_720p_extradata[] = {
3445 // SPS
3446 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3447 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3448 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3449 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3450 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3451 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3452 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3453 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3454 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3455 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
3456 // PPS
3457 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
3458 0x11
3459 };
3460
3461 const uint8_t *data = NULL;
3462 int size = 0;
3463
3464 if (st->codec->width == 1920) {
3465 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
3466 data = avci100_1080p_extradata;
3467 size = sizeof(avci100_1080p_extradata);
3468 } else {
3469 data = avci100_1080i_extradata;
3470 size = sizeof(avci100_1080i_extradata);
3471 }
3472 } else if (st->codec->width == 1440) {
3473 data = avci50_1080i_extradata;
3474 size = sizeof(avci50_1080i_extradata);
3475 } else if (st->codec->width == 1280) {
3476 data = avci100_720p_extradata;
3477 size = sizeof(avci100_720p_extradata);
3478 }
3479
3480 if (!size)
3481 return 0;
3482
3483 av_freep(&st->codec->extradata);
3484 st->codec->extradata_size = 0;
3485 st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
3486 if (!st->codec->extradata)
3487 return AVERROR(ENOMEM);
3488
3489 memcpy(st->codec->extradata, data, size);
3490 st->codec->extradata_size = size;
3491
3492 return 0;
3493 }