1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #undef NDEBUG
23 #include <assert.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26
27 #include "config.h"
28
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
38
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
41
42 #include "audiointerleave.h"
43 #include "avformat.h"
44 #include "id3v2.h"
45 #include "internal.h"
46 #include "metadata.h"
47 #if CONFIG_NETWORK
48 #include "network.h"
49 #endif
50 #include "riff.h"
51 #include "url.h"
52
53 /**
54 * @file
55 * various utility functions for use within Libav
56 */
57
58 unsigned avformat_version(void)
59 {
60 return LIBAVFORMAT_VERSION_INT;
61 }
62
63 const char *avformat_configuration(void)
64 {
65 return LIBAV_CONFIGURATION;
66 }
67
68 const char *avformat_license(void)
69 {
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
72 }
73
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
76
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
79 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
80 {
81 int64_t chunk_size = size;
82 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
83 int orig_size = pkt->size;
84 int ret = 0;
85
86 do {
87 int prev_size = pkt->size;
88 int read_size;
89
90 /* When the caller requests a lot of data, limit it to the amount
91 * left in the file, or to SANE_CHUNK_SIZE when that is not known. */
92 if (size > SANE_CHUNK_SIZE) {
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
95 }
96 read_size = FFMIN(size, chunk_size);
97
98 ret = av_grow_packet(pkt, read_size);
99 if (ret < 0)
100 break;
101
102 ret = avio_read(s, pkt->data + prev_size, read_size);
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
105 break;
106 }
107
108 size -= read_size;
109 } while (size > 0);
110
111 pkt->pos = orig_pos;
112 if (!pkt->size)
113 av_packet_unref(pkt);
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
115 }
116
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
118 {
119 av_init_packet(pkt);
120 pkt->data = NULL;
121 pkt->size = 0;
122 pkt->pos = avio_tell(s);
123
124 return append_packet_chunked(s, pkt, size);
125 }
126
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
128 {
129 if (!pkt->size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
132 }
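/* Usage sketch (not part of the library): reading a fixed-size blob from an
 * already-opened AVIOContext with av_get_packet(). The context "pb", the
 * 4096-byte request and the consumer function are assumptions made purely
 * for illustration.
 * @code
 * AVPacket pkt;
 * int ret = av_get_packet(pb, &pkt, 4096);   // allocates pkt.data and reads into it
 * if (ret >= 0) {
 *     // ret is the number of bytes actually read; may be short near EOF
 *     consume_bytes(pkt.data, pkt.size);     // hypothetical consumer
 *     av_packet_unref(&pkt);                 // release the packet buffer
 * }
 * @endcode
 */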
133
134 int av_filename_number_test(const char *filename)
135 {
136 char buf[1024];
137 return filename &&
138 (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
139 }
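/* Illustrative sketch: av_filename_number_test() reports whether the filename
 * contains a frame-number pattern that av_get_frame_filename() can expand.
 * The filenames below are examples only.
 * @code
 * av_filename_number_test("frame-%03d.png");   // non-zero: "%03d" expands to 001, 002, ...
 * av_filename_number_test("frame.png");        // 0: no number pattern present
 * @endcode
 */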
140
141 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
142 AVProbeData *pd, int score)
143 {
144 static const struct {
145 const char *name;
146 enum AVCodecID id;
147 enum AVMediaType type;
148 } fmt_id_type[] = {
149 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
150 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
151 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
152 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
153 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
154 { "latm", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
155 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
156 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
157 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
158 { 0 }
159 };
160 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
161
162 if (fmt) {
163 int i;
164 av_log(s, AV_LOG_DEBUG,
165 "Probe with size=%d, packets=%d detected %s with score=%d\n",
166 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
167 fmt->name, score);
168 for (i = 0; fmt_id_type[i].name; i++) {
169 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
170 st->codec->codec_id = fmt_id_type[i].id;
171 st->codec->codec_type = fmt_id_type[i].type;
172 break;
173 }
174 }
175 }
176 return !!fmt;
177 }
178
179 /************************************************************/
180 /* input media file */
181
182 /* Open input file and probe the format if necessary. */
183 static int init_input(AVFormatContext *s, const char *filename,
184 AVDictionary **options)
185 {
186 int ret;
187 AVProbeData pd = { filename, NULL, 0 };
188
189 if (s->pb) {
190 s->flags |= AVFMT_FLAG_CUSTOM_IO;
191 if (!s->iformat)
192 return av_probe_input_buffer(s->pb, &s->iformat, filename,
193 s, 0, s->probesize);
194 else if (s->iformat->flags & AVFMT_NOFILE)
195 return AVERROR(EINVAL);
196 return 0;
197 }
198
199 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
200 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
201 return 0;
202
203 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
204 &s->interrupt_callback, options)) < 0)
205 return ret;
206 if (s->iformat)
207 return 0;
208 return av_probe_input_buffer(s->pb, &s->iformat, filename,
209 s, 0, s->probesize);
210 }
211
212 static int add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
213 AVPacketList **plast_pktl, int ref)
214 {
215 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
216 int ret;
217
218 if (!pktl)
219 return AVERROR(ENOMEM);
220
221 if (ref) {
222 if ((ret = av_packet_ref(&pktl->pkt, pkt)) < 0) {
223 av_free(pktl);
224 return ret;
225 }
226 } else {
227 pktl->pkt = *pkt;
228 }
229
230 if (*packet_buffer)
231 (*plast_pktl)->next = pktl;
232 else
233 *packet_buffer = pktl;
234
235 /* Add the packet to the buffered packet list. */
236 *plast_pktl = pktl;
237 return 0;
238 }
239
240 static int queue_attached_pictures(AVFormatContext *s)
241 {
242 int i, ret;
243 for (i = 0; i < s->nb_streams; i++)
244 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
245 s->streams[i]->discard < AVDISCARD_ALL) {
246
247 ret = add_to_pktbuf(&s->internal->raw_packet_buffer,
248 &s->streams[i]->attached_pic,
249 &s->internal->raw_packet_buffer_end, 1);
250 if (ret < 0)
251 return ret;
252 }
253 return 0;
254 }
255
256 int avformat_open_input(AVFormatContext **ps, const char *filename,
257 AVInputFormat *fmt, AVDictionary **options)
258 {
259 AVFormatContext *s = *ps;
260 int ret = 0;
261 AVDictionary *tmp = NULL;
262 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
263
264 if (!s && !(s = avformat_alloc_context()))
265 return AVERROR(ENOMEM);
266 if (fmt)
267 s->iformat = fmt;
268
269 if (options)
270 av_dict_copy(&tmp, *options, 0);
271
272 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
273 goto fail;
274
275 if ((ret = init_input(s, filename, &tmp)) < 0)
276 goto fail;
277
278 /* Check filename in case an image number is expected. */
279 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
280 if (!av_filename_number_test(filename)) {
281 ret = AVERROR(EINVAL);
282 goto fail;
283 }
284 }
285
286 s->duration = s->start_time = AV_NOPTS_VALUE;
287 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
288
289 /* Allocate private data. */
290 if (s->iformat->priv_data_size > 0) {
291 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
292 ret = AVERROR(ENOMEM);
293 goto fail;
294 }
295 if (s->iformat->priv_class) {
296 *(const AVClass **) s->priv_data = s->iformat->priv_class;
297 av_opt_set_defaults(s->priv_data);
298 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
299 goto fail;
300 }
301 }
302
303 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
304 if (s->pb)
305 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
306
307 if (s->iformat->read_header)
308 if ((ret = s->iformat->read_header(s)) < 0)
309 goto fail;
310
311 if (id3v2_extra_meta &&
312 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
313 goto fail;
314 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
315
316 if ((ret = queue_attached_pictures(s)) < 0)
317 goto fail;
318
319 if (s->pb && !s->internal->data_offset)
320 s->internal->data_offset = avio_tell(s->pb);
321
322 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
323
324 if (options) {
325 av_dict_free(options);
326 *options = tmp;
327 }
328 *ps = s;
329 return 0;
330
331 fail:
332 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
333 av_dict_free(&tmp);
334 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
335 avio_close(s->pb);
336 avformat_free_context(s);
337 *ps = NULL;
338 return ret;
339 }
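/* Usage sketch (the URL, the "probesize" value and the error handling are
 * assumptions for illustration): avformat_open_input() is normally paired
 * with avformat_close_input(), and options the demuxer did not consume are
 * returned in the dictionary that was passed in.
 * @code
 * AVFormatContext *fmt_ctx = NULL;
 * AVDictionary *opts = NULL;
 * av_dict_set(&opts, "probesize", "5000000", 0);      // example option
 * if (avformat_open_input(&fmt_ctx, "input.ts", NULL, &opts) < 0) {
 *     av_dict_free(&opts);                             // fmt_ctx is freed on failure
 *     return;
 * }
 * if (av_dict_count(opts))
 *     av_log(NULL, AV_LOG_WARNING, "some options were not recognized\n");
 * av_dict_free(&opts);
 * // ... demux with av_read_frame(), then:
 * avformat_close_input(&fmt_ctx);
 * @endcode
 */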
340
341 /*******************************************************/
342
343 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
344 {
345 if (st->codec->codec_id == AV_CODEC_ID_PROBE) {
346 AVProbeData *pd = &st->probe_data;
347 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
348 --st->probe_packets;
349
350 if (pkt) {
351 int err;
352 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
353 AVPROBE_PADDING_SIZE)) < 0)
354 return err;
355 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
356 pd->buf_size += pkt->size;
357 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
358 } else {
359 st->probe_packets = 0;
360 if (!pd->buf_size) {
361 av_log(s, AV_LOG_ERROR,
362 "nothing to probe for stream %d\n", st->index);
363 return 0;
364 }
365 }
366
367 if (!st->probe_packets ||
368 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
369 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
370 ? AVPROBE_SCORE_MAX / 4 : 0);
371 if (st->codec->codec_id != AV_CODEC_ID_PROBE) {
372 pd->buf_size = 0;
373 av_freep(&pd->buf);
374 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
375 }
376 }
377 }
378 return 0;
379 }
380
381 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
382 {
383 int ret, i, err;
384 AVStream *st;
385
386 for (;;) {
387 AVPacketList *pktl = s->internal->raw_packet_buffer;
388
389 if (pktl) {
390 *pkt = pktl->pkt;
391 st = s->streams[pkt->stream_index];
392 if (st->codec->codec_id != AV_CODEC_ID_PROBE ||
393 !st->probe_packets ||
394 s->internal->raw_packet_buffer_remaining_size < pkt->size) {
395 AVProbeData *pd;
396 if (st->probe_packets)
397 if ((err = probe_codec(s, st, NULL)) < 0)
398 return err;
399 pd = &st->probe_data;
400 av_freep(&pd->buf);
401 pd->buf_size = 0;
402 s->internal->raw_packet_buffer = pktl->next;
403 s->internal->raw_packet_buffer_remaining_size += pkt->size;
404 av_free(pktl);
405 return 0;
406 }
407 }
408
409 pkt->data = NULL;
410 pkt->size = 0;
411 av_init_packet(pkt);
412 ret = s->iformat->read_packet(s, pkt);
413 if (ret < 0) {
414 if (!pktl || ret == AVERROR(EAGAIN))
415 return ret;
416 for (i = 0; i < s->nb_streams; i++) {
417 st = s->streams[i];
418 if (st->probe_packets)
419 if ((err = probe_codec(s, st, NULL)) < 0)
420 return err;
421 }
422 continue;
423 }
424
425 if (!pkt->buf) {
426 AVPacket tmp = { 0 };
427 ret = av_packet_ref(&tmp, pkt);
428 if (ret < 0)
429 return ret;
430 *pkt = tmp;
431 }
432
433 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
434 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
435 av_log(s, AV_LOG_WARNING,
436 "Dropped corrupted packet (stream = %d)\n",
437 pkt->stream_index);
438 av_packet_unref(pkt);
439 continue;
440 }
441
442 st = s->streams[pkt->stream_index];
443
444 switch (st->codec->codec_type) {
445 case AVMEDIA_TYPE_VIDEO:
446 if (s->video_codec_id)
447 st->codec->codec_id = s->video_codec_id;
448 break;
449 case AVMEDIA_TYPE_AUDIO:
450 if (s->audio_codec_id)
451 st->codec->codec_id = s->audio_codec_id;
452 break;
453 case AVMEDIA_TYPE_SUBTITLE:
454 if (s->subtitle_codec_id)
455 st->codec->codec_id = s->subtitle_codec_id;
456 break;
457 }
458
459 if (!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
460 !st->probe_packets))
461 return ret;
462
463 err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
464 &s->internal->raw_packet_buffer_end, 0);
465 if (err)
466 return err;
467 s->internal->raw_packet_buffer_remaining_size -= pkt->size;
468
469 if ((err = probe_codec(s, st, pkt)) < 0)
470 return err;
471 }
472 }
473
474 /**********************************************************/
475
476 /**
477 * Set *pnum / *pden to the frame duration expressed as a fraction of a second. Both are set to 0 if the duration is not available.
478 */
479 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
480 AVCodecParserContext *pc, AVPacket *pkt)
481 {
482 AVRational codec_framerate = s->iformat ? st->codec->framerate :
483 av_inv_q(st->codec->time_base);
484 int frame_size;
485
486 *pnum = 0;
487 *pden = 0;
488 switch (st->codec->codec_type) {
489 case AVMEDIA_TYPE_VIDEO:
490 if (st->avg_frame_rate.num) {
491 *pnum = st->avg_frame_rate.den;
492 *pden = st->avg_frame_rate.num;
493 } else if (st->time_base.num * 1000LL > st->time_base.den) {
494 *pnum = st->time_base.num;
495 *pden = st->time_base.den;
496 } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
497 *pnum = codec_framerate.den;
498 *pden = codec_framerate.num;
499 if (pc && pc->repeat_pict) {
500 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
501 *pden /= 1 + pc->repeat_pict;
502 else
503 *pnum *= 1 + pc->repeat_pict;
504 }
505 /* If this codec can be interlaced or progressive then we need
506 * a parser to compute the duration of a packet. So if we have
507 * no parser in that case, leave the duration undefined. */
508 if (st->codec->ticks_per_frame > 1 && !pc)
509 *pnum = *pden = 0;
510 }
511 break;
512 case AVMEDIA_TYPE_AUDIO:
513 frame_size = av_get_audio_frame_duration(st->codec, pkt->size);
514 if (frame_size <= 0 || st->codec->sample_rate <= 0)
515 break;
516 *pnum = frame_size;
517 *pden = st->codec->sample_rate;
518 break;
519 default:
520 break;
521 }
522 }
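/* Worked example (values are assumptions, not taken from a real stream): for
 * 25 fps video this function yields *pnum = 1, *pden = 25, i.e. one frame
 * lasts 1/25 s. compute_pkt_fields() then rescales that fraction into stream
 * time-base units; with a 1/90000 time base the packet duration becomes
 * 1 * 90000 / 25 = 3600 ticks. */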
523
524 static int is_intra_only(enum AVCodecID id)
525 {
526 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
527 if (!d)
528 return 0;
529 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
530 return 0;
531 return 1;
532 }
533
534 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
535 int64_t dts, int64_t pts)
536 {
537 AVStream *st = s->streams[stream_index];
538 AVPacketList *pktl = s->internal->packet_buffer;
539
540 if (st->first_dts != AV_NOPTS_VALUE ||
541 dts == AV_NOPTS_VALUE ||
542 st->cur_dts == AV_NOPTS_VALUE)
543 return;
544
545 st->first_dts = dts - st->cur_dts;
546 st->cur_dts = dts;
547
548 for (; pktl; pktl = pktl->next) {
549 if (pktl->pkt.stream_index != stream_index)
550 continue;
551 // FIXME: think more about this check
552 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
553 pktl->pkt.pts += st->first_dts;
554
555 if (pktl->pkt.dts != AV_NOPTS_VALUE)
556 pktl->pkt.dts += st->first_dts;
557
558 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
559 st->start_time = pktl->pkt.pts;
560 }
561 if (st->start_time == AV_NOPTS_VALUE)
562 st->start_time = pts;
563 }
564
565 static void update_initial_durations(AVFormatContext *s, AVStream *st,
566 int stream_index, int duration)
567 {
568 AVPacketList *pktl = s->internal->packet_buffer;
569 int64_t cur_dts = 0;
570
571 if (st->first_dts != AV_NOPTS_VALUE) {
572 cur_dts = st->first_dts;
573 for (; pktl; pktl = pktl->next) {
574 if (pktl->pkt.stream_index == stream_index) {
575 if (pktl->pkt.pts != pktl->pkt.dts ||
576 pktl->pkt.dts != AV_NOPTS_VALUE ||
577 pktl->pkt.duration)
578 break;
579 cur_dts -= duration;
580 }
581 }
582 pktl = s->internal->packet_buffer;
583 st->first_dts = cur_dts;
584 } else if (st->cur_dts)
585 return;
586
587 for (; pktl; pktl = pktl->next) {
588 if (pktl->pkt.stream_index != stream_index)
589 continue;
590 if (pktl->pkt.pts == pktl->pkt.dts &&
591 pktl->pkt.dts == AV_NOPTS_VALUE &&
592 !pktl->pkt.duration) {
593 pktl->pkt.dts = cur_dts;
594 if (!st->codec->has_b_frames)
595 pktl->pkt.pts = cur_dts;
596 cur_dts += duration;
597 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
598 pktl->pkt.duration = duration;
599 } else
600 break;
601 }
602 if (st->first_dts == AV_NOPTS_VALUE)
603 st->cur_dts = cur_dts;
604 }
605
606 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
607 AVCodecParserContext *pc, AVPacket *pkt)
608 {
609 int num, den, presentation_delayed, delay, i;
610 int64_t offset;
611
612 if (s->flags & AVFMT_FLAG_NOFILLIN)
613 return;
614
615 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
616 pkt->dts = AV_NOPTS_VALUE;
617
618 /* do we have a video B-frame? */
619 delay = st->codec->has_b_frames;
620 presentation_delayed = 0;
621
622 /* XXX: need has_b_frame, but cannot get it if the codec is
623 * not initialized */
624 if (delay &&
625 pc && pc->pict_type != AV_PICTURE_TYPE_B)
626 presentation_delayed = 1;
627
628 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
629 st->pts_wrap_bits < 63 &&
630 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
631 pkt->dts -= 1LL << st->pts_wrap_bits;
632 }
633
634 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
635 * We take the conservative approach and discard both.
636 * Note: If this is misbehaving for an H.264 file, then possibly
637 * presentation_delayed is not set correctly. */
638 if (delay == 1 && pkt->dts == pkt->pts &&
639 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
640 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
641 pkt->dts = AV_NOPTS_VALUE;
642 }
643
644 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
645 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
646 if (den && num) {
647 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
648 den * (int64_t) st->time_base.num,
649 AV_ROUND_DOWN);
650
651 if (pkt->duration != 0 && s->internal->packet_buffer)
652 update_initial_durations(s, st, pkt->stream_index,
653 pkt->duration);
654 }
655 }
656
657 /* Correct timestamps with byte offset if demuxers only have timestamps
658 * on packet boundaries */
659 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
660 /* this will estimate bitrate based on this frame's duration and size */
661 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
662 if (pkt->pts != AV_NOPTS_VALUE)
663 pkt->pts += offset;
664 if (pkt->dts != AV_NOPTS_VALUE)
665 pkt->dts += offset;
666 }
667
668 /* This may be redundant, but it should not hurt. */
669 if (pkt->dts != AV_NOPTS_VALUE &&
670 pkt->pts != AV_NOPTS_VALUE &&
671 pkt->pts > pkt->dts)
672 presentation_delayed = 1;
673
674 av_log(NULL, AV_LOG_TRACE,
675 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
676 "cur_dts:%"PRId64" st:%d pc:%p\n",
677 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
678 pkt->stream_index, pc);
679 /* Interpolate PTS and DTS if they are not present. We skip H.264
680 * currently because delay and has_b_frames are not reliably set. */
681 if ((delay == 0 || (delay == 1 && pc)) &&
682 st->codec->codec_id != AV_CODEC_ID_H264) {
683 if (presentation_delayed) {
684 /* DTS = decompression timestamp */
685 /* PTS = presentation timestamp */
686 if (pkt->dts == AV_NOPTS_VALUE)
687 pkt->dts = st->last_IP_pts;
688 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
689 if (pkt->dts == AV_NOPTS_VALUE)
690 pkt->dts = st->cur_dts;
691
692 /* This is tricky: the dts must be incremented by the duration
693 * of the frame we are displaying, i.e. the last I- or P-frame. */
694 if (st->last_IP_duration == 0)
695 st->last_IP_duration = pkt->duration;
696 if (pkt->dts != AV_NOPTS_VALUE)
697 st->cur_dts = pkt->dts + st->last_IP_duration;
698 st->last_IP_duration = pkt->duration;
699 st->last_IP_pts = pkt->pts;
700 /* Cannot compute PTS if not present (we can compute it only
701 * by knowing the future). */
702 } else if (pkt->pts != AV_NOPTS_VALUE ||
703 pkt->dts != AV_NOPTS_VALUE ||
704 pkt->duration ||
705 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
706 int duration = pkt->duration;
707 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
708 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
709 if (den && num) {
710 duration = av_rescale_rnd(1,
711 num * (int64_t) st->time_base.den,
712 den * (int64_t) st->time_base.num,
713 AV_ROUND_DOWN);
714 if (duration != 0 && s->internal->packet_buffer)
715 update_initial_durations(s, st, pkt->stream_index,
716 duration);
717 }
718 }
719
720 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
721 duration) {
722 /* presentation is not delayed: PTS and DTS are the same */
723 if (pkt->pts == AV_NOPTS_VALUE)
724 pkt->pts = pkt->dts;
725 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
726 pkt->pts);
727 if (pkt->pts == AV_NOPTS_VALUE)
728 pkt->pts = st->cur_dts;
729 pkt->dts = pkt->pts;
730 if (pkt->pts != AV_NOPTS_VALUE)
731 st->cur_dts = pkt->pts + duration;
732 }
733 }
734 }
735
736 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
737 st->pts_buffer[0] = pkt->pts;
738 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
739 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
740 if (pkt->dts == AV_NOPTS_VALUE)
741 pkt->dts = st->pts_buffer[0];
742 // We skipped it above so we try here.
743 if (st->codec->codec_id == AV_CODEC_ID_H264)
744 // This should happen on the first packet
745 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
746 if (pkt->dts > st->cur_dts)
747 st->cur_dts = pkt->dts;
748 }
749
750 av_log(NULL, AV_LOG_TRACE,
751 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
752 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
753
754 /* update flags */
755 if (is_intra_only(st->codec->codec_id))
756 pkt->flags |= AV_PKT_FLAG_KEY;
757 #if FF_API_CONVERGENCE_DURATION
758 FF_DISABLE_DEPRECATION_WARNINGS
759 if (pc)
760 pkt->convergence_duration = pc->convergence_duration;
761 FF_ENABLE_DEPRECATION_WARNINGS
762 #endif
763 }
764
765 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
766 {
767 while (*pkt_buf) {
768 AVPacketList *pktl = *pkt_buf;
769 *pkt_buf = pktl->next;
770 av_packet_unref(&pktl->pkt);
771 av_freep(&pktl);
772 }
773 *pkt_buf_end = NULL;
774 }
775
776 /**
777 * Parse a packet, add all split parts to parse_queue.
778 *
779 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
780 */
781 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
782 {
783 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
784 AVStream *st = s->streams[stream_index];
785 uint8_t *data = pkt ? pkt->data : NULL;
786 int size = pkt ? pkt->size : 0;
787 int ret = 0, got_output = 0;
788
789 if (!pkt) {
790 av_init_packet(&flush_pkt);
791 pkt = &flush_pkt;
792 got_output = 1;
793 }
794
795 while (size > 0 || (pkt == &flush_pkt && got_output)) {
796 int len;
797
798 av_init_packet(&out_pkt);
799 len = av_parser_parse2(st->parser, st->codec,
800 &out_pkt.data, &out_pkt.size, data, size,
801 pkt->pts, pkt->dts, pkt->pos);
802
803 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
804 /* increment read pointer */
805 data += len;
806 size -= len;
807
808 got_output = !!out_pkt.size;
809
810 if (!out_pkt.size)
811 continue;
812
813 if (pkt->side_data) {
814 out_pkt.side_data = pkt->side_data;
815 out_pkt.side_data_elems = pkt->side_data_elems;
816 pkt->side_data = NULL;
817 pkt->side_data_elems = 0;
818 }
819
820 /* set the duration */
821 out_pkt.duration = 0;
822 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
823 if (st->codec->sample_rate > 0) {
824 out_pkt.duration =
825 av_rescale_q_rnd(st->parser->duration,
826 (AVRational) { 1, st->codec->sample_rate },
827 st->time_base,
828 AV_ROUND_DOWN);
829 }
830 }
831
832 out_pkt.stream_index = st->index;
833 out_pkt.pts = st->parser->pts;
834 out_pkt.dts = st->parser->dts;
835 out_pkt.pos = st->parser->pos;
836
837 if (st->parser->key_frame == 1 ||
838 (st->parser->key_frame == -1 &&
839 st->parser->pict_type == AV_PICTURE_TYPE_I))
840 out_pkt.flags |= AV_PKT_FLAG_KEY;
841
842 compute_pkt_fields(s, st, st->parser, &out_pkt);
843
844 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
845 out_pkt.flags & AV_PKT_FLAG_KEY) {
846 ff_reduce_index(s, st->index);
847 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
848 0, 0, AVINDEX_KEYFRAME);
849 }
850
851 if ((ret = add_to_pktbuf(&s->internal->parse_queue, &out_pkt,
852 &s->internal->parse_queue_end,
853 1))) {
854 av_packet_unref(&out_pkt);
855 goto fail;
856 }
857 }
858
859 /* end of the stream => close and free the parser */
860 if (pkt == &flush_pkt) {
861 av_parser_close(st->parser);
862 st->parser = NULL;
863 }
864
865 fail:
866 av_packet_unref(pkt);
867 return ret;
868 }
869
870 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
871 AVPacketList **pkt_buffer_end,
872 AVPacket *pkt)
873 {
874 AVPacketList *pktl;
875 av_assert0(*pkt_buffer);
876 pktl = *pkt_buffer;
877 *pkt = pktl->pkt;
878 *pkt_buffer = pktl->next;
879 if (!pktl->next)
880 *pkt_buffer_end = NULL;
881 av_freep(&pktl);
882 return 0;
883 }
884
885 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
886 {
887 int ret = 0, i, got_packet = 0;
888 AVDictionary *metadata = NULL;
889
890 av_init_packet(pkt);
891
892 while (!got_packet && !s->internal->parse_queue) {
893 AVStream *st;
894 AVPacket cur_pkt;
895
896 /* read next packet */
897 ret = ff_read_packet(s, &cur_pkt);
898 if (ret < 0) {
899 if (ret == AVERROR(EAGAIN))
900 return ret;
901 /* flush the parsers */
902 for (i = 0; i < s->nb_streams; i++) {
903 st = s->streams[i];
904 if (st->parser && st->need_parsing)
905 parse_packet(s, NULL, st->index);
906 }
907 /* all remaining packets are now in parse_queue =>
908 * really terminate parsing */
909 break;
910 }
911 ret = 0;
912 st = s->streams[cur_pkt.stream_index];
913
914 if (cur_pkt.pts != AV_NOPTS_VALUE &&
915 cur_pkt.dts != AV_NOPTS_VALUE &&
916 cur_pkt.pts < cur_pkt.dts) {
917 av_log(s, AV_LOG_WARNING,
918 "Invalid timestamps stream=%d, pts=%"PRId64", "
919 "dts=%"PRId64", size=%d\n",
920 cur_pkt.stream_index, cur_pkt.pts,
921 cur_pkt.dts, cur_pkt.size);
922 }
923 if (s->debug & FF_FDEBUG_TS)
924 av_log(s, AV_LOG_DEBUG,
925 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
926 "size=%d, duration=%"PRId64", flags=%d\n",
927 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
928 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
929
930 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
931 st->parser = av_parser_init(st->codec->codec_id);
932 if (!st->parser)
933 /* no parser available: just output the raw packets */
934 st->need_parsing = AVSTREAM_PARSE_NONE;
935 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
936 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
937 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
938 st->parser->flags |= PARSER_FLAG_ONCE;
939 }
940
941 if (!st->need_parsing || !st->parser) {
942 /* no parsing needed: we just output the packet as is */
943 *pkt = cur_pkt;
944 compute_pkt_fields(s, st, NULL, pkt);
945 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
946 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
947 ff_reduce_index(s, st->index);
948 av_add_index_entry(st, pkt->pos, pkt->dts,
949 0, 0, AVINDEX_KEYFRAME);
950 }
951 got_packet = 1;
952 } else if (st->discard < AVDISCARD_ALL) {
953 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
954 return ret;
955 } else {
956 /* free packet */
957 av_packet_unref(&cur_pkt);
958 }
959 }
960
961 if (!got_packet && s->internal->parse_queue)
962 ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);
963
964 av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
965 if (metadata) {
966 s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
967 av_dict_copy(&s->metadata, metadata, 0);
968 av_dict_free(&metadata);
969 av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
970 }
971
972 if (s->debug & FF_FDEBUG_TS)
973 av_log(s, AV_LOG_DEBUG,
974 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
975 "size=%d, duration=%"PRId64", flags=%d\n",
976 pkt->stream_index, pkt->pts, pkt->dts,
977 pkt->size, pkt->duration, pkt->flags);
978
979 return ret;
980 }
981
982 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
983 {
984 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
985 int eof = 0;
986
987 if (!genpts)
988 return s->internal->packet_buffer
989 ? read_from_packet_buffer(&s->internal->packet_buffer,
990 &s->internal->packet_buffer_end, pkt)
991 : read_frame_internal(s, pkt);
992
993 for (;;) {
994 int ret;
995 AVPacketList *pktl = s->internal->packet_buffer;
996
997 if (pktl) {
998 AVPacket *next_pkt = &pktl->pkt;
999
1000 if (next_pkt->dts != AV_NOPTS_VALUE) {
1001 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1002 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1003 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1004 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
1005 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
1006 // not B-frame
1007 next_pkt->pts = pktl->pkt.dts;
1008 }
1009 pktl = pktl->next;
1010 }
1011 pktl = s->internal->packet_buffer;
1012 }
1013
1014 /* read packet from packet buffer, if there is data */
1015 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1016 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1017 return read_from_packet_buffer(&s->internal->packet_buffer,
1018 &s->internal->packet_buffer_end, pkt);
1019 }
1020
1021 ret = read_frame_internal(s, pkt);
1022 if (ret < 0) {
1023 if (pktl && ret != AVERROR(EAGAIN)) {
1024 eof = 1;
1025 continue;
1026 } else
1027 return ret;
1028 }
1029
1030 ret = add_to_pktbuf(&s->internal->packet_buffer, pkt,
1031 &s->internal->packet_buffer_end, 1);
1032 if (ret < 0)
1033 return ret;
1034 }
1035 }
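/* Usage sketch: the canonical demuxing loop around av_read_frame(). Every
 * successfully returned packet must be unreferenced by the caller; the
 * "video_idx" stream filter is an assumption for illustration.
 * @code
 * AVPacket pkt;
 * while (av_read_frame(fmt_ctx, &pkt) >= 0) {
 *     if (pkt.stream_index == video_idx) {
 *         // hand the packet to a decoder here
 *     }
 *     av_packet_unref(&pkt);    // always release, even for skipped packets
 * }
 * @endcode
 */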
1036
1037 /* XXX: suppress the packet queue */
1038 static void flush_packet_queue(AVFormatContext *s)
1039 {
1040 free_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end);
1041 free_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end);
1042 free_packet_buffer(&s->internal->raw_packet_buffer, &s->internal->raw_packet_buffer_end);
1043
1044 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1045 }
1046
1047 /*******************************************************/
1048 /* seek support */
1049
1050 int av_find_default_stream_index(AVFormatContext *s)
1051 {
1052 int first_audio_index = -1;
1053 int i;
1054 AVStream *st;
1055
1056 if (s->nb_streams <= 0)
1057 return -1;
1058 for (i = 0; i < s->nb_streams; i++) {
1059 st = s->streams[i];
1060 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1061 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1062 return i;
1063 }
1064 if (first_audio_index < 0 &&
1065 st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1066 first_audio_index = i;
1067 }
1068 return first_audio_index >= 0 ? first_audio_index : 0;
1069 }
1070
1071 /** Flush the frame reader. */
1072 void ff_read_frame_flush(AVFormatContext *s)
1073 {
1074 AVStream *st;
1075 int i, j;
1076
1077 flush_packet_queue(s);
1078
1079 /* Reset read state for each stream. */
1080 for (i = 0; i < s->nb_streams; i++) {
1081 st = s->streams[i];
1082
1083 if (st->parser) {
1084 av_parser_close(st->parser);
1085 st->parser = NULL;
1086 }
1087 st->last_IP_pts = AV_NOPTS_VALUE;
1088 /* We set the current DTS to an unspecified origin. */
1089 st->cur_dts = AV_NOPTS_VALUE;
1090
1091 st->probe_packets = MAX_PROBE_PACKETS;
1092
1093 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1094 st->pts_buffer[j] = AV_NOPTS_VALUE;
1095 }
1096 }
1097
1098 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1099 {
1100 int i;
1101
1102 for (i = 0; i < s->nb_streams; i++) {
1103 AVStream *st = s->streams[i];
1104
1105 st->cur_dts =
1106 av_rescale(timestamp,
1107 st->time_base.den * (int64_t) ref_st->time_base.num,
1108 st->time_base.num * (int64_t) ref_st->time_base.den);
1109 }
1110 }
1111
1112 void ff_reduce_index(AVFormatContext *s, int stream_index)
1113 {
1114 AVStream *st = s->streams[stream_index];
1115 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1116
1117 if ((unsigned) st->nb_index_entries >= max_entries) {
1118 int i;
1119 for (i = 0; 2 * i < st->nb_index_entries; i++)
1120 st->index_entries[i] = st->index_entries[2 * i];
1121 st->nb_index_entries = i;
1122 }
1123 }
1124
1125 int ff_add_index_entry(AVIndexEntry **index_entries,
1126 int *nb_index_entries,
1127 unsigned int *index_entries_allocated_size,
1128 int64_t pos, int64_t timestamp,
1129 int size, int distance, int flags)
1130 {
1131 AVIndexEntry *entries, *ie;
1132 int index;
1133
1134 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1135 return -1;
1136
1137 entries = av_fast_realloc(*index_entries,
1138 index_entries_allocated_size,
1139 (*nb_index_entries + 1) *
1140 sizeof(AVIndexEntry));
1141 if (!entries)
1142 return -1;
1143
1144 *index_entries = entries;
1145
1146 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1147 timestamp, AVSEEK_FLAG_ANY);
1148
1149 if (index < 0) {
1150 index = (*nb_index_entries)++;
1151 ie = &entries[index];
1152 assert(index == 0 || ie[-1].timestamp < timestamp);
1153 } else {
1154 ie = &entries[index];
1155 if (ie->timestamp != timestamp) {
1156 if (ie->timestamp <= timestamp)
1157 return -1;
1158 memmove(entries + index + 1, entries + index,
1159 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1160 (*nb_index_entries)++;
1161 } else if (ie->pos == pos && distance < ie->min_distance)
1162 // do not reduce the distance
1163 distance = ie->min_distance;
1164 }
1165
1166 ie->pos = pos;
1167 ie->timestamp = timestamp;
1168 ie->min_distance = distance;
1169 ie->size = size;
1170 ie->flags = flags;
1171
1172 return index;
1173 }
1174
1175 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1176 int size, int distance, int flags)
1177 {
1178 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1179 &st->index_entries_allocated_size, pos,
1180 timestamp, size, distance, flags);
1181 }
1182
1183 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1184 int64_t wanted_timestamp, int flags)
1185 {
1186 int a, b, m;
1187 int64_t timestamp;
1188
1189 a = -1;
1190 b = nb_entries;
1191
1192 // Optimize appending index entries at the end.
1193 if (b && entries[b - 1].timestamp < wanted_timestamp)
1194 a = b - 1;
1195
1196 while (b - a > 1) {
1197 m = (a + b) >> 1;
1198 timestamp = entries[m].timestamp;
1199 if (timestamp >= wanted_timestamp)
1200 b = m;
1201 if (timestamp <= wanted_timestamp)
1202 a = m;
1203 }
1204 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1205
1206 if (!(flags & AVSEEK_FLAG_ANY))
1207 while (m >= 0 && m < nb_entries &&
1208 !(entries[m].flags & AVINDEX_KEYFRAME))
1209 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1210
1211 if (m == nb_entries)
1212 return -1;
1213 return m;
1214 }
1215
1216 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1217 {
1218 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1219 wanted_timestamp, flags);
1220 }
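/* Illustrative example (the index entries are hypothetical): with keyframe
 * timestamps {0, 1000, 2000} and wanted_timestamp = 1500, AVSEEK_FLAG_BACKWARD
 * selects the entry at 1000, the default (forward) direction selects 2000,
 * and AVSEEK_FLAG_ANY would additionally accept non-keyframe entries if the
 * index contained any. */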
1221
1222 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1223 int64_t target_ts, int flags)
1224 {
1225 AVInputFormat *avif = s->iformat;
1226 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1227 int64_t ts_min, ts_max, ts;
1228 int index;
1229 int64_t ret;
1230 AVStream *st;
1231
1232 if (stream_index < 0)
1233 return -1;
1234
1235 av_log(s, AV_LOG_TRACE, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1236
1237 ts_max =
1238 ts_min = AV_NOPTS_VALUE;
1239 pos_limit = -1; // GCC falsely says it may be uninitialized.
1240
1241 st = s->streams[stream_index];
1242 if (st->index_entries) {
1243 AVIndexEntry *e;
1244
1245 /* FIXME: Whole function must be checked for non-keyframe entries in
1246 * index case, especially read_timestamp(). */
1247 index = av_index_search_timestamp(st, target_ts,
1248 flags | AVSEEK_FLAG_BACKWARD);
1249 index = FFMAX(index, 0);
1250 e = &st->index_entries[index];
1251
1252 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1253 pos_min = e->pos;
1254 ts_min = e->timestamp;
1255 av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1256 pos_min, ts_min);
1257 } else {
1258 assert(index == 0);
1259 }
1260
1261 index = av_index_search_timestamp(st, target_ts,
1262 flags & ~AVSEEK_FLAG_BACKWARD);
1263 assert(index < st->nb_index_entries);
1264 if (index >= 0) {
1265 e = &st->index_entries[index];
1266 assert(e->timestamp >= target_ts);
1267 pos_max = e->pos;
1268 ts_max = e->timestamp;
1269 pos_limit = pos_max - e->min_distance;
1270 av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1271 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1272 }
1273 }
1274
1275 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1276 ts_min, ts_max, flags, &ts, avif->read_timestamp);
1277 if (pos < 0)
1278 return -1;
1279
1280 /* do the seek */
1281 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1282 return ret;
1283
1284 ff_update_cur_dts(s, st, ts);
1285
1286 return 0;
1287 }
1288
1289 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1290 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1291 int64_t ts_min, int64_t ts_max,
1292 int flags, int64_t *ts_ret,
1293 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1294 int64_t *, int64_t))
1295 {
1296 int64_t pos, ts;
1297 int64_t start_pos, filesize;
1298 int no_change;
1299
1300 av_log(s, AV_LOG_TRACE, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1301
1302 if (ts_min == AV_NOPTS_VALUE) {
1303 pos_min = s->internal->data_offset;
1304 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1305 if (ts_min == AV_NOPTS_VALUE)
1306 return -1;
1307 }
1308
1309 if (ts_max == AV_NOPTS_VALUE) {
1310 int step = 1024;
1311 filesize = avio_size(s->pb);
1312 pos_max = filesize - 1;
1313 do {
1314 pos_max -= step;
1315 ts_max = read_timestamp(s, stream_index, &pos_max,
1316 pos_max + step);
1317 step += step;
1318 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1319 if (ts_max == AV_NOPTS_VALUE)
1320 return -1;
1321
1322 for (;;) {
1323 int64_t tmp_pos = pos_max + 1;
1324 int64_t tmp_ts = read_timestamp(s, stream_index,
1325 &tmp_pos, INT64_MAX);
1326 if (tmp_ts == AV_NOPTS_VALUE)
1327 break;
1328 ts_max = tmp_ts;
1329 pos_max = tmp_pos;
1330 if (tmp_pos >= filesize)
1331 break;
1332 }
1333 pos_limit = pos_max;
1334 }
1335
1336 if (ts_min > ts_max)
1337 return -1;
1338 else if (ts_min == ts_max)
1339 pos_limit = pos_min;
1340
1341 no_change = 0;
1342 while (pos_min < pos_limit) {
1343 av_log(s, AV_LOG_TRACE, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1344 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1345 assert(pos_limit <= pos_max);
1346
1347 if (no_change == 0) {
1348 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1349 // interpolate position (better than plain bisection)
1350 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1351 ts_max - ts_min) +
1352 pos_min - approximate_keyframe_distance;
1353 } else if (no_change == 1) {
1354 // bisection if interpolation did not change min / max pos last time
1355 pos = (pos_min + pos_limit) >> 1;
1356 } else {
1357 /* linear search if bisection failed, can only happen if there
1358 * are very few or no keyframes between min/max */
1359 pos = pos_min;
1360 }
1361 if (pos <= pos_min)
1362 pos = pos_min + 1;
1363 else if (pos > pos_limit)
1364 pos = pos_limit;
1365 start_pos = pos;
1366
1367 // May pass pos_limit instead of -1.
1368 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1369 if (pos == pos_max)
1370 no_change++;
1371 else
1372 no_change = 0;
1373 av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1374 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1375 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1376 pos_limit, start_pos, no_change);
1377 if (ts == AV_NOPTS_VALUE) {
1378 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1379 return -1;
1380 }
1381 assert(ts != AV_NOPTS_VALUE);
1382 if (target_ts <= ts) {
1383 pos_limit = start_pos - 1;
1384 pos_max = pos;
1385 ts_max = ts;
1386 }
1387 if (target_ts >= ts) {
1388 pos_min = pos;
1389 ts_min = ts;
1390 }
1391 }
1392
1393 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1394 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1395 pos_min = pos;
1396 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1397 pos_min++;
1398 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1399 av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1400 pos, ts_min, target_ts, ts_max);
1401 *ts_ret = ts;
1402 return pos;
1403 }
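/* Illustrative iteration (all values hypothetical): searching for
 * target_ts = 500 with ts_min = 0 at pos_min = 0 and ts_max = 1000 at
 * pos_max = 1000000, the first pass interpolates pos near 500000 (minus the
 * approximate keyframe distance), reads the timestamp there, and then moves
 * either the lower bound (pos_min/ts_min) or the upper bound (pos_max/ts_max)
 * towards it, falling back to plain bisection and finally to a linear scan
 * when interpolation stops making progress. */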
1404
1405 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1406 int64_t pos, int flags)
1407 {
1408 int64_t pos_min, pos_max;
1409
1410 pos_min = s->internal->data_offset;
1411 pos_max = avio_size(s->pb) - 1;
1412
1413 if (pos < pos_min)
1414 pos = pos_min;
1415 else if (pos > pos_max)
1416 pos = pos_max;
1417
1418 avio_seek(s->pb, pos, SEEK_SET);
1419
1420 return 0;
1421 }
1422
1423 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1424 int64_t timestamp, int flags)
1425 {
1426 int index;
1427 int64_t ret;
1428 AVStream *st;
1429 AVIndexEntry *ie;
1430
1431 st = s->streams[stream_index];
1432
1433 index = av_index_search_timestamp(st, timestamp, flags);
1434
1435 if (index < 0 && st->nb_index_entries &&
1436 timestamp < st->index_entries[0].timestamp)
1437 return -1;
1438
1439 if (index < 0 || index == st->nb_index_entries - 1) {
1440 AVPacket pkt;
1441
1442 if (st->nb_index_entries) {
1443 assert(st->index_entries);
1444 ie = &st->index_entries[st->nb_index_entries - 1];
1445 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1446 return ret;
1447 ff_update_cur_dts(s, st, ie->timestamp);
1448 } else {
1449 if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
1450 return ret;
1451 }
1452 for (;;) {
1453 int read_status;
1454 do {
1455 read_status = av_read_frame(s, &pkt);
1456 } while (read_status == AVERROR(EAGAIN));
1457 if (read_status < 0)
1458 break;
1459 av_packet_unref(&pkt);
1460 if (stream_index == pkt.stream_index)
1461 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1462 break;
1463 }
1464 index = av_index_search_timestamp(st, timestamp, flags);
1465 }
1466 if (index < 0)
1467 return -1;
1468
1469 ff_read_frame_flush(s);
1470 if (s->iformat->read_seek)
1471 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1472 return 0;
1473 ie = &st->index_entries[index];
1474 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1475 return ret;
1476 ff_update_cur_dts(s, st, ie->timestamp);
1477
1478 return 0;
1479 }
1480
1481 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1482 int64_t timestamp, int flags)
1483 {
1484 int ret;
1485 AVStream *st;
1486
1487 if (flags & AVSEEK_FLAG_BYTE) {
1488 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1489 return -1;
1490 ff_read_frame_flush(s);
1491 return seek_frame_byte(s, stream_index, timestamp, flags);
1492 }
1493
1494 if (stream_index < 0) {
1495 stream_index = av_find_default_stream_index(s);
1496 if (stream_index < 0)
1497 return -1;
1498
1499 st = s->streams[stream_index];
1500 /* The timestamp for the default stream must be expressed in AV_TIME_BASE units. */
1501 timestamp = av_rescale(timestamp, st->time_base.den,
1502 AV_TIME_BASE * (int64_t) st->time_base.num);
1503 }
1504
1505 /* first, we try the format specific seek */
1506 if (s->iformat->read_seek) {
1507 ff_read_frame_flush(s);
1508 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1509 } else
1510 ret = -1;
1511 if (ret >= 0)
1512 return 0;
1513
1514 if (s->iformat->read_timestamp &&
1515 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1516 ff_read_frame_flush(s);
1517 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1518 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1519 ff_read_frame_flush(s);
1520 return seek_frame_generic(s, stream_index, timestamp, flags);
1521 } else
1522 return -1;
1523 }
1524
1525 int av_seek_frame(AVFormatContext *s, int stream_index,
1526 int64_t timestamp, int flags)
1527 {
1528 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1529
1530 if (ret >= 0)
1531 ret = queue_attached_pictures(s);
1532
1533 return ret;
1534 }
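/* Usage sketch (the 10 s target and the stream index are illustrative):
 * av_seek_frame() takes the timestamp in the chosen stream's time base, so a
 * wall-clock target is rescaled from AV_TIME_BASE units first.
 * @code
 * int64_t ts = av_rescale_q(10LL * AV_TIME_BASE, AV_TIME_BASE_Q,
 *                           fmt_ctx->streams[stream_index]->time_base);
 * if (av_seek_frame(fmt_ctx, stream_index, ts, AVSEEK_FLAG_BACKWARD) < 0)
 *     av_log(fmt_ctx, AV_LOG_ERROR, "seek failed\n");
 * @endcode
 */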
1535
1536 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1537 int64_t ts, int64_t max_ts, int flags)
1538 {
1539 if (min_ts > ts || max_ts < ts)
1540 return -1;
1541
1542 if (s->iformat->read_seek2) {
1543 int ret;
1544 ff_read_frame_flush(s);
1545 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1546 ts, max_ts, flags);
1547
1548 if (ret >= 0)
1549 ret = queue_attached_pictures(s);
1550 return ret;
1551 }
1552
1553 if (s->iformat->read_timestamp) {
1554 // try to seek via read_timestamp()
1555 }
1556
1557 // Fall back on old API if new is not implemented but old is.
1558 // Note the old API has somewhat different semantics.
1559 if (s->iformat->read_seek || 1)
1560 return av_seek_frame(s, stream_index, ts,
1561 flags | ((uint64_t) ts - min_ts >
1562 (uint64_t) max_ts - ts
1563 ? AVSEEK_FLAG_BACKWARD : 0));
1564
1565 // try some generic seek like seek_frame_generic() but with new ts semantics
1566 }
1567
1568 /*******************************************************/
1569
1570 /**
1571 * Check whether any stream in the context has an accurate duration.
1572 *
1573 * @return TRUE if at least one stream has an accurate duration.
1574 */
1575 static int has_duration(AVFormatContext *ic)
1576 {
1577 int i;
1578 AVStream *st;
1579
1580 for (i = 0; i < ic->nb_streams; i++) {
1581 st = ic->streams[i];
1582 if (st->duration != AV_NOPTS_VALUE)
1583 return 1;
1584 }
1585 if (ic->duration != AV_NOPTS_VALUE)
1586 return 1;
1587 return 0;
1588 }
1589
1590 /**
1591 * Estimate the container-level timings from those of each component stream.
1592 *
1593 * Also computes the global bitrate if possible.
1594 */
1595 static void update_stream_timings(AVFormatContext *ic)
1596 {
1597 int64_t start_time, start_time1, end_time, end_time1;
1598 int64_t duration, duration1, filesize;
1599 int i;
1600 AVStream *st;
1601
1602 start_time = INT64_MAX;
1603 end_time = INT64_MIN;
1604 duration = INT64_MIN;
1605 for (i = 0; i < ic->nb_streams; i++) {
1606 st = ic->streams[i];
1607 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1608 start_time1 = av_rescale_q(st->start_time, st->time_base,
1609 AV_TIME_BASE_Q);
1610 start_time = FFMIN(start_time, start_time1);
1611 if (st->duration != AV_NOPTS_VALUE) {
1612 end_time1 = start_time1 +
1613 av_rescale_q(st->duration, st->time_base,
1614 AV_TIME_BASE_Q);
1615 end_time = FFMAX(end_time, end_time1);
1616 }
1617 }
1618 if (st->duration != AV_NOPTS_VALUE) {
1619 duration1 = av_rescale_q(st->duration, st->time_base,
1620 AV_TIME_BASE_Q);
1621 duration = FFMAX(duration, duration1);
1622 }
1623 }
1624 if (start_time != INT64_MAX) {
1625 ic->start_time = start_time;
1626 if (end_time != INT64_MIN)
1627 duration = FFMAX(duration, end_time - start_time);
1628 }
1629 if (duration != INT64_MIN) {
1630 ic->duration = duration;
1631 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1632 /* compute the bitrate */
1633 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1634 (double) ic->duration;
1635 }
1636 }
1637
1638 static void fill_all_stream_timings(AVFormatContext *ic)
1639 {
1640 int i;
1641 AVStream *st;
1642
1643 update_stream_timings(ic);
1644 for (i = 0; i < ic->nb_streams; i++) {
1645 st = ic->streams[i];
1646 if (st->start_time == AV_NOPTS_VALUE) {
1647 if (ic->start_time != AV_NOPTS_VALUE)
1648 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1649 st->time_base);
1650 if (ic->duration != AV_NOPTS_VALUE)
1651 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
1652 st->time_base);
1653 }
1654 }
1655 }
1656
1657 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1658 {
1659 int64_t filesize, duration;
1660 int i;
1661 AVStream *st;
1662
1663 /* if bit_rate is already set, we believe it */
1664 if (ic->bit_rate <= 0) {
1665 int bit_rate = 0;
1666 for (i = 0; i < ic->nb_streams; i++) {
1667 st = ic->streams[i];
1668 if (st->codec->bit_rate > 0) {
1669 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1670 bit_rate = 0;
1671 break;
1672 }
1673 bit_rate += st->codec->bit_rate;
1674 }
1675 }
1676 ic->bit_rate = bit_rate;
1677 }
1678
1679 /* if duration is already set, we believe it */
1680 if (ic->duration == AV_NOPTS_VALUE &&
1681 ic->bit_rate != 0) {
1682 filesize = ic->pb ? avio_size(ic->pb) : 0;
1683 if (filesize > 0) {
1684 for (i = 0; i < ic->nb_streams; i++) {
1685 st = ic->streams[i];
1686 duration = av_rescale(8 * filesize, st->time_base.den,
1687 ic->bit_rate *
1688 (int64_t) st->time_base.num);
1689 if (st->duration == AV_NOPTS_VALUE)
1690 st->duration = duration;
1691 }
1692 }
1693 }
1694 }
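/* Worked example (numbers are illustrative only): a 10,000,000-byte file with
 * a total bit rate of 800,000 bit/s gives 8 * 10,000,000 / 800,000 = 100 s;
 * the loop above stores that value in each stream's time base, e.g.
 * 100 * 90000 = 9,000,000 ticks for a 1/90000 time base. */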
1695
1696 #define DURATION_MAX_READ_SIZE 250000
1697 #define DURATION_MAX_RETRY 3
1698
1699 /* only usable for MPEG-PS streams */
1700 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1701 {
1702 AVPacket pkt1, *pkt = &pkt1;
1703 AVStream *st;
1704 int read_size, i, ret;
1705 int64_t end_time;
1706 int64_t filesize, offset, duration;
1707 int retry = 0;
1708
1709 /* flush packet queue */
1710 flush_packet_queue(ic);
1711
1712 for (i = 0; i < ic->nb_streams; i++) {
1713 st = ic->streams[i];
1714 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1715 av_log(st->codec, AV_LOG_WARNING,
1716 "start time is not set in estimate_timings_from_pts\n");
1717
1718 if (st->parser) {
1719 av_parser_close(st->parser);
1720 st->parser = NULL;
1721 }
1722 }
1723
1724 /* estimate the end time (duration) */
1725 /* XXX: may need to support wrapping */
1726 filesize = ic->pb ? avio_size(ic->pb) : 0;
1727 end_time = AV_NOPTS_VALUE;
1728 do {
1729 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1730 if (offset < 0)
1731 offset = 0;
1732
1733 avio_seek(ic->pb, offset, SEEK_SET);
1734 read_size = 0;
1735 for (;;) {
1736 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1737 break;
1738
1739 do {
1740 ret = ff_read_packet(ic, pkt);
1741 } while (ret == AVERROR(EAGAIN));
1742 if (ret != 0)
1743 break;
1744 read_size += pkt->size;
1745 st = ic->streams[pkt->stream_index];
1746 if (pkt->pts != AV_NOPTS_VALUE &&
1747 (st->start_time != AV_NOPTS_VALUE ||
1748 st->first_dts != AV_NOPTS_VALUE)) {
1749 duration = end_time = pkt->pts;
1750 if (st->start_time != AV_NOPTS_VALUE)
1751 duration -= st->start_time;
1752 else
1753 duration -= st->first_dts;
1754 if (duration < 0)
1755 duration += 1LL << st->pts_wrap_bits;
1756 if (duration > 0) {
1757 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1758 st->duration = duration;
1759 }
1760 }
1761 av_packet_unref(pkt);
1762 }
1763 } while (end_time == AV_NOPTS_VALUE &&
1764 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1765 ++retry <= DURATION_MAX_RETRY);
1766
1767 fill_all_stream_timings(ic);
1768
1769 avio_seek(ic->pb, old_offset, SEEK_SET);
1770 for (i = 0; i < ic->nb_streams; i++) {
1771 st = ic->streams[i];
1772 st->cur_dts = st->first_dts;
1773 st->last_IP_pts = AV_NOPTS_VALUE;
1774 }
1775 }
1776
1777 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1778 {
1779 int64_t file_size;
1780
1781 /* get the file size, if possible */
1782 if (ic->iformat->flags & AVFMT_NOFILE) {
1783 file_size = 0;
1784 } else {
1785 file_size = avio_size(ic->pb);
1786 file_size = FFMAX(0, file_size);
1787 }
1788
1789 if ((!strcmp(ic->iformat->name, "mpeg") ||
1790 !strcmp(ic->iformat->name, "mpegts")) &&
1791 file_size && ic->pb->seekable) {
1792 /* get accurate estimate from the PTSes */
1793 estimate_timings_from_pts(ic, old_offset);
1794 } else if (has_duration(ic)) {
1795 /* at least one component has timings - we use them for all
1796 * the components */
1797 fill_all_stream_timings(ic);
1798 } else {
1799 av_log(ic, AV_LOG_WARNING,
1800 "Estimating duration from bitrate, this may be inaccurate\n");
1801 /* less precise: use bitrate info */
1802 estimate_timings_from_bit_rate(ic);
1803 }
1804 update_stream_timings(ic);
1805
1806 {
1807 int i;
1808 AVStream av_unused *st;
1809 for (i = 0; i < ic->nb_streams; i++) {
1810 st = ic->streams[i];
1811 av_log(ic, AV_LOG_TRACE, "%d: start_time: %0.3f duration: %0.3f\n", i,
1812 (double) st->start_time / AV_TIME_BASE,
1813 (double) st->duration / AV_TIME_BASE);
1814 }
1815 av_log(ic, AV_LOG_TRACE,
1816 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1817 (double) ic->start_time / AV_TIME_BASE,
1818 (double) ic->duration / AV_TIME_BASE,
1819 ic->bit_rate / 1000);
1820 }
1821 }
1822
1823 static int has_codec_parameters(AVStream *st)
1824 {
1825 AVCodecContext *avctx = st->codec;
1826 int val;
1827
1828 switch (avctx->codec_type) {
1829 case AVMEDIA_TYPE_AUDIO:
1830 val = avctx->sample_rate && avctx->channels;
1831 if (st->info->found_decoder >= 0 &&
1832 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1833 return 0;
1834 break;
1835 case AVMEDIA_TYPE_VIDEO:
1836 val = avctx->width;
1837 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1838 return 0;
1839 break;
1840 default:
1841 val = 1;
1842 break;
1843 }
1844 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
1845 }
1846
1847 static int has_decode_delay_been_guessed(AVStream *st)
1848 {
1849 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1850 st->info->nb_decoded_frames >= 6;
1851 }
1852
1853 /* Returns 1 if decoded data was returned, 0 if not, or a negative error code. */
1854 static int try_decode_frame(AVStream *st, AVPacket *avpkt,
1855 AVDictionary **options)
1856 {
1857 const AVCodec *codec;
1858 int got_picture = 1, ret = 0;
1859 AVFrame *frame = av_frame_alloc();
1860 AVPacket pkt = *avpkt;
1861
1862 if (!frame)
1863 return AVERROR(ENOMEM);
1864
1865 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
1866 AVDictionary *thread_opt = NULL;
1867
1868 codec = st->codec->codec ? st->codec->codec
1869 : avcodec_find_decoder(st->codec->codec_id);
1870
1871 if (!codec) {
1872 st->info->found_decoder = -1;
1873 ret = -1;
1874 goto fail;
1875 }
1876
1877 /* Force thread count to 1 since the H.264 decoder will not extract
1878 * SPS and PPS to extradata during multi-threaded decoding. */
1879 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1880 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
1881 if (!options)
1882 av_dict_free(&thread_opt);
1883 if (ret < 0) {
1884 st->info->found_decoder = -1;
1885 goto fail;
1886 }
1887 st->info->found_decoder = 1;
1888 } else if (!st->info->found_decoder)
1889 st->info->found_decoder = 1;
1890
1891 if (st->info->found_decoder < 0) {
1892 ret = -1;
1893 goto fail;
1894 }
1895
1896 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1897 ret >= 0 &&
1898 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1899 (!st->codec_info_nb_frames &&
1900 (st->codec->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
1901 got_picture = 0;
1902 switch (st->codec->codec_type) {
1903 case AVMEDIA_TYPE_VIDEO:
1904 ret = avcodec_decode_video2(st->codec, frame,
1905 &got_picture, &pkt);
1906 break;
1907 case AVMEDIA_TYPE_AUDIO:
1908 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
1909 break;
1910 default:
1911 break;
1912 }
1913 if (ret >= 0) {
1914 if (got_picture)
1915 st->info->nb_decoded_frames++;
1916 pkt.data += ret;
1917 pkt.size -= ret;
1918 ret = got_picture;
1919 }
1920 }
1921
1922 fail:
1923 av_frame_free(&frame);
1924 return ret;
1925 }
1926
1927 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1928 {
1929 while (tags->id != AV_CODEC_ID_NONE) {
1930 if (tags->id == id)
1931 return tags->tag;
1932 tags++;
1933 }
1934 return 0;
1935 }
1936
1937 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1938 {
1939 int i;
1940 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1941 if (tag == tags[i].tag)
1942 return tags[i].id;
1943 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1944 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1945 return tags[i].id;
1946 return AV_CODEC_ID_NONE;
1947 }
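
/* Illustration only (not part of the original file): a minimal sketch of how
 * a codec tag table maps between container fourccs and codec IDs via
 * ff_codec_get_tag() / ff_codec_get_id(). The table below is hypothetical.
 *
 *     static const AVCodecTag example_tags[] = {
 *         { AV_CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') },
 *         { AV_CODEC_ID_AAC,  MKTAG('m', 'p', '4', 'a') },
 *         { AV_CODEC_ID_NONE, 0 },
 *     };
 *     unsigned tag      = ff_codec_get_tag(example_tags, AV_CODEC_ID_H264);
 *     enum AVCodecID id = ff_codec_get_id(example_tags, MKTAG('A', 'V', 'C', '1'));
 *     // tag == MKTAG('a', 'v', 'c', '1'); the second lookup pass is
 *     // case-insensitive, so id == AV_CODEC_ID_H264
 */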
1948
1949 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
1950 {
1951 if (flt) {
1952 switch (bps) {
1953 case 32:
1954 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
1955 case 64:
1956 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
1957 default:
1958 return AV_CODEC_ID_NONE;
1959 }
1960 } else {
1961 bps >>= 3;
1962 if (sflags & (1 << (bps - 1))) {
1963 switch (bps) {
1964 case 1:
1965 return AV_CODEC_ID_PCM_S8;
1966 case 2:
1967 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
1968 case 3:
1969 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
1970 case 4:
1971 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
1972 default:
1973 return AV_CODEC_ID_NONE;
1974 }
1975 } else {
1976 switch (bps) {
1977 case 1:
1978 return AV_CODEC_ID_PCM_U8;
1979 case 2:
1980 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
1981 case 3:
1982 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
1983 case 4:
1984 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
1985 default:
1986 return AV_CODEC_ID_NONE;
1987 }
1988 }
1989 }
1990 }
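
/* Illustration only: how a demuxer might map raw PCM parameters to a codec ID
 * with ff_get_pcm_codec_id(). The sflags argument is a bitmask with bit
 * (bytes_per_sample - 1) set for signed formats; the values below are made up.
 *
 *     // 16-bit signed little-endian PCM: 16 bits, not float, not big-endian,
 *     // signedness flag for 2 bytes per sample
 *     enum AVCodecID id = ff_get_pcm_codec_id(16, 0, 0, 1 << 1);
 *     // id == AV_CODEC_ID_PCM_S16LE
 */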
1991
1992 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
1993 {
1994 int i;
1995 for (i = 0; tags && tags[i]; i++) {
1996 int tag = ff_codec_get_tag(tags[i], id);
1997 if (tag)
1998 return tag;
1999 }
2000 return 0;
2001 }
2002
2003 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2004 {
2005 int i;
2006 for (i = 0; tags && tags[i]; i++) {
2007 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2008 if (id != AV_CODEC_ID_NONE)
2009 return id;
2010 }
2011 return AV_CODEC_ID_NONE;
2012 }
2013
2014 static void compute_chapters_end(AVFormatContext *s)
2015 {
2016 unsigned int i, j;
2017 int64_t max_time = s->duration +
2018 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2019
2020 for (i = 0; i < s->nb_chapters; i++)
2021 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2022 AVChapter *ch = s->chapters[i];
2023 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2024 ch->time_base)
2025 : INT64_MAX;
2026
2027 for (j = 0; j < s->nb_chapters; j++) {
2028 AVChapter *ch1 = s->chapters[j];
2029 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2030 ch->time_base);
2031 if (j != i && next_start > ch->start && next_start < end)
2032 end = next_start;
2033 }
2034 ch->end = (end == INT64_MAX) ? ch->start : end;
2035 }
2036 }
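
/* Illustration only, a worked example: with two chapters in a 1/1000 time base
 * starting at 0 and 60000 and a container duration of 120 seconds, an unset
 * end on the first chapter becomes 60000 (the next chapter's start) and an
 * unset end on the last chapter becomes 120000 (max_time rescaled to the
 * chapter time base). */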
2037
2038 static int get_std_framerate(int i)
2039 {
2040 if (i < 60 * 12)
2041 return (i + 1) * 1001;
2042 else
2043 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
2044 }
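
/* Note (added for clarity): indices 0 .. 60 * 12 - 1 enumerate the timebases
 * (i + 1) * 1001, i.e. candidate framerates n/12 for n = 1..720 once divided
 * by 12 * 1001; the last five entries yield the NTSC-style rates 24000/1001,
 * 30000/1001, 60000/1001, 12000/1001 and 15000/1001. These candidates are used
 * below when rounding the guessed average framerate to a standard one. */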
2045
2046 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2047 {
2048 int i, count, ret, read_size, j;
2049 AVStream *st;
2050 AVPacket pkt1, *pkt;
2051 int64_t old_offset = avio_tell(ic->pb);
2052 // New streams might appear during probing; there are no caller options for those.
2053 int orig_nb_streams = ic->nb_streams;
2054
2055 for (i = 0; i < ic->nb_streams; i++) {
2056 const AVCodec *codec;
2057 AVDictionary *thread_opt = NULL;
2058 st = ic->streams[i];
2059
2060 // Needed here only for the parser->split() extradata handling below.
2061 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2062 st->parser = av_parser_init(st->codec->codec_id);
2063 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2064 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2065 }
2066 codec = st->codec->codec ? st->codec->codec
2067 : avcodec_find_decoder(st->codec->codec_id);
2068
2069 /* Force thread count to 1 since the H.264 decoder will not extract
2070 * SPS and PPS to extradata during multi-threaded decoding. */
2071 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2072
2073 /* Ensure that subtitle_header is properly set. */
2074 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2075 && codec && !st->codec->codec)
2076 avcodec_open2(st->codec, codec,
2077 options ? &options[i] : &thread_opt);
2078
2079 // Try to just open decoders, in case this is enough to get parameters.
2080 if (!has_codec_parameters(st)) {
2081 if (codec && !st->codec->codec)
2082 avcodec_open2(st->codec, codec,
2083 options ? &options[i] : &thread_opt);
2084 }
2085 if (!options)
2086 av_dict_free(&thread_opt);
2087 }
2088
2089 for (i = 0; i < ic->nb_streams; i++) {
2090 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2091 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
2092 }
2093
2094 count = 0;
2095 read_size = 0;
2096 for (;;) {
2097 if (ff_check_interrupt(&ic->interrupt_callback)) {
2098 ret = AVERROR_EXIT;
2099 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2100 break;
2101 }
2102
2103 /* check if one codec still needs to be handled */
2104 for (i = 0; i < ic->nb_streams; i++) {
2105 int fps_analyze_framecount = 20;
2106
2107 st = ic->streams[i];
2108 if (!has_codec_parameters(st))
2109 break;
2110 /* If the timebase is coarse (like the usual millisecond precision
2111 * of mkv), we need to analyze more frames to reliably arrive at
2112 * the correct fps. */
2113 if (av_q2d(st->time_base) > 0.0005)
2114 fps_analyze_framecount *= 2;
2115 if (ic->fps_probe_size >= 0)
2116 fps_analyze_framecount = ic->fps_probe_size;
2117 /* variable fps and no guess at the real fps */
2118 if (!st->avg_frame_rate.num &&
2119 st->codec_info_nb_frames < fps_analyze_framecount &&
2120 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2121 break;
2122 if (st->parser && st->parser->parser->split &&
2123 !st->codec->extradata)
2124 break;
2125 if (st->first_dts == AV_NOPTS_VALUE &&
2126 st->codec_info_nb_frames < ic->max_ts_probe &&
2127 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2128 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2129 break;
2130 }
2131 if (i == ic->nb_streams) {
2132 /* NOTE: If the format has no header, then we need to read some
2133 * packets to get most of the streams, so we cannot stop here. */
2134 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2135 /* If we found the info for all the codecs, we can stop. */
2136 ret = count;
2137 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2138 break;
2139 }
2140 }
2141 /* We did not get all the codec info, but we read too much data. */
2142 if (read_size >= ic->probesize) {
2143 ret = count;
2144 av_log(ic, AV_LOG_DEBUG,
2145 "Probe buffer size limit %d reached\n", ic->probesize);
2146 break;
2147 }
2148
2149 /* NOTE: A new stream can be added here if the file has no header
2150 * (AVFMTCTX_NOHEADER). */
2151 ret = read_frame_internal(ic, &pkt1);
2152 if (ret == AVERROR(EAGAIN))
2153 continue;
2154
2155 if (ret < 0) {
2156 /* EOF or error */
2157 AVPacket empty_pkt = { 0 };
2158 int err = 0;
2159 av_init_packet(&empty_pkt);
2160
2161 /* We could not get all the codec parameters before EOF. */
2162 ret = -1;
2163 for (i = 0; i < ic->nb_streams; i++) {
2164 st = ic->streams[i];
2165
2166 /* flush the decoders */
2167 if (st->info->found_decoder == 1) {
2168 do {
2169 err = try_decode_frame(st, &empty_pkt,
2170 (options && i < orig_nb_streams)
2171 ? &options[i] : NULL);
2172 } while (err > 0 && !has_codec_parameters(st));
2173 }
2174
2175 if (err < 0) {
2176 av_log(ic, AV_LOG_WARNING,
2177 "decoding for stream %d failed\n", st->index);
2178 } else if (!has_codec_parameters(st)) {
2179 char buf[256];
2180 avcodec_string(buf, sizeof(buf), st->codec, 0);
2181 av_log(ic, AV_LOG_WARNING,
2182 "Could not find codec parameters (%s)\n", buf);
2183 } else {
2184 ret = 0;
2185 }
2186 }
2187 break;
2188 }
2189
2190 pkt = &pkt1;
2191
2192 if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) {
2193 ret = add_to_pktbuf(&ic->internal->packet_buffer, pkt,
2194 &ic->internal->packet_buffer_end, 0);
2195 if (ret < 0)
2196 goto find_stream_info_err;
2197 }
2198
2199 read_size += pkt->size;
2200
2201 st = ic->streams[pkt->stream_index];
2202 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2203 /* check for non-increasing dts */
2204 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2205 st->info->fps_last_dts >= pkt->dts) {
2206 av_log(ic, AV_LOG_WARNING,
2207 "Non-increasing DTS in stream %d: packet %d with DTS "
2208 "%"PRId64", packet %d with DTS %"PRId64"\n",
2209 st->index, st->info->fps_last_dts_idx,
2210 st->info->fps_last_dts, st->codec_info_nb_frames,
2211 pkt->dts);
2212 st->info->fps_first_dts =
2213 st->info->fps_last_dts = AV_NOPTS_VALUE;
2214 }
2215 /* Check for a discontinuity in dts. If the difference in dts
2216 * is more than 1000 times the average packet duration in the
2217 * sequence, we treat it as a discontinuity. */
2218 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2219 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2220 (pkt->dts - st->info->fps_last_dts) / 1000 >
2221 (st->info->fps_last_dts - st->info->fps_first_dts) /
2222 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2223 av_log(ic, AV_LOG_WARNING,
2224 "DTS discontinuity in stream %d: packet %d with DTS "
2225 "%"PRId64", packet %d with DTS %"PRId64"\n",
2226 st->index, st->info->fps_last_dts_idx,
2227 st->info->fps_last_dts, st->codec_info_nb_frames,
2228 pkt->dts);
2229 st->info->fps_first_dts =
2230 st->info->fps_last_dts = AV_NOPTS_VALUE;
2231 }
2232
2233 /* update stored dts values */
2234 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2235 st->info->fps_first_dts = pkt->dts;
2236 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2237 }
2238 st->info->fps_last_dts = pkt->dts;
2239 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2240
2241 /* check max_analyze_duration */
2242 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2243 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2244 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2245 ic->max_analyze_duration);
2246 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2247 av_packet_unref(pkt);
2248 break;
2249 }
2250 }
2251 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
2252 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
2253 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2254 st->codec->extradata_size = i;
2255 st->codec->extradata = av_mallocz(st->codec->extradata_size +
2256 AV_INPUT_BUFFER_PADDING_SIZE);
2257 if (!st->codec->extradata)
2258 return AVERROR(ENOMEM);
2259 memcpy(st->codec->extradata, pkt->data,
2260 st->codec->extradata_size);
2261 }
2262 }
2263
2264 /* If there is still no information, we try to open the codec and
2265 * decompress a frame. We try to avoid that in most cases as
2266 * it takes longer and uses more memory. For MPEG-4, we need to
2267 * decompress for QuickTime.
2268 *
2269 * If AV_CODEC_CAP_CHANNEL_CONF is set, this will force decoding of at
2270 * least one frame of codec data; this makes sure the codec initializes
2271 * the channel configuration rather than only trusting the values from
2272 * the container. */
2273 try_decode_frame(st, pkt,
2274 (options && i < orig_nb_streams) ? &options[i] : NULL);
2275
2276 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2277 av_packet_unref(pkt);
2278
2279 st->codec_info_nb_frames++;
2280 count++;
2281 }
2282
2283 // close codecs which were opened in try_decode_frame()
2284 for (i = 0; i < ic->nb_streams; i++) {
2285 st = ic->streams[i];
2286 avcodec_close(st->codec);
2287 }
2288 for (i = 0; i < ic->nb_streams; i++) {
2289 st = ic->streams[i];
2290 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2291 /* estimate average framerate if not set by demuxer */
2292 if (!st->avg_frame_rate.num &&
2293 st->info->fps_last_dts != st->info->fps_first_dts) {
2294 int64_t delta_dts = st->info->fps_last_dts -
2295 st->info->fps_first_dts;
2296 int delta_packets = st->info->fps_last_dts_idx -
2297 st->info->fps_first_dts_idx;
2298 int best_fps = 0;
2299 double best_error = 0.01;
2300
2301 if (delta_dts >= INT64_MAX / st->time_base.num ||
2302 delta_packets >= INT64_MAX / st->time_base.den ||
2303 delta_dts < 0)
2304 continue;
2305 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2306 delta_packets * (int64_t) st->time_base.den,
2307 delta_dts * (int64_t) st->time_base.num, 60000);
2308
2309 /* Round guessed framerate to a "standard" framerate if it's
2310 * within 1% of the original estimate. */
2311 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2312 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2313 double error = fabs(av_q2d(st->avg_frame_rate) /
2314 av_q2d(std_fps) - 1);
2315
2316 if (error < best_error) {
2317 best_error = error;
2318 best_fps = std_fps.num;
2319 }
2320 }
2321 if (best_fps)
2322 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2323 best_fps, 12 * 1001, INT_MAX);
2324 }
2325 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2326 if (!st->codec->bits_per_coded_sample)
2327 st->codec->bits_per_coded_sample =
2328 av_get_bits_per_sample(st->codec->codec_id);
2329 // set stream disposition based on audio service type
2330 switch (st->codec->audio_service_type) {
2331 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2332 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2333 break;
2334 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2335 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2336 break;
2337 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2338 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2339 break;
2340 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2341 st->disposition = AV_DISPOSITION_COMMENT;
2342 break;
2343 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2344 st->disposition = AV_DISPOSITION_KARAOKE;
2345 break;
2346 }
2347 }
2348 }
2349
2350 estimate_timings(ic, old_offset);
2351
2352 compute_chapters_end(ic);
2353
2354 find_stream_info_err:
2355 for (i = 0; i < ic->nb_streams; i++) {
2356 ic->streams[i]->codec->thread_count = 0;
2357 av_freep(&ic->streams[i]->info);
2358 }
2359 return ret;
2360 }
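
/* Illustration only: typical caller-side use of the probing API, assuming a
 * hypothetical input file name.
 *
 *     AVFormatContext *fmt = NULL;
 *     if (avformat_open_input(&fmt, "input.mkv", NULL, NULL) < 0)
 *         return -1;
 *     if (avformat_find_stream_info(fmt, NULL) < 0) {
 *         avformat_close_input(&fmt);
 *         return -1;
 *     }
 *     av_dump_format(fmt, 0, "input.mkv", 0); // streams now carry parameters
 *     avformat_close_input(&fmt);
 */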
2361
2362 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2363 {
2364 int i, j;
2365
2366 for (i = 0; i < ic->nb_programs; i++)
2367 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2368 if (ic->programs[i]->stream_index[j] == s)
2369 return ic->programs[i];
2370 return NULL;
2371 }
2372
2373 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2374 int wanted_stream_nb, int related_stream,
2375 AVCodec **decoder_ret, int flags)
2376 {
2377 int i, nb_streams = ic->nb_streams;
2378 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2379 unsigned *program = NULL;
2380 AVCodec *decoder = NULL, *best_decoder = NULL;
2381
2382 if (related_stream >= 0 && wanted_stream_nb < 0) {
2383 AVProgram *p = find_program_from_stream(ic, related_stream);
2384 if (p) {
2385 program = p->stream_index;
2386 nb_streams = p->nb_stream_indexes;
2387 }
2388 }
2389 for (i = 0; i < nb_streams; i++) {
2390 int real_stream_index = program ? program[i] : i;
2391 AVStream *st = ic->streams[real_stream_index];
2392 AVCodecContext *avctx = st->codec;
2393 if (avctx->codec_type != type)
2394 continue;
2395 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2396 continue;
2397 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2398 AV_DISPOSITION_VISUAL_IMPAIRED))
2399 continue;
2400 if (decoder_ret) {
2401 decoder = avcodec_find_decoder(st->codec->codec_id);
2402 if (!decoder) {
2403 if (ret < 0)
2404 ret = AVERROR_DECODER_NOT_FOUND;
2405 continue;
2406 }
2407 }
2408 if (best_count >= st->codec_info_nb_frames)
2409 continue;
2410 best_count = st->codec_info_nb_frames;
2411 ret = real_stream_index;
2412 best_decoder = decoder;
2413 if (program && i == nb_streams - 1 && ret < 0) {
2414 program = NULL;
2415 nb_streams = ic->nb_streams;
2416 /* no related stream found, try again with everything */
2417 i = 0;
2418 }
2419 }
2420 if (decoder_ret)
2421 *decoder_ret = best_decoder;
2422 return ret;
2423 }
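
/* Illustration only: picking the preferred video stream and a matching decoder
 * in one call, assuming fmt is an opened and probed AVFormatContext.
 *
 *     AVCodec *dec = NULL;
 *     int idx = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (idx < 0)
 *         return idx; // AVERROR_STREAM_NOT_FOUND or AVERROR_DECODER_NOT_FOUND
 *     // fmt->streams[idx] is the chosen stream, dec a decoder for it
 */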
2424
2425 /*******************************************************/
2426
2427 int av_read_play(AVFormatContext *s)
2428 {
2429 if (s->iformat->read_play)
2430 return s->iformat->read_play(s);
2431 if (s->pb)
2432 return avio_pause(s->pb, 0);
2433 return AVERROR(ENOSYS);
2434 }
2435
2436 int av_read_pause(AVFormatContext *s)
2437 {
2438 if (s->iformat->read_pause)
2439 return s->iformat->read_pause(s);
2440 if (s->pb)
2441 return avio_pause(s->pb, 1);
2442 return AVERROR(ENOSYS);
2443 }
2444
2445 void avformat_free_context(AVFormatContext *s)
2446 {
2447 int i, j;
2448 AVStream *st;
2449
2450 if (!s)
2451 return;
2452
2453 av_opt_free(s);
2454 if (s->iformat && s->iformat->priv_class && s->priv_data)
2455 av_opt_free(s->priv_data);
2456
2457 for (i = 0; i < s->nb_streams; i++) {
2458 /* free all data in a stream component */
2459 st = s->streams[i];
2460
2461 for (j = 0; j < st->nb_side_data; j++)
2462 av_freep(&st->side_data[j].data);
2463 av_freep(&st->side_data);
2464 st->nb_side_data = 0;
2465
2466 if (st->parser) {
2467 av_parser_close(st->parser);
2468 }
2469 if (st->attached_pic.data)
2470 av_packet_unref(&st->attached_pic);
2471 av_dict_free(&st->metadata);
2472 av_freep(&st->probe_data.buf);
2473 av_free(st->index_entries);
2474 av_free(st->codec->extradata);
2475 av_free(st->codec->subtitle_header);
2476 av_free(st->codec);
2477 av_free(st->priv_data);
2478 av_free(st->info);
2479 av_free(st);
2480 }
2481 for (i = s->nb_programs - 1; i >= 0; i--) {
2482 av_dict_free(&s->programs[i]->metadata);
2483 av_freep(&s->programs[i]->stream_index);
2484 av_freep(&s->programs[i]);
2485 }
2486 av_freep(&s->programs);
2487 av_freep(&s->priv_data);
2488 while (s->nb_chapters--) {
2489 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2490 av_free(s->chapters[s->nb_chapters]);
2491 }
2492 av_freep(&s->chapters);
2493 av_dict_free(&s->metadata);
2494 av_freep(&s->streams);
2495 av_freep(&s->internal);
2496 av_free(s);
2497 }
2498
2499 void avformat_close_input(AVFormatContext **ps)
2500 {
2501 AVFormatContext *s = *ps;
2502 AVIOContext *pb = s->pb;
2503
2504 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2505 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2506 pb = NULL;
2507
2508 flush_packet_queue(s);
2509
2510 if (s->iformat)
2511 if (s->iformat->read_close)
2512 s->iformat->read_close(s);
2513
2514 avformat_free_context(s);
2515
2516 *ps = NULL;
2517
2518 avio_close(pb);
2519 }
2520
2521 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2522 {
2523 AVStream *st;
2524 int i;
2525
2526 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2527 sizeof(*s->streams)) < 0) {
2528 s->nb_streams = 0;
2529 return NULL;
2530 }
2531
2532 st = av_mallocz(sizeof(AVStream));
2533 if (!st)
2534 return NULL;
2535 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2536 av_free(st);
2537 return NULL;
2538 }
2539
2540 st->codec = avcodec_alloc_context3(c);
2541 if (!st->codec) {
2542 av_free(st->info);
2543 av_free(st);
2544 return NULL;
2545 }
2546 if (s->iformat) {
2547 /* no default bitrate if decoding */
2548 st->codec->bit_rate = 0;
2549
2550 /* default pts setting is MPEG-like */
2551 avpriv_set_pts_info(st, 33, 1, 90000);
2552 }
2553
2554 st->index = s->nb_streams;
2555 st->start_time = AV_NOPTS_VALUE;
2556 st->duration = AV_NOPTS_VALUE;
2557 /* We set the current DTS to 0 so that formats that have only durations,
2558 * but no timestamps, still get some timestamps; formats with some unknown
2559 * timestamps have their first few packets buffered and the
2560 * timestamps corrected before they are returned to the user. */
2561 st->cur_dts = 0;
2562 st->first_dts = AV_NOPTS_VALUE;
2563 st->probe_packets = MAX_PROBE_PACKETS;
2564
2565 st->last_IP_pts = AV_NOPTS_VALUE;
2566 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2567 st->pts_buffer[i] = AV_NOPTS_VALUE;
2568
2569 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2570
2571 st->info->fps_first_dts = AV_NOPTS_VALUE;
2572 st->info->fps_last_dts = AV_NOPTS_VALUE;
2573
2574 s->streams[s->nb_streams++] = st;
2575 return st;
2576 }
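
/* Illustration only: adding a stream on the muxing side, assuming oc is an
 * output AVFormatContext and enc a chosen encoder.
 *
 *     AVStream *out = avformat_new_stream(oc, enc);
 *     if (!out)
 *         return AVERROR(ENOMEM);
 *     out->codec->codec_id   = enc->id;
 *     out->codec->codec_type = enc->type;
 *     // fill in the remaining codec parameters before avformat_write_header()
 */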
2577
2578 AVProgram *av_new_program(AVFormatContext *ac, int id)
2579 {
2580 AVProgram *program = NULL;
2581 int i;
2582
2583 av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
2584
2585 for (i = 0; i < ac->nb_programs; i++)
2586 if (ac->programs[i]->id == id)
2587 program = ac->programs[i];
2588
2589 if (!program) {
2590 program = av_mallocz(sizeof(AVProgram));
2591 if (!program)
2592 return NULL;
2593 dynarray_add(&ac->programs, &ac->nb_programs, program);
2594 program->discard = AVDISCARD_NONE;
2595 }
2596 program->id = id;
2597
2598 return program;
2599 }
2600
2601 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2602 int64_t start, int64_t end, const char *title)
2603 {
2604 AVChapter *chapter = NULL;
2605 int i;
2606
2607 for (i = 0; i < s->nb_chapters; i++)
2608 if (s->chapters[i]->id == id)
2609 chapter = s->chapters[i];
2610
2611 if (!chapter) {
2612 chapter = av_mallocz(sizeof(AVChapter));
2613 if (!chapter)
2614 return NULL;
2615 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2616 }
2617 av_dict_set(&chapter->metadata, "title", title, 0);
2618 chapter->id = id;
2619 chapter->time_base = time_base;
2620 chapter->start = start;
2621 chapter->end = end;
2622
2623 return chapter;
2624 }
2625
2626 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2627 {
2628 int i, j;
2629 AVProgram *program = NULL;
2630
2631 if (idx >= ac->nb_streams) {
2632 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2633 return;
2634 }
2635
2636 for (i = 0; i < ac->nb_programs; i++) {
2637 if (ac->programs[i]->id != progid)
2638 continue;
2639 program = ac->programs[i];
2640 for (j = 0; j < program->nb_stream_indexes; j++)
2641 if (program->stream_index[j] == idx)
2642 return;
2643
2644 if (av_reallocp_array(&program->stream_index,
2645 program->nb_stream_indexes + 1,
2646 sizeof(*program->stream_index)) < 0) {
2647 program->nb_stream_indexes = 0;
2648 return;
2649 }
2650 program->stream_index[program->nb_stream_indexes++] = idx;
2651 return;
2652 }
2653 }
2654
2655 uint64_t ff_ntp_time(void)
2656 {
2657 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
2658 }
2659
2660 int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
2661 {
2662 const char *p;
2663 char *q, buf1[20], c;
2664 int nd, len, percentd_found;
2665
2666 q = buf;
2667 p = path;
2668 percentd_found = 0;
2669 for (;;) {
2670 c = *p++;
2671 if (c == '\0')
2672 break;
2673 if (c == '%') {
2674 do {
2675 nd = 0;
2676 while (av_isdigit(*p))
2677 nd = nd * 10 + *p++ - '0';
2678 c = *p++;
2679 } while (av_isdigit(c));
2680
2681 switch (c) {
2682 case '%':
2683 goto addchar;
2684 case 'd':
2685 if (percentd_found)
2686 goto fail;
2687 percentd_found = 1;
2688 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2689 len = strlen(buf1);
2690 if ((q - buf + len) > buf_size - 1)
2691 goto fail;
2692 memcpy(q, buf1, len);
2693 q += len;
2694 break;
2695 default:
2696 goto fail;
2697 }
2698 } else {
2699 addchar:
2700 if ((q - buf) < buf_size - 1)
2701 *q++ = c;
2702 }
2703 }
2704 if (!percentd_found)
2705 goto fail;
2706 *q = '\0';
2707 return 0;
2708 fail:
2709 *q = '\0';
2710 return -1;
2711 }
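
/* Illustration only: expanding a %d-style frame number pattern.
 *
 *     char name[64];
 *     if (av_get_frame_filename(name, sizeof(name), "frame%05d.png", 42) == 0) {
 *         // name now holds "frame00042.png"
 *     }
 *     // a pattern without %d, or with more than one, makes the call return -1
 */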
2712
2713 void av_url_split(char *proto, int proto_size,
2714 char *authorization, int authorization_size,
2715 char *hostname, int hostname_size,
2716 int *port_ptr, char *path, int path_size, const char *url)
2717 {
2718 const char *p, *ls, *at, *col, *brk;
2719
2720 if (port_ptr)
2721 *port_ptr = -1;
2722 if (proto_size > 0)
2723 proto[0] = 0;
2724 if (authorization_size > 0)
2725 authorization[0] = 0;
2726 if (hostname_size > 0)
2727 hostname[0] = 0;
2728 if (path_size > 0)
2729 path[0] = 0;
2730
2731 /* parse protocol */
2732 if ((p = strchr(url, ':'))) {
2733 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
2734 p++; /* skip ':' */
2735 if (*p == '/')
2736 p++;
2737 if (*p == '/')
2738 p++;
2739 } else {
2740 /* no protocol means plain filename */
2741 av_strlcpy(path, url, path_size);
2742 return;
2743 }
2744
2745 /* separate path from hostname */
2746 ls = strchr(p, '/');
2747 if (!ls)
2748 ls = strchr(p, '?');
2749 if (ls)
2750 av_strlcpy(path, ls, path_size);
2751 else
2752 ls = &p[strlen(p)]; // XXX
2753
2754 /* the rest is hostname, use that to parse auth/port */
2755 if (ls != p) {
2756 /* authorization (user[:pass]@hostname) */
2757 if ((at = strchr(p, '@')) && at < ls) {
2758 av_strlcpy(authorization, p,
2759 FFMIN(authorization_size, at + 1 - p));
2760 p = at + 1; /* skip '@' */
2761 }
2762
2763 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
2764 /* [host]:port */
2765 av_strlcpy(hostname, p + 1,
2766 FFMIN(hostname_size, brk - p));
2767 if (brk[1] == ':' && port_ptr)
2768 *port_ptr = atoi(brk + 2);
2769 } else if ((col = strchr(p, ':')) && col < ls) {
2770 av_strlcpy(hostname, p,
2771 FFMIN(col + 1 - p, hostname_size));
2772 if (port_ptr)
2773 *port_ptr = atoi(col + 1);
2774 } else
2775 av_strlcpy(hostname, p,
2776 FFMIN(ls + 1 - p, hostname_size));
2777 }
2778 }
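
/* Illustration only: splitting a URL into its components; the credentials and
 * host below are hypothetical.
 *
 *     char proto[8], auth[64], host[64], path[256];
 *     int port;
 *     av_url_split(proto, sizeof(proto), auth, sizeof(auth),
 *                  host, sizeof(host), &port, path, sizeof(path),
 *                  "rtsp://user:pass@example.com:554/stream?x=1");
 *     // proto = "rtsp", auth = "user:pass", host = "example.com",
 *     // port = 554, path = "/stream?x=1"
 */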
2779
2780 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
2781 {
2782 int i;
2783 static const char hex_table_uc[16] = { '0', '1', '2', '3',
2784 '4', '5', '6', '7',
2785 '8', '9', 'A', 'B',
2786 'C', 'D', 'E', 'F' };
2787 static const char hex_table_lc[16] = { '0', '1', '2', '3',
2788 '4', '5', '6', '7',
2789 '8', '9', 'a', 'b',
2790 'c', 'd', 'e', 'f' };
2791 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
2792
2793 for (i = 0; i < s; i++) {
2794 buff[i * 2] = hex_table[src[i] >> 4];
2795 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
2796 }
2797
2798 return buff;
2799 }
2800
2801 int ff_hex_to_data(uint8_t *data, const char *p)
2802 {
2803 int c, len, v;
2804
2805 len = 0;
2806 v = 1;
2807 for (;;) {
2808 p += strspn(p, SPACE_CHARS);
2809 if (*p == '\0')
2810 break;
2811 c = av_toupper((unsigned char) *p++);
2812 if (c >= '0' && c <= '9')
2813 c = c - '0';
2814 else if (c >= 'A' && c <= 'F')
2815 c = c - 'A' + 10;
2816 else
2817 break;
2818 v = (v << 4) | c;
2819 if (v & 0x100) {
2820 if (data)
2821 data[len] = v;
2822 len++;
2823 v = 1;
2824 }
2825 }
2826 return len;
2827 }
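
/* Illustration only: round-tripping bytes through the hex helpers. Note that
 * ff_data_to_hex() does not NUL-terminate its output.
 *
 *     uint8_t bin[16];
 *     char    hex[2 * sizeof(bin) + 1];
 *     int n = ff_hex_to_data(bin, "DE AD be ef"); // n == 4; spaces and case are ignored
 *     ff_data_to_hex(hex, bin, n, 1);
 *     hex[2 * n] = '\0';
 *     // hex now holds "deadbeef"
 */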
2828
2829 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
2830 unsigned int pts_num, unsigned int pts_den)
2831 {
2832 AVRational new_tb;
2833 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
2834 if (new_tb.num != pts_num)
2835 av_log(NULL, AV_LOG_DEBUG,
2836 "st:%d removing common factor %d from timebase\n",
2837 s->index, pts_num / new_tb.num);
2838 } else
2839 av_log(NULL, AV_LOG_WARNING,
2840 "st:%d has too large timebase, reducing\n", s->index);
2841
2842 if (new_tb.num <= 0 || new_tb.den <= 0) {
2843 av_log(NULL, AV_LOG_ERROR,
2844 "Ignoring attempt to set invalid timebase for st:%d\n",
2845 s->index);
2846 return;
2847 }
2848 s->time_base = new_tb;
2849 s->pts_wrap_bits = pts_wrap_bits;
2850 }
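
/* Illustration only: typical demuxer calls. The fraction is reduced before
 * being stored in st->time_base, so a redundant common factor is stripped
 * (with a debug log).
 *
 *     avpriv_set_pts_info(st, 64, 1, 1000);     // millisecond timestamps
 *     avpriv_set_pts_info(st, 33, 1001, 30000); // NTSC-style 30000/1001 fps timebase
 */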
2851
2852 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
2853 void *context)
2854 {
2855 const char *ptr = str;
2856
2857 /* Parse key=value pairs. */
2858 for (;;) {
2859 const char *key;
2860 char *dest = NULL, *dest_end;
2861 int key_len, dest_len = 0;
2862
2863 /* Skip whitespace and potential commas. */
2864 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
2865 ptr++;
2866 if (!*ptr)
2867 break;
2868
2869 key = ptr;
2870
2871 if (!(ptr = strchr(key, '=')))
2872 break;
2873 ptr++;
2874 key_len = ptr - key;
2875
2876 callback_get_buf(context, key, key_len, &dest, &dest_len);
2877 dest_end = dest + dest_len - 1;
2878
2879 if (*ptr == '\"') {
2880 ptr++;
2881 while (*ptr && *ptr != '\"') {
2882 if (*ptr == '\\') {
2883 if (!ptr[1])
2884 break;
2885 if (dest && dest < dest_end)
2886 *dest++ = ptr[1];
2887 ptr += 2;
2888 } else {
2889 if (dest && dest < dest_end)
2890 *dest++ = *ptr;
2891 ptr++;
2892 }
2893 }
2894 if (*ptr == '\"')
2895 ptr++;
2896 } else {
2897 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
2898 if (dest && dest < dest_end)
2899 *dest++ = *ptr;
2900 }
2901 if (dest)
2902 *dest = 0;
2903 }
2904 }
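
/* Illustration only: a minimal callback that copies the value of a single,
 * hypothetical "user" key into a fixed buffer. Note that key_len includes the
 * trailing '='.
 *
 *     static void get_value_buf(void *ctx, const char *key, int key_len,
 *                               char **dest, int *dest_len)
 *     {
 *         char *user = ctx;
 *         if (key_len == 5 && !strncmp(key, "user=", key_len)) {
 *             *dest     = user;
 *             *dest_len = 32;
 *         }
 *     }
 *
 *     char user[32] = { 0 };
 *     ff_parse_key_value("user=\"alice\", mode=live", get_value_buf, user);
 *     // user now holds "alice"; the unrequested "mode" key is skipped
 */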
2905
2906 int ff_find_stream_index(AVFormatContext *s, int id)
2907 {
2908 int i;
2909 for (i = 0; i < s->nb_streams; i++)
2910 if (s->streams[i]->id == id)
2911 return i;
2912 return -1;
2913 }
2914
2915 int64_t ff_iso8601_to_unix_time(const char *datestr)
2916 {
2917 struct tm time1 = { 0 }, time2 = { 0 };
2918 const char *ret1, *ret2;
2919 ret1 = av_small_strptime(datestr, "%Y - %m - %d %T", &time1);
2920 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%T", &time2);
2921 if (ret2 && !ret1)
2922 return av_timegm(&time2);
2923 else
2924 return av_timegm(&time1);
2925 }
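
/* Illustration only: both "YYYY-MM-DD hh:mm:ss" and the ISO 8601
 * "YYYY-MM-DDThh:mm:ss" forms are attempted; the result is in seconds since
 * the Unix epoch (UTC), e.g.
 *
 *     int64_t t = ff_iso8601_to_unix_time("2016-01-01T00:00:00");
 *     // t == 1451606400
 */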
2926
2927 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
2928 int std_compliance)
2929 {
2930 if (ofmt) {
2931 if (ofmt->query_codec)
2932 return ofmt->query_codec(codec_id, std_compliance);
2933 else if (ofmt->codec_tag)
2934 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
2935 else if (codec_id == ofmt->video_codec ||
2936 codec_id == ofmt->audio_codec ||
2937 codec_id == ofmt->subtitle_codec)
2938 return 1;
2939 }
2940 return AVERROR_PATCHWELCOME;
2941 }
2942
2943 int avformat_network_init(void)
2944 {
2945 #if CONFIG_NETWORK
2946 int ret;
2947 ff_network_inited_globally = 1;
2948 if ((ret = ff_network_init()) < 0)
2949 return ret;
2950 ff_tls_init();
2951 #endif
2952 return 0;
2953 }
2954
2955 int avformat_network_deinit(void)
2956 {
2957 #if CONFIG_NETWORK
2958 ff_network_close();
2959 ff_tls_deinit();
2960 #endif
2961 return 0;
2962 }
2963
2964 int ff_add_param_change(AVPacket *pkt, int32_t channels,
2965 uint64_t channel_layout, int32_t sample_rate,
2966 int32_t width, int32_t height)
2967 {
2968 uint32_t flags = 0;
2969 int size = 4;
2970 uint8_t *data;
2971 if (!pkt)
2972 return AVERROR(EINVAL);
2973 if (channels) {
2974 size += 4;
2975 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
2976 }
2977 if (channel_layout) {
2978 size += 8;
2979 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
2980 }
2981 if (sample_rate) {
2982 size += 4;
2983 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
2984 }
2985 if (width || height) {
2986 size += 8;
2987 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
2988 }
2989 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
2990 if (!data)
2991 return AVERROR(ENOMEM);
2992 bytestream_put_le32(&data, flags);
2993 if (channels)
2994 bytestream_put_le32(&data, channels);
2995 if (channel_layout)
2996 bytestream_put_le64(&data, channel_layout);
2997 if (sample_rate)
2998 bytestream_put_le32(&data, sample_rate);
2999 if (width || height) {
3000 bytestream_put_le32(&data, width);
3001 bytestream_put_le32(&data, height);
3002 }
3003 return 0;
3004 }
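
/* Illustration only: announcing a sample rate change on an upcoming packet.
 * The side data begins with a little-endian flags word followed by one field
 * per set flag, in the order written above; 48000 here is an arbitrary value.
 *
 *     int ret = ff_add_param_change(pkt, 0, 0, 48000, 0, 0);
 *     if (ret < 0)
 *         return ret;
 *     // pkt now carries AV_PKT_DATA_PARAM_CHANGE side data:
 *     // le32 AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE, le32 48000
 */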
3005
3006 int ff_generate_avci_extradata(AVStream *st)
3007 {
3008 static const uint8_t avci100_1080p_extradata[] = {
3009 // SPS
3010 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3011 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3012 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3013 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3014 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3015 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3016 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3017 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3018 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3019 // PPS
3020 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3021 0xd0
3022 };
3023 static const uint8_t avci100_1080i_extradata[] = {
3024 // SPS
3025 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3026 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3027 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3028 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3029 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3030 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3031 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3032 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3033 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3034 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3035 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
3036 // PPS
3037 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3038 0xd0
3039 };
3040 static const uint8_t avci50_1080i_extradata[] = {
3041 // SPS
3042 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3043 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3044 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3045 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3046 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3047 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3048 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3049 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3050 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3051 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3052 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
3053 // PPS
3054 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
3055 0x11
3056 };
3057 static const uint8_t avci100_720p_extradata[] = {
3058 // SPS
3059 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3060 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3061 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3062 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3063 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3064 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3065 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3066 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3067 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3068 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
3069 // PPS
3070 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
3071 0x11
3072 };
3073
3074 const uint8_t *data = NULL;
3075 int size = 0;
3076
3077 if (st->codec->width == 1920) {
3078 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
3079 data = avci100_1080p_extradata;
3080 size = sizeof(avci100_1080p_extradata);
3081 } else {
3082 data = avci100_1080i_extradata;
3083 size = sizeof(avci100_1080i_extradata);
3084 }
3085 } else if (st->codec->width == 1440) {
3086 data = avci50_1080i_extradata;
3087 size = sizeof(avci50_1080i_extradata);
3088 } else if (st->codec->width == 1280) {
3089 data = avci100_720p_extradata;
3090 size = sizeof(avci100_720p_extradata);
3091 }
3092
3093 if (!size)
3094 return 0;
3095
3096 av_freep(&st->codec->extradata);
3097 st->codec->extradata_size = 0;
3098 st->codec->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
3099 if (!st->codec->extradata)
3100 return AVERROR(ENOMEM);
3101
3102 memcpy(st->codec->extradata, data, size);
3103 st->codec->extradata_size = size;
3104
3105 return 0;
3106 }
3107
3108 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3109 int *size)
3110 {
3111 int i;
3112
3113 for (i = 0; i < st->nb_side_data; i++) {
3114 if (st->side_data[i].type == type) {
3115 if (size)
3116 *size = st->side_data[i].size;
3117 return st->side_data[i].data;
3118 }
3119 }
3120 return NULL;
3121 }
3122
3123 uint8_t *ff_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type,
3124 int size)
3125 {
3126 AVPacketSideData *sd, *tmp;
3127 int i;
3128 uint8_t *data = av_malloc(size);
3129
3130 if (!data)
3131 return NULL;
3132
3133 for (i = 0; i < st->nb_side_data; i++) {
3134 sd = &st->side_data[i];
3135
3136 if (sd->type == type) {
3137 av_freep(&sd->data);
3138 sd->data = data;
3139 sd->size = size;
3140 return sd->data;
3141 }
3142 }
3143
3144 tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
3145 if (!tmp) {
3146 av_freep(&data);
3147 return NULL;
3148 }
3149
3150 st->side_data = tmp;
3151 st->nb_side_data++;
3152
3153 sd = &st->side_data[st->nb_side_data - 1];
3154 sd->type = type;
3155 sd->data = data;
3156 sd->size = size;
3157 return data;
3158 }