lavc: Make AVPacket.duration int64, and deprecate convergence_duration
[libav.git] / libavformat / utils.c
1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #undef NDEBUG
23 #include <assert.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26
27 #include "config.h"
28
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
38
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
41
42 #include "audiointerleave.h"
43 #include "avformat.h"
44 #include "id3v2.h"
45 #include "internal.h"
46 #include "metadata.h"
47 #if CONFIG_NETWORK
48 #include "network.h"
49 #endif
50 #include "riff.h"
51 #include "url.h"
52
53 /**
54 * @file
55 * various utility functions for use within Libav
56 */
57
58 unsigned avformat_version(void)
59 {
60 return LIBAVFORMAT_VERSION_INT;
61 }
62
63 const char *avformat_configuration(void)
64 {
65 return LIBAV_CONFIGURATION;
66 }
67
68 const char *avformat_license(void)
69 {
70 #define LICENSE_PREFIX "libavformat license: "
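/* The adjacent string literals concatenate to "libavformat license: <license>";
 * advancing the pointer past the prefix returns only the license name, while
 * the full tagged string still ends up embedded in the binary. */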
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
72 }
73
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
76
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
79 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
80 {
81 int64_t chunk_size = size;
82 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
83 int orig_size = pkt->size;
84 int ret = 0;
85
86 do {
87 int prev_size = pkt->size;
88 int read_size;
89
90 /* When the caller requests a lot of data, limit it to the amount
91 * left in file or SANE_CHUNK_SIZE when it is not known. */
92 if (size > SANE_CHUNK_SIZE) {
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
95 }
96 read_size = FFMIN(size, chunk_size);
97
98 ret = av_grow_packet(pkt, read_size);
99 if (ret < 0)
100 break;
101
102 ret = avio_read(s, pkt->data + prev_size, read_size);
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
105 break;
106 }
107
108 size -= read_size;
109 } while (size > 0);
110
111 pkt->pos = orig_pos;
112 if (!pkt->size)
113 av_free_packet(pkt);
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
115 }
116
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
118 {
119 av_init_packet(pkt);
120 pkt->data = NULL;
121 pkt->size = 0;
122 pkt->pos = avio_tell(s);
123
124 return append_packet_chunked(s, pkt, size);
125 }
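/* Illustrative use, not taken from this file: let av_get_packet() allocate and
 * size the packet, then release it with av_free_packet() (pb is assumed to be
 * an already opened AVIOContext):
 *
 *     AVPacket pkt;
 *     int ret = av_get_packet(pb, &pkt, 4096);
 *     if (ret >= 0) {
 *         // up to ret bytes are available at pkt.data
 *         av_free_packet(&pkt);
 *     }
 */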
126
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
128 {
129 if (!pkt->size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
132 }
133
134 int av_filename_number_test(const char *filename)
135 {
136 char buf[1024];
137 return filename &&
138 (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
139 }
140
141 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
142 AVProbeData *pd, int score)
143 {
144 static const struct {
145 const char *name;
146 enum AVCodecID id;
147 enum AVMediaType type;
148 } fmt_id_type[] = {
149 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
150 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
151 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
152 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
153 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
154 { "latm", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
155 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
156 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
157 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
158 { 0 }
159 };
160 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
161
162 if (fmt) {
163 int i;
164 av_log(s, AV_LOG_DEBUG,
165 "Probe with size=%d, packets=%d detected %s with score=%d\n",
166 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
167 fmt->name, score);
168 for (i = 0; fmt_id_type[i].name; i++) {
169 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
170 st->codec->codec_id = fmt_id_type[i].id;
171 st->codec->codec_type = fmt_id_type[i].type;
172 break;
173 }
174 }
175 }
176 return !!fmt;
177 }
178
179 /************************************************************/
180 /* input media file */
181
182 /* Open input file and probe the format if necessary. */
183 static int init_input(AVFormatContext *s, const char *filename,
184 AVDictionary **options)
185 {
186 int ret;
187 AVProbeData pd = { filename, NULL, 0 };
188
189 if (s->pb) {
190 s->flags |= AVFMT_FLAG_CUSTOM_IO;
191 if (!s->iformat)
192 return av_probe_input_buffer(s->pb, &s->iformat, filename,
193 s, 0, s->probesize);
194 else if (s->iformat->flags & AVFMT_NOFILE)
195 return AVERROR(EINVAL);
196 return 0;
197 }
198
199 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
200 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
201 return 0;
202
203 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
204 &s->interrupt_callback, options)) < 0)
205 return ret;
206 if (s->iformat)
207 return 0;
208 return av_probe_input_buffer(s->pb, &s->iformat, filename,
209 s, 0, s->probesize);
210 }
211
212 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
213 AVPacketList **plast_pktl)
214 {
215 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
216 if (!pktl)
217 return NULL;
218
219 if (*packet_buffer)
220 (*plast_pktl)->next = pktl;
221 else
222 *packet_buffer = pktl;
223
224 /* Add the packet in the buffered packet list. */
225 *plast_pktl = pktl;
226 pktl->pkt = *pkt;
227 return &pktl->pkt;
228 }
229
230 static int queue_attached_pictures(AVFormatContext *s)
231 {
232 int i;
233 for (i = 0; i < s->nb_streams; i++)
234 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
235 s->streams[i]->discard < AVDISCARD_ALL) {
236 AVPacket copy = s->streams[i]->attached_pic;
237 copy.buf = av_buffer_ref(copy.buf);
238 if (!copy.buf)
239 return AVERROR(ENOMEM);
240
241 add_to_pktbuf(&s->internal->raw_packet_buffer, &copy,
242 &s->internal->raw_packet_buffer_end);
243 }
244 return 0;
245 }
246
247 int avformat_open_input(AVFormatContext **ps, const char *filename,
248 AVInputFormat *fmt, AVDictionary **options)
249 {
250 AVFormatContext *s = *ps;
251 int ret = 0;
252 AVDictionary *tmp = NULL;
253 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
254
255 if (!s && !(s = avformat_alloc_context()))
256 return AVERROR(ENOMEM);
257 if (fmt)
258 s->iformat = fmt;
259
260 if (options)
261 av_dict_copy(&tmp, *options, 0);
262
263 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
264 goto fail;
265
266 if ((ret = init_input(s, filename, &tmp)) < 0)
267 goto fail;
268
269 /* Check filename in case an image number is expected. */
270 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
271 if (!av_filename_number_test(filename)) {
272 ret = AVERROR(EINVAL);
273 goto fail;
274 }
275 }
276
277 s->duration = s->start_time = AV_NOPTS_VALUE;
278 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
279
280 /* Allocate private data. */
281 if (s->iformat->priv_data_size > 0) {
282 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
283 ret = AVERROR(ENOMEM);
284 goto fail;
285 }
286 if (s->iformat->priv_class) {
287 *(const AVClass **) s->priv_data = s->iformat->priv_class;
288 av_opt_set_defaults(s->priv_data);
289 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
290 goto fail;
291 }
292 }
293
294 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
295 if (s->pb)
296 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
297
298 if (s->iformat->read_header)
299 if ((ret = s->iformat->read_header(s)) < 0)
300 goto fail;
301
302 if (id3v2_extra_meta &&
303 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
304 goto fail;
305 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
306
307 if ((ret = queue_attached_pictures(s)) < 0)
308 goto fail;
309
310 if (s->pb && !s->internal->data_offset)
311 s->internal->data_offset = avio_tell(s->pb);
312
313 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
314
315 if (options) {
316 av_dict_free(options);
317 *options = tmp;
318 }
319 *ps = s;
320 return 0;
321
322 fail:
323 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
324 av_dict_free(&tmp);
325 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
326 avio_close(s->pb);
327 avformat_free_context(s);
328 *ps = NULL;
329 return ret;
330 }
331
332 /*******************************************************/
333
334 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
335 {
336 if (st->codec->codec_id == AV_CODEC_ID_PROBE) {
337 AVProbeData *pd = &st->probe_data;
338 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
339 --st->probe_packets;
340
341 if (pkt) {
342 int err;
343 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
344 AVPROBE_PADDING_SIZE)) < 0)
345 return err;
346 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
347 pd->buf_size += pkt->size;
348 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
349 } else {
350 st->probe_packets = 0;
351 if (!pd->buf_size) {
352 av_log(s, AV_LOG_ERROR,
353 "nothing to probe for stream %d\n", st->index);
354 return 0;
355 }
356 }
357
358 if (!st->probe_packets ||
359 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
360 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
361 ? AVPROBE_SCORE_MAX / 4 : 0);
362 if (st->codec->codec_id != AV_CODEC_ID_PROBE) {
363 pd->buf_size = 0;
364 av_freep(&pd->buf);
365 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
366 }
367 }
368 }
369 return 0;
370 }
371
372 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
373 {
374 int ret, i, err;
375 AVStream *st;
376
377 for (;;) {
378 AVPacketList *pktl = s->internal->raw_packet_buffer;
379
380 if (pktl) {
381 *pkt = pktl->pkt;
382 st = s->streams[pkt->stream_index];
383 if (st->codec->codec_id != AV_CODEC_ID_PROBE ||
384 !st->probe_packets ||
385 s->internal->raw_packet_buffer_remaining_size < pkt->size) {
386 AVProbeData *pd;
387 if (st->probe_packets)
388 if ((err = probe_codec(s, st, NULL)) < 0)
389 return err;
390 pd = &st->probe_data;
391 av_freep(&pd->buf);
392 pd->buf_size = 0;
393 s->internal->raw_packet_buffer = pktl->next;
394 s->internal->raw_packet_buffer_remaining_size += pkt->size;
395 av_free(pktl);
396 return 0;
397 }
398 }
399
400 pkt->data = NULL;
401 pkt->size = 0;
402 av_init_packet(pkt);
403 ret = s->iformat->read_packet(s, pkt);
404 if (ret < 0) {
405 if (!pktl || ret == AVERROR(EAGAIN))
406 return ret;
407 for (i = 0; i < s->nb_streams; i++) {
408 st = s->streams[i];
409 if (st->probe_packets)
410 if ((err = probe_codec(s, st, NULL)) < 0)
411 return err;
412 }
413 continue;
414 }
415
416 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
417 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
418 av_log(s, AV_LOG_WARNING,
419 "Dropped corrupted packet (stream = %d)\n",
420 pkt->stream_index);
421 av_free_packet(pkt);
422 continue;
423 }
424
425 st = s->streams[pkt->stream_index];
426
427 switch (st->codec->codec_type) {
428 case AVMEDIA_TYPE_VIDEO:
429 if (s->video_codec_id)
430 st->codec->codec_id = s->video_codec_id;
431 break;
432 case AVMEDIA_TYPE_AUDIO:
433 if (s->audio_codec_id)
434 st->codec->codec_id = s->audio_codec_id;
435 break;
436 case AVMEDIA_TYPE_SUBTITLE:
437 if (s->subtitle_codec_id)
438 st->codec->codec_id = s->subtitle_codec_id;
439 break;
440 }
441
442 if (!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
443 !st->probe_packets))
444 return ret;
445
446 add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
447 &s->internal->raw_packet_buffer_end);
448 s->internal->raw_packet_buffer_remaining_size -= pkt->size;
449
450 if ((err = probe_codec(s, st, pkt)) < 0)
451 return err;
452 }
453 }
454
455 /**********************************************************/
456
457 /**
458 * Compute the frame duration as the fraction *pnum / *pden, in seconds; set both to 0 if it is not available.
459 */
460 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
461 AVCodecParserContext *pc, AVPacket *pkt)
462 {
463 AVRational codec_framerate = s->iformat ? st->codec->framerate :
464 av_inv_q(st->codec->time_base);
465 int frame_size;
466
467 *pnum = 0;
468 *pden = 0;
469 switch (st->codec->codec_type) {
470 case AVMEDIA_TYPE_VIDEO:
471 if (st->avg_frame_rate.num) {
472 *pnum = st->avg_frame_rate.den;
473 *pden = st->avg_frame_rate.num;
474 } else if (st->time_base.num * 1000LL > st->time_base.den) {
475 *pnum = st->time_base.num;
476 *pden = st->time_base.den;
477 } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
478 *pnum = codec_framerate.den;
479 *pden = codec_framerate.num;
480 if (pc && pc->repeat_pict) {
481 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
482 *pden /= 1 + pc->repeat_pict;
483 else
484 *pnum *= 1 + pc->repeat_pict;
485 }
486 /* If this codec can be interlaced or progressive then we need
487 * a parser to compute the duration of a packet. If we have no
488 * parser in that case, leave the duration undefined. */
489 if (st->codec->ticks_per_frame > 1 && !pc)
490 *pnum = *pden = 0;
491 }
492 break;
493 case AVMEDIA_TYPE_AUDIO:
494 frame_size = av_get_audio_frame_duration(st->codec, pkt->size);
495 if (frame_size <= 0 || st->codec->sample_rate <= 0)
496 break;
497 *pnum = frame_size;
498 *pden = st->codec->sample_rate;
499 break;
500 default:
501 break;
502 }
503 }
504
505 static int is_intra_only(enum AVCodecID id)
506 {
507 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
508 if (!d)
509 return 0;
510 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
511 return 0;
512 return 1;
513 }
514
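/* Once the first dts of a stream becomes known, record the offset between it
 * and the dts values assumed so far (st->first_dts), apply that offset to the
 * packets already buffered for this stream, and derive the stream's start_time. */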
515 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
516 int64_t dts, int64_t pts)
517 {
518 AVStream *st = s->streams[stream_index];
519 AVPacketList *pktl = s->internal->packet_buffer;
520
521 if (st->first_dts != AV_NOPTS_VALUE ||
522 dts == AV_NOPTS_VALUE ||
523 st->cur_dts == AV_NOPTS_VALUE)
524 return;
525
526 st->first_dts = dts - st->cur_dts;
527 st->cur_dts = dts;
528
529 for (; pktl; pktl = pktl->next) {
530 if (pktl->pkt.stream_index != stream_index)
531 continue;
532 // FIXME: think more about this check
533 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
534 pktl->pkt.pts += st->first_dts;
535
536 if (pktl->pkt.dts != AV_NOPTS_VALUE)
537 pktl->pkt.dts += st->first_dts;
538
539 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
540 st->start_time = pktl->pkt.pts;
541 }
542 if (st->start_time == AV_NOPTS_VALUE)
543 st->start_time = pts;
544 }
545
546 static void update_initial_durations(AVFormatContext *s, AVStream *st,
547 int stream_index, int duration)
548 {
549 AVPacketList *pktl = s->internal->packet_buffer;
550 int64_t cur_dts = 0;
551
552 if (st->first_dts != AV_NOPTS_VALUE) {
553 cur_dts = st->first_dts;
554 for (; pktl; pktl = pktl->next) {
555 if (pktl->pkt.stream_index == stream_index) {
556 if (pktl->pkt.pts != pktl->pkt.dts ||
557 pktl->pkt.dts != AV_NOPTS_VALUE ||
558 pktl->pkt.duration)
559 break;
560 cur_dts -= duration;
561 }
562 }
563 pktl = s->internal->packet_buffer;
564 st->first_dts = cur_dts;
565 } else if (st->cur_dts)
566 return;
567
568 for (; pktl; pktl = pktl->next) {
569 if (pktl->pkt.stream_index != stream_index)
570 continue;
571 if (pktl->pkt.pts == pktl->pkt.dts &&
572 pktl->pkt.dts == AV_NOPTS_VALUE &&
573 !pktl->pkt.duration) {
574 pktl->pkt.dts = cur_dts;
575 if (!st->codec->has_b_frames)
576 pktl->pkt.pts = cur_dts;
577 cur_dts += duration;
578 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
579 pktl->pkt.duration = duration;
580 } else
581 break;
582 }
583 if (st->first_dts == AV_NOPTS_VALUE)
584 st->cur_dts = cur_dts;
585 }
586
587 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
588 AVCodecParserContext *pc, AVPacket *pkt)
589 {
590 int num, den, presentation_delayed, delay, i;
591 int64_t offset;
592
593 if (s->flags & AVFMT_FLAG_NOFILLIN)
594 return;
595
596 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
597 pkt->dts = AV_NOPTS_VALUE;
598
599 /* do we have a video B-frame? */
600 delay = st->codec->has_b_frames;
601 presentation_delayed = 0;
602
603 /* XXX: need has_b_frame, but cannot get it if the codec is
604 * not initialized */
605 if (delay &&
606 pc && pc->pict_type != AV_PICTURE_TYPE_B)
607 presentation_delayed = 1;
608
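/* If both pts and dts are set but dts is more than half the timestamp wrap
 * range ahead of pts, assume dts has wrapped and subtract one full wrap period
 * so the two are comparable again. */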
609 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
610 st->pts_wrap_bits < 63 &&
611 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
612 pkt->dts -= 1LL << st->pts_wrap_bits;
613 }
614
615 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
616 * We take the conservative approach and discard both.
617 * Note: If this is misbehaving for an H.264 file, then possibly
618 * presentation_delayed is not set correctly. */
619 if (delay == 1 && pkt->dts == pkt->pts &&
620 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
621 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
622 pkt->dts = AV_NOPTS_VALUE;
623 }
624
625 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
626 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
627 if (den && num) {
628 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
629 den * (int64_t) st->time_base.num,
630 AV_ROUND_DOWN);
631
632 if (pkt->duration != 0 && s->internal->packet_buffer)
633 update_initial_durations(s, st, pkt->stream_index,
634 pkt->duration);
635 }
636 }
637
638 /* Correct timestamps with byte offset if demuxers only have timestamps
639 * on packet boundaries */
640 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
641 /* this will estimate bitrate based on this frame's duration and size */
642 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
643 if (pkt->pts != AV_NOPTS_VALUE)
644 pkt->pts += offset;
645 if (pkt->dts != AV_NOPTS_VALUE)
646 pkt->dts += offset;
647 }
648
649 /* This may be redundant, but it should not hurt. */
650 if (pkt->dts != AV_NOPTS_VALUE &&
651 pkt->pts != AV_NOPTS_VALUE &&
652 pkt->pts > pkt->dts)
653 presentation_delayed = 1;
654
655 av_log(NULL, AV_LOG_TRACE,
656 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
657 "cur_dts:%"PRId64" st:%d pc:%p\n",
658 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
659 pkt->stream_index, pc);
660 /* Interpolate PTS and DTS if they are not present. We skip H.264
661 * currently because delay and has_b_frames are not reliably set. */
662 if ((delay == 0 || (delay == 1 && pc)) &&
663 st->codec->codec_id != AV_CODEC_ID_H264) {
664 if (presentation_delayed) {
665 /* DTS = decompression timestamp */
666 /* PTS = presentation timestamp */
667 if (pkt->dts == AV_NOPTS_VALUE)
668 pkt->dts = st->last_IP_pts;
669 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
670 if (pkt->dts == AV_NOPTS_VALUE)
671 pkt->dts = st->cur_dts;
672
673 /* This is tricky: the dts must be incremented by the duration
674 * of the frame we are displaying, i.e. the last I- or P-frame. */
675 if (st->last_IP_duration == 0)
676 st->last_IP_duration = pkt->duration;
677 if (pkt->dts != AV_NOPTS_VALUE)
678 st->cur_dts = pkt->dts + st->last_IP_duration;
679 st->last_IP_duration = pkt->duration;
680 st->last_IP_pts = pkt->pts;
681 /* Cannot compute the PTS if it is not present (we could
682 * compute it only by knowing the future). */
683 } else if (pkt->pts != AV_NOPTS_VALUE ||
684 pkt->dts != AV_NOPTS_VALUE ||
685 pkt->duration ||
686 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
687 int duration = pkt->duration;
688 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
689 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
690 if (den && num) {
691 duration = av_rescale_rnd(1,
692 num * (int64_t) st->time_base.den,
693 den * (int64_t) st->time_base.num,
694 AV_ROUND_DOWN);
695 if (duration != 0 && s->internal->packet_buffer)
696 update_initial_durations(s, st, pkt->stream_index,
697 duration);
698 }
699 }
700
701 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
702 duration) {
703 /* presentation is not delayed: PTS and DTS are the same */
704 if (pkt->pts == AV_NOPTS_VALUE)
705 pkt->pts = pkt->dts;
706 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
707 pkt->pts);
708 if (pkt->pts == AV_NOPTS_VALUE)
709 pkt->pts = st->cur_dts;
710 pkt->dts = pkt->pts;
711 if (pkt->pts != AV_NOPTS_VALUE)
712 st->cur_dts = pkt->pts + duration;
713 }
714 }
715 }
716
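/* Keep the most recent pts values in a small sorted buffer; with at most
 * `delay` frames of reordering, the smallest buffered value is a usable
 * estimate for a missing dts. */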
717 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
718 st->pts_buffer[0] = pkt->pts;
719 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
720 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
721 if (pkt->dts == AV_NOPTS_VALUE)
722 pkt->dts = st->pts_buffer[0];
723 // We skipped it above so we try here.
724 if (st->codec->codec_id == AV_CODEC_ID_H264)
725 // This should happen on the first packet
726 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
727 if (pkt->dts > st->cur_dts)
728 st->cur_dts = pkt->dts;
729 }
730
731 av_log(NULL, AV_LOG_TRACE,
732 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
733 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
734
735 /* update flags */
736 if (is_intra_only(st->codec->codec_id))
737 pkt->flags |= AV_PKT_FLAG_KEY;
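/* convergence_duration is deprecated by this commit; it is only propagated here
 * for compatibility while FF_API_CONVERGENCE_DURATION is set. */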
738 #if FF_API_CONVERGENCE_DURATION
739 FF_DISABLE_DEPRECATION_WARNINGS
740 if (pc)
741 pkt->convergence_duration = pc->convergence_duration;
742 FF_ENABLE_DEPRECATION_WARNINGS
743 #endif
744 }
745
746 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
747 {
748 while (*pkt_buf) {
749 AVPacketList *pktl = *pkt_buf;
750 *pkt_buf = pktl->next;
751 av_free_packet(&pktl->pkt);
752 av_freep(&pktl);
753 }
754 *pkt_buf_end = NULL;
755 }
756
757 /**
758 * Parse a packet, add all split parts to parse_queue.
759 *
760 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
761 */
762 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
763 {
764 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
765 AVStream *st = s->streams[stream_index];
766 uint8_t *data = pkt ? pkt->data : NULL;
767 int size = pkt ? pkt->size : 0;
768 int ret = 0, got_output = 0;
769
770 if (!pkt) {
771 av_init_packet(&flush_pkt);
772 pkt = &flush_pkt;
773 got_output = 1;
774 }
775
776 while (size > 0 || (pkt == &flush_pkt && got_output)) {
777 int len;
778
779 av_init_packet(&out_pkt);
780 len = av_parser_parse2(st->parser, st->codec,
781 &out_pkt.data, &out_pkt.size, data, size,
782 pkt->pts, pkt->dts, pkt->pos);
783
784 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
785 /* increment read pointer */
786 data += len;
787 size -= len;
788
789 got_output = !!out_pkt.size;
790
791 if (!out_pkt.size)
792 continue;
793
794 if (pkt->side_data) {
795 out_pkt.side_data = pkt->side_data;
796 out_pkt.side_data_elems = pkt->side_data_elems;
797 pkt->side_data = NULL;
798 pkt->side_data_elems = 0;
799 }
800
801 /* set the duration */
802 out_pkt.duration = 0;
803 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
804 if (st->codec->sample_rate > 0) {
805 out_pkt.duration =
806 av_rescale_q_rnd(st->parser->duration,
807 (AVRational) { 1, st->codec->sample_rate },
808 st->time_base,
809 AV_ROUND_DOWN);
810 }
811 }
812
813 out_pkt.stream_index = st->index;
814 out_pkt.pts = st->parser->pts;
815 out_pkt.dts = st->parser->dts;
816 out_pkt.pos = st->parser->pos;
817
818 if (st->parser->key_frame == 1 ||
819 (st->parser->key_frame == -1 &&
820 st->parser->pict_type == AV_PICTURE_TYPE_I))
821 out_pkt.flags |= AV_PKT_FLAG_KEY;
822
823 compute_pkt_fields(s, st, st->parser, &out_pkt);
824
825 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
826 out_pkt.flags & AV_PKT_FLAG_KEY) {
827 ff_reduce_index(s, st->index);
828 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
829 0, 0, AVINDEX_KEYFRAME);
830 }
831
832 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
833 out_pkt.buf = pkt->buf;
834 pkt->buf = NULL;
835 }
836 if ((ret = av_dup_packet(&out_pkt)) < 0)
837 goto fail;
838
839 if (!add_to_pktbuf(&s->internal->parse_queue, &out_pkt, &s->internal->parse_queue_end)) {
840 av_free_packet(&out_pkt);
841 ret = AVERROR(ENOMEM);
842 goto fail;
843 }
844 }
845
846 /* end of the stream => close and free the parser */
847 if (pkt == &flush_pkt) {
848 av_parser_close(st->parser);
849 st->parser = NULL;
850 }
851
852 fail:
853 av_free_packet(pkt);
854 return ret;
855 }
856
857 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
858 AVPacketList **pkt_buffer_end,
859 AVPacket *pkt)
860 {
861 AVPacketList *pktl;
862 av_assert0(*pkt_buffer);
863 pktl = *pkt_buffer;
864 *pkt = pktl->pkt;
865 *pkt_buffer = pktl->next;
866 if (!pktl->next)
867 *pkt_buffer_end = NULL;
868 av_freep(&pktl);
869 return 0;
870 }
871
872 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
873 {
874 int ret = 0, i, got_packet = 0;
875 AVDictionary *metadata = NULL;
876
877 av_init_packet(pkt);
878
879 while (!got_packet && !s->internal->parse_queue) {
880 AVStream *st;
881 AVPacket cur_pkt;
882
883 /* read next packet */
884 ret = ff_read_packet(s, &cur_pkt);
885 if (ret < 0) {
886 if (ret == AVERROR(EAGAIN))
887 return ret;
888 /* flush the parsers */
889 for (i = 0; i < s->nb_streams; i++) {
890 st = s->streams[i];
891 if (st->parser && st->need_parsing)
892 parse_packet(s, NULL, st->index);
893 }
894 /* all remaining packets are now in parse_queue =>
895 * really terminate parsing */
896 break;
897 }
898 ret = 0;
899 st = s->streams[cur_pkt.stream_index];
900
901 if (cur_pkt.pts != AV_NOPTS_VALUE &&
902 cur_pkt.dts != AV_NOPTS_VALUE &&
903 cur_pkt.pts < cur_pkt.dts) {
904 av_log(s, AV_LOG_WARNING,
905 "Invalid timestamps stream=%d, pts=%"PRId64", "
906 "dts=%"PRId64", size=%d\n",
907 cur_pkt.stream_index, cur_pkt.pts,
908 cur_pkt.dts, cur_pkt.size);
909 }
910 if (s->debug & FF_FDEBUG_TS)
911 av_log(s, AV_LOG_DEBUG,
912 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
913 "size=%d, duration=%"PRId64", flags=%d\n",
914 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
915 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
916
917 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
918 st->parser = av_parser_init(st->codec->codec_id);
919 if (!st->parser)
920 /* no parser available: just output the raw packets */
921 st->need_parsing = AVSTREAM_PARSE_NONE;
922 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
923 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
924 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
925 st->parser->flags |= PARSER_FLAG_ONCE;
926 }
927
928 if (!st->need_parsing || !st->parser) {
929 /* no parsing needed: we just output the packet as is */
930 *pkt = cur_pkt;
931 compute_pkt_fields(s, st, NULL, pkt);
932 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
933 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
934 ff_reduce_index(s, st->index);
935 av_add_index_entry(st, pkt->pos, pkt->dts,
936 0, 0, AVINDEX_KEYFRAME);
937 }
938 got_packet = 1;
939 } else if (st->discard < AVDISCARD_ALL) {
940 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
941 return ret;
942 } else {
943 /* free packet */
944 av_free_packet(&cur_pkt);
945 }
946 }
947
948 if (!got_packet && s->internal->parse_queue)
949 ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);
950
951 av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
952 if (metadata) {
953 s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
954 av_dict_copy(&s->metadata, metadata, 0);
955 av_dict_free(&metadata);
956 av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
957 }
958
959 if (s->debug & FF_FDEBUG_TS)
960 av_log(s, AV_LOG_DEBUG,
961 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
962 "size=%d, duration=%"PRId64", flags=%d\n",
963 pkt->stream_index, pkt->pts, pkt->dts,
964 pkt->size, pkt->duration, pkt->flags);
965
966 return ret;
967 }
968
969 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
970 {
971 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
972 int eof = 0;
973
974 if (!genpts)
975 return s->internal->packet_buffer
976 ? read_from_packet_buffer(&s->internal->packet_buffer,
977 &s->internal->packet_buffer_end, pkt)
978 : read_frame_internal(s, pkt);
979
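/* AVFMT_FLAG_GENPTS path: buffer packets and, before returning one whose pts is
 * missing, try to fill it in from the dts of a later packet on the same stream
 * (skipping packets that look like B-frames). */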
980 for (;;) {
981 int ret;
982 AVPacketList *pktl = s->internal->packet_buffer;
983
984 if (pktl) {
985 AVPacket *next_pkt = &pktl->pkt;
986
987 if (next_pkt->dts != AV_NOPTS_VALUE) {
988 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
989 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
990 if (pktl->pkt.stream_index == next_pkt->stream_index &&
991 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
992 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
993 // not B-frame
994 next_pkt->pts = pktl->pkt.dts;
995 }
996 pktl = pktl->next;
997 }
998 pktl = s->internal->packet_buffer;
999 }
1000
1001 /* read packet from packet buffer, if there is data */
1002 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1003 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1004 return read_from_packet_buffer(&s->internal->packet_buffer,
1005 &s->internal->packet_buffer_end, pkt);
1006 }
1007
1008 ret = read_frame_internal(s, pkt);
1009 if (ret < 0) {
1010 if (pktl && ret != AVERROR(EAGAIN)) {
1011 eof = 1;
1012 continue;
1013 } else
1014 return ret;
1015 }
1016
1017 if (av_dup_packet(add_to_pktbuf(&s->internal->packet_buffer, pkt,
1018 &s->internal->packet_buffer_end)) < 0)
1019 return AVERROR(ENOMEM);
1020 }
1021 }
1022
1023 /* XXX: remove the packet queue */
1024 static void flush_packet_queue(AVFormatContext *s)
1025 {
1026 free_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end);
1027 free_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end);
1028 free_packet_buffer(&s->internal->raw_packet_buffer, &s->internal->raw_packet_buffer_end);
1029
1030 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1031 }
1032
1033 /*******************************************************/
1034 /* seek support */
1035
1036 int av_find_default_stream_index(AVFormatContext *s)
1037 {
1038 int first_audio_index = -1;
1039 int i;
1040 AVStream *st;
1041
1042 if (s->nb_streams <= 0)
1043 return -1;
1044 for (i = 0; i < s->nb_streams; i++) {
1045 st = s->streams[i];
1046 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1047 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1048 return i;
1049 }
1050 if (first_audio_index < 0 &&
1051 st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1052 first_audio_index = i;
1053 }
1054 return first_audio_index >= 0 ? first_audio_index : 0;
1055 }
1056
1057 /** Flush the frame reader. */
1058 void ff_read_frame_flush(AVFormatContext *s)
1059 {
1060 AVStream *st;
1061 int i, j;
1062
1063 flush_packet_queue(s);
1064
1065 /* Reset read state for each stream. */
1066 for (i = 0; i < s->nb_streams; i++) {
1067 st = s->streams[i];
1068
1069 if (st->parser) {
1070 av_parser_close(st->parser);
1071 st->parser = NULL;
1072 }
1073 st->last_IP_pts = AV_NOPTS_VALUE;
1074 /* We set the current DTS to an unspecified origin. */
1075 st->cur_dts = AV_NOPTS_VALUE;
1076
1077 st->probe_packets = MAX_PROBE_PACKETS;
1078
1079 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1080 st->pts_buffer[j] = AV_NOPTS_VALUE;
1081 }
1082 }
1083
1084 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1085 {
1086 int i;
1087
1088 for (i = 0; i < s->nb_streams; i++) {
1089 AVStream *st = s->streams[i];
1090
1091 st->cur_dts =
1092 av_rescale(timestamp,
1093 st->time_base.den * (int64_t) ref_st->time_base.num,
1094 st->time_base.num * (int64_t) ref_st->time_base.den);
1095 }
1096 }
1097
1098 void ff_reduce_index(AVFormatContext *s, int stream_index)
1099 {
1100 AVStream *st = s->streams[stream_index];
1101 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1102
1103 if ((unsigned) st->nb_index_entries >= max_entries) {
1104 int i;
1105 for (i = 0; 2 * i < st->nb_index_entries; i++)
1106 st->index_entries[i] = st->index_entries[2 * i];
1107 st->nb_index_entries = i;
1108 }
1109 }
1110
1111 int ff_add_index_entry(AVIndexEntry **index_entries,
1112 int *nb_index_entries,
1113 unsigned int *index_entries_allocated_size,
1114 int64_t pos, int64_t timestamp,
1115 int size, int distance, int flags)
1116 {
1117 AVIndexEntry *entries, *ie;
1118 int index;
1119
1120 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1121 return -1;
1122
1123 entries = av_fast_realloc(*index_entries,
1124 index_entries_allocated_size,
1125 (*nb_index_entries + 1) *
1126 sizeof(AVIndexEntry));
1127 if (!entries)
1128 return -1;
1129
1130 *index_entries = entries;
1131
1132 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1133 timestamp, AVSEEK_FLAG_ANY);
1134
1135 if (index < 0) {
1136 index = (*nb_index_entries)++;
1137 ie = &entries[index];
1138 assert(index == 0 || ie[-1].timestamp < timestamp);
1139 } else {
1140 ie = &entries[index];
1141 if (ie->timestamp != timestamp) {
1142 if (ie->timestamp <= timestamp)
1143 return -1;
1144 memmove(entries + index + 1, entries + index,
1145 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1146 (*nb_index_entries)++;
1147 } else if (ie->pos == pos && distance < ie->min_distance)
1148 // do not reduce the distance
1149 distance = ie->min_distance;
1150 }
1151
1152 ie->pos = pos;
1153 ie->timestamp = timestamp;
1154 ie->min_distance = distance;
1155 ie->size = size;
1156 ie->flags = flags;
1157
1158 return index;
1159 }
1160
1161 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1162 int size, int distance, int flags)
1163 {
1164 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1165 &st->index_entries_allocated_size, pos,
1166 timestamp, size, distance, flags);
1167 }
1168
1169 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1170 int64_t wanted_timestamp, int flags)
1171 {
1172 int a, b, m;
1173 int64_t timestamp;
1174
1175 a = -1;
1176 b = nb_entries;
1177
1178 // Optimize appending index entries at the end.
1179 if (b && entries[b - 1].timestamp < wanted_timestamp)
1180 a = b - 1;
1181
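/* Invariant: entries[a].timestamp <= wanted_timestamp (a may be -1) and
 * entries[b].timestamp >= wanted_timestamp (b may be nb_entries); shrink the
 * window until b == a + 1. */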
1182 while (b - a > 1) {
1183 m = (a + b) >> 1;
1184 timestamp = entries[m].timestamp;
1185 if (timestamp >= wanted_timestamp)
1186 b = m;
1187 if (timestamp <= wanted_timestamp)
1188 a = m;
1189 }
1190 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1191
1192 if (!(flags & AVSEEK_FLAG_ANY))
1193 while (m >= 0 && m < nb_entries &&
1194 !(entries[m].flags & AVINDEX_KEYFRAME))
1195 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1196
1197 if (m == nb_entries)
1198 return -1;
1199 return m;
1200 }
1201
1202 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1203 {
1204 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1205 wanted_timestamp, flags);
1206 }
1207
1208 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1209 int64_t target_ts, int flags)
1210 {
1211 AVInputFormat *avif = s->iformat;
1212 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1213 int64_t ts_min, ts_max, ts;
1214 int index;
1215 int64_t ret;
1216 AVStream *st;
1217
1218 if (stream_index < 0)
1219 return -1;
1220
1221 av_log(s, AV_LOG_TRACE, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1222
1223 ts_max =
1224 ts_min = AV_NOPTS_VALUE;
1225 pos_limit = -1; // GCC falsely says it may be uninitialized.
1226
1227 st = s->streams[stream_index];
1228 if (st->index_entries) {
1229 AVIndexEntry *e;
1230
1231 /* FIXME: Whole function must be checked for non-keyframe entries in
1232 * index case, especially read_timestamp(). */
1233 index = av_index_search_timestamp(st, target_ts,
1234 flags | AVSEEK_FLAG_BACKWARD);
1235 index = FFMAX(index, 0);
1236 e = &st->index_entries[index];
1237
1238 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1239 pos_min = e->pos;
1240 ts_min = e->timestamp;
1241 av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1242 pos_min, ts_min);
1243 } else {
1244 assert(index == 0);
1245 }
1246
1247 index = av_index_search_timestamp(st, target_ts,
1248 flags & ~AVSEEK_FLAG_BACKWARD);
1249 assert(index < st->nb_index_entries);
1250 if (index >= 0) {
1251 e = &st->index_entries[index];
1252 assert(e->timestamp >= target_ts);
1253 pos_max = e->pos;
1254 ts_max = e->timestamp;
1255 pos_limit = pos_max - e->min_distance;
1256 av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1257 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1258 }
1259 }
1260
1261 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1262 ts_min, ts_max, flags, &ts, avif->read_timestamp);
1263 if (pos < 0)
1264 return -1;
1265
1266 /* do the seek */
1267 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1268 return ret;
1269
1270 ff_update_cur_dts(s, st, ts);
1271
1272 return 0;
1273 }
1274
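/* Generic timestamp-based seek: binary-search the byte range [pos_min, pos_max]
 * (whose timestamps are ts_min/ts_max) for target_ts using read_timestamp(),
 * preferring linear interpolation over plain bisection and falling back to a
 * linear scan when the window stops shrinking. Returns the byte position to
 * seek to and stores the corresponding timestamp in *ts_ret. */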
1275 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1276 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1277 int64_t ts_min, int64_t ts_max,
1278 int flags, int64_t *ts_ret,
1279 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1280 int64_t *, int64_t))
1281 {
1282 int64_t pos, ts;
1283 int64_t start_pos, filesize;
1284 int no_change;
1285
1286 av_log(s, AV_LOG_TRACE, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1287
1288 if (ts_min == AV_NOPTS_VALUE) {
1289 pos_min = s->internal->data_offset;
1290 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1291 if (ts_min == AV_NOPTS_VALUE)
1292 return -1;
1293 }
1294
1295 if (ts_max == AV_NOPTS_VALUE) {
1296 int step = 1024;
1297 filesize = avio_size(s->pb);
1298 pos_max = filesize - 1;
1299 do {
1300 pos_max -= step;
1301 ts_max = read_timestamp(s, stream_index, &pos_max,
1302 pos_max + step);
1303 step += step;
1304 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1305 if (ts_max == AV_NOPTS_VALUE)
1306 return -1;
1307
1308 for (;;) {
1309 int64_t tmp_pos = pos_max + 1;
1310 int64_t tmp_ts = read_timestamp(s, stream_index,
1311 &tmp_pos, INT64_MAX);
1312 if (tmp_ts == AV_NOPTS_VALUE)
1313 break;
1314 ts_max = tmp_ts;
1315 pos_max = tmp_pos;
1316 if (tmp_pos >= filesize)
1317 break;
1318 }
1319 pos_limit = pos_max;
1320 }
1321
1322 if (ts_min > ts_max)
1323 return -1;
1324 else if (ts_min == ts_max)
1325 pos_limit = pos_min;
1326
1327 no_change = 0;
1328 while (pos_min < pos_limit) {
1329 av_log(s, AV_LOG_TRACE, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1330 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1331 assert(pos_limit <= pos_max);
1332
1333 if (no_change == 0) {
1334 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1335 // interpolate position (better than plain bisection)
1336 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1337 ts_max - ts_min) +
1338 pos_min - approximate_keyframe_distance;
1339 } else if (no_change == 1) {
1340 // bisection if interpolation did not change min / max pos last time
1341 pos = (pos_min + pos_limit) >> 1;
1342 } else {
1343 /* linear search if bisection failed, can only happen if there
1344 * are very few or no keyframes between min/max */
1345 pos = pos_min;
1346 }
1347 if (pos <= pos_min)
1348 pos = pos_min + 1;
1349 else if (pos > pos_limit)
1350 pos = pos_limit;
1351 start_pos = pos;
1352
1353 // May pass pos_limit instead of -1.
1354 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1355 if (pos == pos_max)
1356 no_change++;
1357 else
1358 no_change = 0;
1359 av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1360 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1361 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1362 pos_limit, start_pos, no_change);
1363 if (ts == AV_NOPTS_VALUE) {
1364 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1365 return -1;
1366 }
1367 assert(ts != AV_NOPTS_VALUE);
1368 if (target_ts <= ts) {
1369 pos_limit = start_pos - 1;
1370 pos_max = pos;
1371 ts_max = ts;
1372 }
1373 if (target_ts >= ts) {
1374 pos_min = pos;
1375 ts_min = ts;
1376 }
1377 }
1378
1379 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1380 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1381 pos_min = pos;
1382 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1383 pos_min++;
1384 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1385 av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1386 pos, ts_min, target_ts, ts_max);
1387 *ts_ret = ts;
1388 return pos;
1389 }
1390
1391 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1392 int64_t pos, int flags)
1393 {
1394 int64_t pos_min, pos_max;
1395
1396 pos_min = s->internal->data_offset;
1397 pos_max = avio_size(s->pb) - 1;
1398
1399 if (pos < pos_min)
1400 pos = pos_min;
1401 else if (pos > pos_max)
1402 pos = pos_max;
1403
1404 avio_seek(s->pb, pos, SEEK_SET);
1405
1406 return 0;
1407 }
1408
1409 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1410 int64_t timestamp, int flags)
1411 {
1412 int index;
1413 int64_t ret;
1414 AVStream *st;
1415 AVIndexEntry *ie;
1416
1417 st = s->streams[stream_index];
1418
1419 index = av_index_search_timestamp(st, timestamp, flags);
1420
1421 if (index < 0 && st->nb_index_entries &&
1422 timestamp < st->index_entries[0].timestamp)
1423 return -1;
1424
1425 if (index < 0 || index == st->nb_index_entries - 1) {
1426 AVPacket pkt;
1427
1428 if (st->nb_index_entries) {
1429 assert(st->index_entries);
1430 ie = &st->index_entries[st->nb_index_entries - 1];
1431 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1432 return ret;
1433 ff_update_cur_dts(s, st, ie->timestamp);
1434 } else {
1435 if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
1436 return ret;
1437 }
1438 for (;;) {
1439 int read_status;
1440 do {
1441 read_status = av_read_frame(s, &pkt);
1442 } while (read_status == AVERROR(EAGAIN));
1443 if (read_status < 0)
1444 break;
1445 av_free_packet(&pkt);
1446 if (stream_index == pkt.stream_index)
1447 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1448 break;
1449 }
1450 index = av_index_search_timestamp(st, timestamp, flags);
1451 }
1452 if (index < 0)
1453 return -1;
1454
1455 ff_read_frame_flush(s);
1456 if (s->iformat->read_seek)
1457 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1458 return 0;
1459 ie = &st->index_entries[index];
1460 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1461 return ret;
1462 ff_update_cur_dts(s, st, ie->timestamp);
1463
1464 return 0;
1465 }
1466
1467 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1468 int64_t timestamp, int flags)
1469 {
1470 int ret;
1471 AVStream *st;
1472
1473 if (flags & AVSEEK_FLAG_BYTE) {
1474 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1475 return -1;
1476 ff_read_frame_flush(s);
1477 return seek_frame_byte(s, stream_index, timestamp, flags);
1478 }
1479
1480 if (stream_index < 0) {
1481 stream_index = av_find_default_stream_index(s);
1482 if (stream_index < 0)
1483 return -1;
1484
1485 st = s->streams[stream_index];
1486 /* timestamp for default must be expressed in AV_TIME_BASE units */
1487 timestamp = av_rescale(timestamp, st->time_base.den,
1488 AV_TIME_BASE * (int64_t) st->time_base.num);
1489 }
1490
1491 /* first, we try the format specific seek */
1492 if (s->iformat->read_seek) {
1493 ff_read_frame_flush(s);
1494 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1495 } else
1496 ret = -1;
1497 if (ret >= 0)
1498 return 0;
1499
1500 if (s->iformat->read_timestamp &&
1501 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1502 ff_read_frame_flush(s);
1503 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1504 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1505 ff_read_frame_flush(s);
1506 return seek_frame_generic(s, stream_index, timestamp, flags);
1507 } else
1508 return -1;
1509 }
1510
1511 int av_seek_frame(AVFormatContext *s, int stream_index,
1512 int64_t timestamp, int flags)
1513 {
1514 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1515
1516 if (ret >= 0)
1517 ret = queue_attached_pictures(s);
1518
1519 return ret;
1520 }
1521
1522 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1523 int64_t ts, int64_t max_ts, int flags)
1524 {
1525 if (min_ts > ts || max_ts < ts)
1526 return -1;
1527
1528 if (s->iformat->read_seek2) {
1529 int ret;
1530 ff_read_frame_flush(s);
1531 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1532 ts, max_ts, flags);
1533
1534 if (ret >= 0)
1535 ret = queue_attached_pictures(s);
1536 return ret;
1537 }
1538
1539 if (s->iformat->read_timestamp) {
1540 // try to seek via read_timestamp()
1541 }
1542
1543 // Fall back on old API if new is not implemented but old is.
1544 // Note the old API has somewhat different semantics.
1545 if (s->iformat->read_seek || 1)
1546 return av_seek_frame(s, stream_index, ts,
1547 flags | ((uint64_t) ts - min_ts >
1548 (uint64_t) max_ts - ts
1549 ? AVSEEK_FLAG_BACKWARD : 0));
1550
1551 // try some generic seek like seek_frame_generic() but with new ts semantics
1552 }
1553
1554 /*******************************************************/
1555
1556 /**
1557 * Return TRUE if the given context has an accurate duration in any of its streams.
1558 *
1559 * @return TRUE if at least one stream, or the context itself, has an accurate duration.
1560 */
1561 static int has_duration(AVFormatContext *ic)
1562 {
1563 int i;
1564 AVStream *st;
1565
1566 for (i = 0; i < ic->nb_streams; i++) {
1567 st = ic->streams[i];
1568 if (st->duration != AV_NOPTS_VALUE)
1569 return 1;
1570 }
1571 if (ic->duration != AV_NOPTS_VALUE)
1572 return 1;
1573 return 0;
1574 }
1575
1576 /**
1577 * Estimate the overall stream timings from those of each component.
1578 *
1579 * Also computes the global bitrate if possible.
1580 */
1581 static void update_stream_timings(AVFormatContext *ic)
1582 {
1583 int64_t start_time, start_time1, end_time, end_time1;
1584 int64_t duration, duration1, filesize;
1585 int i;
1586 AVStream *st;
1587
1588 start_time = INT64_MAX;
1589 end_time = INT64_MIN;
1590 duration = INT64_MIN;
1591 for (i = 0; i < ic->nb_streams; i++) {
1592 st = ic->streams[i];
1593 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1594 start_time1 = av_rescale_q(st->start_time, st->time_base,
1595 AV_TIME_BASE_Q);
1596 start_time = FFMIN(start_time, start_time1);
1597 if (st->duration != AV_NOPTS_VALUE) {
1598 end_time1 = start_time1 +
1599 av_rescale_q(st->duration, st->time_base,
1600 AV_TIME_BASE_Q);
1601 end_time = FFMAX(end_time, end_time1);
1602 }
1603 }
1604 if (st->duration != AV_NOPTS_VALUE) {
1605 duration1 = av_rescale_q(st->duration, st->time_base,
1606 AV_TIME_BASE_Q);
1607 duration = FFMAX(duration, duration1);
1608 }
1609 }
1610 if (start_time != INT64_MAX) {
1611 ic->start_time = start_time;
1612 if (end_time != INT64_MIN)
1613 duration = FFMAX(duration, end_time - start_time);
1614 }
1615 if (duration != INT64_MIN) {
1616 ic->duration = duration;
1617 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1618 /* compute the bitrate */
1619 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1620 (double) ic->duration;
1621 }
1622 }
1623
1624 static void fill_all_stream_timings(AVFormatContext *ic)
1625 {
1626 int i;
1627 AVStream *st;
1628
1629 update_stream_timings(ic);
1630 for (i = 0; i < ic->nb_streams; i++) {
1631 st = ic->streams[i];
1632 if (st->start_time == AV_NOPTS_VALUE) {
1633 if (ic->start_time != AV_NOPTS_VALUE)
1634 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1635 st->time_base);
1636 if (ic->duration != AV_NOPTS_VALUE)
1637 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
1638 st->time_base);
1639 }
1640 }
1641 }
1642
1643 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1644 {
1645 int64_t filesize, duration;
1646 int i;
1647 AVStream *st;
1648
1649 /* if bit_rate is already set, we believe it */
1650 if (ic->bit_rate <= 0) {
1651 int bit_rate = 0;
1652 for (i = 0; i < ic->nb_streams; i++) {
1653 st = ic->streams[i];
1654 if (st->codec->bit_rate > 0) {
1655 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1656 bit_rate = 0;
1657 break;
1658 }
1659 bit_rate += st->codec->bit_rate;
1660 }
1661 }
1662 ic->bit_rate = bit_rate;
1663 }
1664
1665 /* if duration is already set, we believe it */
1666 if (ic->duration == AV_NOPTS_VALUE &&
1667 ic->bit_rate != 0) {
1668 filesize = ic->pb ? avio_size(ic->pb) : 0;
1669 if (filesize > 0) {
1670 for (i = 0; i < ic->nb_streams; i++) {
1671 st = ic->streams[i];
1672 duration = av_rescale(8 * filesize, st->time_base.den,
1673 ic->bit_rate *
1674 (int64_t) st->time_base.num);
1675 if (st->duration == AV_NOPTS_VALUE)
1676 st->duration = duration;
1677 }
1678 }
1679 }
1680 }
1681
1682 #define DURATION_MAX_READ_SIZE 250000
1683 #define DURATION_MAX_RETRY 3
1684
1685 /* only usable for MPEG-PS streams */
1686 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1687 {
1688 AVPacket pkt1, *pkt = &pkt1;
1689 AVStream *st;
1690 int read_size, i, ret;
1691 int64_t end_time;
1692 int64_t filesize, offset, duration;
1693 int retry = 0;
1694
1695 /* flush packet queue */
1696 flush_packet_queue(ic);
1697
1698 for (i = 0; i < ic->nb_streams; i++) {
1699 st = ic->streams[i];
1700 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1701 av_log(st->codec, AV_LOG_WARNING,
1702 "start time is not set in estimate_timings_from_pts\n");
1703
1704 if (st->parser) {
1705 av_parser_close(st->parser);
1706 st->parser = NULL;
1707 }
1708 }
1709
1710 /* estimate the end time (duration) */
1711 /* XXX: may need to support wrapping */
1712 filesize = ic->pb ? avio_size(ic->pb) : 0;
1713 end_time = AV_NOPTS_VALUE;
1714 do {
1715 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1716 if (offset < 0)
1717 offset = 0;
1718
1719 avio_seek(ic->pb, offset, SEEK_SET);
1720 read_size = 0;
1721 for (;;) {
1722 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1723 break;
1724
1725 do {
1726 ret = ff_read_packet(ic, pkt);
1727 } while (ret == AVERROR(EAGAIN));
1728 if (ret != 0)
1729 break;
1730 read_size += pkt->size;
1731 st = ic->streams[pkt->stream_index];
1732 if (pkt->pts != AV_NOPTS_VALUE &&
1733 (st->start_time != AV_NOPTS_VALUE ||
1734 st->first_dts != AV_NOPTS_VALUE)) {
1735 duration = end_time = pkt->pts;
1736 if (st->start_time != AV_NOPTS_VALUE)
1737 duration -= st->start_time;
1738 else
1739 duration -= st->first_dts;
1740 if (duration < 0)
1741 duration += 1LL << st->pts_wrap_bits;
1742 if (duration > 0) {
1743 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1744 st->duration = duration;
1745 }
1746 }
1747 av_free_packet(pkt);
1748 }
1749 } while (end_time == AV_NOPTS_VALUE &&
1750 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1751 ++retry <= DURATION_MAX_RETRY);
1752
1753 fill_all_stream_timings(ic);
1754
1755 avio_seek(ic->pb, old_offset, SEEK_SET);
1756 for (i = 0; i < ic->nb_streams; i++) {
1757 st = ic->streams[i];
1758 st->cur_dts = st->first_dts;
1759 st->last_IP_pts = AV_NOPTS_VALUE;
1760 }
1761 }
1762
1763 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1764 {
1765 int64_t file_size;
1766
1767 /* get the file size, if possible */
1768 if (ic->iformat->flags & AVFMT_NOFILE) {
1769 file_size = 0;
1770 } else {
1771 file_size = avio_size(ic->pb);
1772 file_size = FFMAX(0, file_size);
1773 }
1774
1775 if ((!strcmp(ic->iformat->name, "mpeg") ||
1776 !strcmp(ic->iformat->name, "mpegts")) &&
1777 file_size && ic->pb->seekable) {
1778 /* get accurate estimate from the PTSes */
1779 estimate_timings_from_pts(ic, old_offset);
1780 } else if (has_duration(ic)) {
1781 /* at least one component has timings - we use them for all
1782 * the components */
1783 fill_all_stream_timings(ic);
1784 } else {
1785 av_log(ic, AV_LOG_WARNING,
1786 "Estimating duration from bitrate, this may be inaccurate\n");
1787 /* less precise: use bitrate info */
1788 estimate_timings_from_bit_rate(ic);
1789 }
1790 update_stream_timings(ic);
1791
1792 {
1793 int i;
1794 AVStream av_unused *st;
1795 for (i = 0; i < ic->nb_streams; i++) {
1796 st = ic->streams[i];
1797 av_log(ic, AV_LOG_TRACE, "%d: start_time: %0.3f duration: %0.3f\n", i,
1798 (double) st->start_time / AV_TIME_BASE,
1799 (double) st->duration / AV_TIME_BASE);
1800 }
1801 av_log(ic, AV_LOG_TRACE,
1802 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1803 (double) ic->start_time / AV_TIME_BASE,
1804 (double) ic->duration / AV_TIME_BASE,
1805 ic->bit_rate / 1000);
1806 }
1807 }
1808
1809 static int has_codec_parameters(AVStream *st)
1810 {
1811 AVCodecContext *avctx = st->codec;
1812 int val;
1813
1814 switch (avctx->codec_type) {
1815 case AVMEDIA_TYPE_AUDIO:
1816 val = avctx->sample_rate && avctx->channels;
1817 if (st->info->found_decoder >= 0 &&
1818 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1819 return 0;
1820 break;
1821 case AVMEDIA_TYPE_VIDEO:
1822 val = avctx->width;
1823 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1824 return 0;
1825 break;
1826 default:
1827 val = 1;
1828 break;
1829 }
1830 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
1831 }
1832
1833 static int has_decode_delay_been_guessed(AVStream *st)
1834 {
1835 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1836 st->info->nb_decoded_frames >= 6;
1837 }
1838
1839 /* Return 1 if decoded data was returned, 0 if it was not, or a negative error code. */
1840 static int try_decode_frame(AVStream *st, AVPacket *avpkt,
1841 AVDictionary **options)
1842 {
1843 const AVCodec *codec;
1844 int got_picture = 1, ret = 0;
1845 AVFrame *frame = av_frame_alloc();
1846 AVPacket pkt = *avpkt;
1847
1848 if (!frame)
1849 return AVERROR(ENOMEM);
1850
1851 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
1852 AVDictionary *thread_opt = NULL;
1853
1854 codec = st->codec->codec ? st->codec->codec
1855 : avcodec_find_decoder(st->codec->codec_id);
1856
1857 if (!codec) {
1858 st->info->found_decoder = -1;
1859 ret = -1;
1860 goto fail;
1861 }
1862
1863 /* Force thread count to 1 since the H.264 decoder will not extract
1864 * SPS and PPS to extradata during multi-threaded decoding. */
1865 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1866 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
1867 if (!options)
1868 av_dict_free(&thread_opt);
1869 if (ret < 0) {
1870 st->info->found_decoder = -1;
1871 goto fail;
1872 }
1873 st->info->found_decoder = 1;
1874 } else if (!st->info->found_decoder)
1875 st->info->found_decoder = 1;
1876
1877 if (st->info->found_decoder < 0) {
1878 ret = -1;
1879 goto fail;
1880 }
1881
1882 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1883 ret >= 0 &&
1884 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1885 (!st->codec_info_nb_frames &&
1886 (st->codec->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
1887 got_picture = 0;
1888 switch (st->codec->codec_type) {
1889 case AVMEDIA_TYPE_VIDEO:
1890 ret = avcodec_decode_video2(st->codec, frame,
1891 &got_picture, &pkt);
1892 break;
1893 case AVMEDIA_TYPE_AUDIO:
1894 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
1895 break;
1896 default:
1897 break;
1898 }
1899 if (ret >= 0) {
1900 if (got_picture)
1901 st->info->nb_decoded_frames++;
1902 pkt.data += ret;
1903 pkt.size -= ret;
1904 ret = got_picture;
1905 }
1906 }
1907
1908 fail:
1909 av_frame_free(&frame);
1910 return ret;
1911 }
1912
1913 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1914 {
1915 while (tags->id != AV_CODEC_ID_NONE) {
1916 if (tags->id == id)
1917 return tags->tag;
1918 tags++;
1919 }
1920 return 0;
1921 }
1922
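/* Look up the codec ID for a tag: first with an exact comparison, then with
 * a second pass that compares the tags case-insensitively via
 * avpriv_toupper4(). */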
1923 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1924 {
1925 int i;
1926 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1927 if (tag == tags[i].tag)
1928 return tags[i].id;
1929 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1930 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1931 return tags[i].id;
1932 return AV_CODEC_ID_NONE;
1933 }
1934
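/* Map bits per sample, float/integer, endianness and signedness to a PCM
 * codec ID. For integer formats, bit (bps / 8) - 1 of sflags selects the
 * signed variant; e.g. 16-bit samples with bit 1 of sflags set yield
 * AV_CODEC_ID_PCM_S16BE or AV_CODEC_ID_PCM_S16LE depending on be. */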
1935 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
1936 {
1937 if (flt) {
1938 switch (bps) {
1939 case 32:
1940 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
1941 case 64:
1942 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
1943 default:
1944 return AV_CODEC_ID_NONE;
1945 }
1946 } else {
1947 bps >>= 3;
1948 if (sflags & (1 << (bps - 1))) {
1949 switch (bps) {
1950 case 1:
1951 return AV_CODEC_ID_PCM_S8;
1952 case 2:
1953 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
1954 case 3:
1955 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
1956 case 4:
1957 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
1958 default:
1959 return AV_CODEC_ID_NONE;
1960 }
1961 } else {
1962 switch (bps) {
1963 case 1:
1964 return AV_CODEC_ID_PCM_U8;
1965 case 2:
1966 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
1967 case 3:
1968 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
1969 case 4:
1970 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
1971 default:
1972 return AV_CODEC_ID_NONE;
1973 }
1974 }
1975 }
1976 }
1977
1978 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
1979 {
1980 int i;
1981 for (i = 0; tags && tags[i]; i++) {
1982 int tag = ff_codec_get_tag(tags[i], id);
1983 if (tag)
1984 return tag;
1985 }
1986 return 0;
1987 }
1988
1989 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
1990 {
1991 int i;
1992 for (i = 0; tags && tags[i]; i++) {
1993 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
1994 if (id != AV_CODEC_ID_NONE)
1995 return id;
1996 }
1997 return AV_CODEC_ID_NONE;
1998 }
1999
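/* Fill in missing chapter end times: a chapter with an unknown end is
 * terminated at the start of the next chapter beginning after it, falling
 * back to the end of the file (start_time + duration) or, if neither is
 * known, to the chapter's own start. */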
2000 static void compute_chapters_end(AVFormatContext *s)
2001 {
2002 unsigned int i, j;
2003 int64_t max_time = s->duration +
2004 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2005
2006 for (i = 0; i < s->nb_chapters; i++)
2007 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2008 AVChapter *ch = s->chapters[i];
2009 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2010 ch->time_base)
2011 : INT64_MAX;
2012
2013 for (j = 0; j < s->nb_chapters; j++) {
2014 AVChapter *ch1 = s->chapters[j];
2015 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2016 ch->time_base);
2017 if (j != i && next_start > ch->start && next_start < end)
2018 end = next_start;
2019 }
2020 ch->end = (end == INT64_MAX) ? ch->start : end;
2021 }
2022 }
2023
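/* Candidate "standard" timebases used when rounding a guessed framerate:
 * indices below 60 * 12 cover every multiple of 1/12 fps up to 60 fps
 * against the 12 * 1001 denominator used by the caller (e.g. i = 299 gives
 * 300 * 1001, i.e. exactly 25 fps), while the remaining entries cover the
 * NTSC-style rates 24000/1001, 30000/1001, 60000/1001, 12000/1001 and
 * 15000/1001. */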
2024 static int get_std_framerate(int i)
2025 {
2026 if (i < 60 * 12)
2027 return (i + 1) * 1001;
2028 else
2029 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
2030 }
2031
2032 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2033 {
2034 int i, count, ret, read_size, j;
2035 AVStream *st;
2036 AVPacket pkt1, *pkt;
2037 int64_t old_offset = avio_tell(ic->pb);
2038 // New streams might appear; there are no options for those.
2039 int orig_nb_streams = ic->nb_streams;
2040
2041 for (i = 0; i < ic->nb_streams; i++) {
2042 const AVCodec *codec;
2043 AVDictionary *thread_opt = NULL;
2044 st = ic->streams[i];
2045
2046 // The parser is only needed for the extradata split below.
2047 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2048 st->parser = av_parser_init(st->codec->codec_id);
2049 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2050 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2051 }
2052 codec = st->codec->codec ? st->codec->codec
2053 : avcodec_find_decoder(st->codec->codec_id);
2054
2055 /* Force thread count to 1 since the H.264 decoder will not extract
2056 * SPS and PPS to extradata during multi-threaded decoding. */
2057 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2058
2059 /* Ensure that subtitle_header is properly set. */
2060 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2061 && codec && !st->codec->codec)
2062 avcodec_open2(st->codec, codec,
2063 options ? &options[i] : &thread_opt);
2064
2065 // Try to just open decoders, in case this is enough to get parameters.
2066 if (!has_codec_parameters(st)) {
2067 if (codec && !st->codec->codec)
2068 avcodec_open2(st->codec, codec,
2069 options ? &options[i] : &thread_opt);
2070 }
2071 if (!options)
2072 av_dict_free(&thread_opt);
2073 }
2074
2075 for (i = 0; i < ic->nb_streams; i++) {
2076 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2077 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
2078 }
2079
2080 count = 0;
2081 read_size = 0;
2082 for (;;) {
2083 if (ff_check_interrupt(&ic->interrupt_callback)) {
2084 ret = AVERROR_EXIT;
2085 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2086 break;
2087 }
2088
2089 /* check if one codec still needs to be handled */
2090 for (i = 0; i < ic->nb_streams; i++) {
2091 int fps_analyze_framecount = 20;
2092
2093 st = ic->streams[i];
2094 if (!has_codec_parameters(st))
2095 break;
2096 /* If the timebase is coarse (like the usual millisecond precision
2097 * of mkv), we need to analyze more frames to reliably arrive at
2098 * the correct fps. */
2099 if (av_q2d(st->time_base) > 0.0005)
2100 fps_analyze_framecount *= 2;
2101 if (ic->fps_probe_size >= 0)
2102 fps_analyze_framecount = ic->fps_probe_size;
2103 /* variable fps and no guess at the real fps */
2104 if (!st->avg_frame_rate.num &&
2105 st->codec_info_nb_frames < fps_analyze_framecount &&
2106 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2107 break;
2108 if (st->parser && st->parser->parser->split &&
2109 !st->codec->extradata)
2110 break;
2111 if (st->first_dts == AV_NOPTS_VALUE &&
2112 st->codec_info_nb_frames < ic->max_ts_probe &&
2113 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2114 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2115 break;
2116 }
2117 if (i == ic->nb_streams) {
2118 /* NOTE: If the format has no header, then we need to read some
2119 * packets to get most of the streams, so we cannot stop here. */
2120 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2121 /* If we found the info for all the codecs, we can stop. */
2122 ret = count;
2123 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2124 break;
2125 }
2126 }
2127 /* We did not get all the codec info, but we have already read too much data. */
2128 if (read_size >= ic->probesize) {
2129 ret = count;
2130 av_log(ic, AV_LOG_DEBUG,
2131 "Probe buffer size limit %d reached\n", ic->probesize);
2132 break;
2133 }
2134
2135 /* NOTE: A new stream can be added here if the file has no header
2136 * (AVFMTCTX_NOHEADER). */
2137 ret = read_frame_internal(ic, &pkt1);
2138 if (ret == AVERROR(EAGAIN))
2139 continue;
2140
2141 if (ret < 0) {
2142 /* EOF or error */
2143 AVPacket empty_pkt = { 0 };
2144 int err = 0;
2145 av_init_packet(&empty_pkt);
2146
2147 /* We could not get all the codec parameters before EOF. */
2148 ret = -1;
2149 for (i = 0; i < ic->nb_streams; i++) {
2150 st = ic->streams[i];
2151
2152 /* flush the decoders */
2153 if (st->info->found_decoder == 1) {
2154 do {
2155 err = try_decode_frame(st, &empty_pkt,
2156 (options && i < orig_nb_streams)
2157 ? &options[i] : NULL);
2158 } while (err > 0 && !has_codec_parameters(st));
2159 }
2160
2161 if (err < 0) {
2162 av_log(ic, AV_LOG_WARNING,
2163 "decoding for stream %d failed\n", st->index);
2164 } else if (!has_codec_parameters(st)) {
2165 char buf[256];
2166 avcodec_string(buf, sizeof(buf), st->codec, 0);
2167 av_log(ic, AV_LOG_WARNING,
2168 "Could not find codec parameters (%s)\n", buf);
2169 } else {
2170 ret = 0;
2171 }
2172 }
2173 break;
2174 }
2175
2176 if (ic->flags & AVFMT_FLAG_NOBUFFER) {
2177 pkt = &pkt1;
2178 } else {
2179 pkt = add_to_pktbuf(&ic->internal->packet_buffer, &pkt1,
2180 &ic->internal->packet_buffer_end);
2181 if ((ret = av_dup_packet(pkt)) < 0)
2182 goto find_stream_info_err;
2183 }
2184
2185 read_size += pkt->size;
2186
2187 st = ic->streams[pkt->stream_index];
2188 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2189 /* check for non-increasing dts */
2190 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2191 st->info->fps_last_dts >= pkt->dts) {
2192 av_log(ic, AV_LOG_WARNING,
2193 "Non-increasing DTS in stream %d: packet %d with DTS "
2194 "%"PRId64", packet %d with DTS %"PRId64"\n",
2195 st->index, st->info->fps_last_dts_idx,
2196 st->info->fps_last_dts, st->codec_info_nb_frames,
2197 pkt->dts);
2198 st->info->fps_first_dts =
2199 st->info->fps_last_dts = AV_NOPTS_VALUE;
2200 }
2201 /* Check for a discontinuity in dts. If the difference in dts
2202 * is more than 1000 times the average packet duration in the
2203 * sequence, we treat it as a discontinuity. */
2204 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2205 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2206 (pkt->dts - st->info->fps_last_dts) / 1000 >
2207 (st->info->fps_last_dts - st->info->fps_first_dts) /
2208 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2209 av_log(ic, AV_LOG_WARNING,
2210 "DTS discontinuity in stream %d: packet %d with DTS "
2211 "%"PRId64", packet %d with DTS %"PRId64"\n",
2212 st->index, st->info->fps_last_dts_idx,
2213 st->info->fps_last_dts, st->codec_info_nb_frames,
2214 pkt->dts);
2215 st->info->fps_first_dts =
2216 st->info->fps_last_dts = AV_NOPTS_VALUE;
2217 }
2218
2219 /* update stored dts values */
2220 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2221 st->info->fps_first_dts = pkt->dts;
2222 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2223 }
2224 st->info->fps_last_dts = pkt->dts;
2225 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2226
2227 /* check max_analyze_duration */
2228 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2229 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2230 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2231 ic->max_analyze_duration);
2232 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2233 av_packet_unref(pkt);
2234 break;
2235 }
2236 }
2237 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
2238 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
2239 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2240 st->codec->extradata_size = i;
2241 st->codec->extradata = av_mallocz(st->codec->extradata_size +
2242 AV_INPUT_BUFFER_PADDING_SIZE);
2243 if (!st->codec->extradata)
2244 return AVERROR(ENOMEM);
2245 memcpy(st->codec->extradata, pkt->data,
2246 st->codec->extradata_size);
2247 }
2248 }
2249
2250 /* If we still have no information, try to open the codec and
2251 * decompress a frame. We try to avoid that in most cases as
2252 * it takes longer and uses more memory. For MPEG-4, we need to
2253 * decompress for QuickTime.
2254 *
2255 * If AV_CODEC_CAP_CHANNEL_CONF is set, this will force decoding of at
2256 * least one frame of codec data; this makes sure the codec initializes
2257 * the channel configuration and does not just trust the values from
2258 * the container. */
2259 try_decode_frame(st, pkt,
2260 (options && i < orig_nb_streams) ? &options[i] : NULL);
2261
2262 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2263 av_packet_unref(pkt);
2264
2265 st->codec_info_nb_frames++;
2266 count++;
2267 }
2268
2269 // close codecs which were opened in try_decode_frame()
2270 for (i = 0; i < ic->nb_streams; i++) {
2271 st = ic->streams[i];
2272 avcodec_close(st->codec);
2273 }
2274 for (i = 0; i < ic->nb_streams; i++) {
2275 st = ic->streams[i];
2276 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2277 /* estimate average framerate if not set by demuxer */
2278 if (!st->avg_frame_rate.num &&
2279 st->info->fps_last_dts != st->info->fps_first_dts) {
2280 int64_t delta_dts = st->info->fps_last_dts -
2281 st->info->fps_first_dts;
2282 int delta_packets = st->info->fps_last_dts_idx -
2283 st->info->fps_first_dts_idx;
2284 int best_fps = 0;
2285 double best_error = 0.01;
2286
2287 if (delta_dts >= INT64_MAX / st->time_base.num ||
2288 delta_packets >= INT64_MAX / st->time_base.den ||
2289 delta_dts < 0)
2290 continue;
2291 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2292 delta_packets * (int64_t) st->time_base.den,
2293 delta_dts * (int64_t) st->time_base.num, 60000);
2294
2295 /* Round guessed framerate to a "standard" framerate if it's
2296 * within 1% of the original estimate. */
2297 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2298 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2299 double error = fabs(av_q2d(st->avg_frame_rate) /
2300 av_q2d(std_fps) - 1);
2301
2302 if (error < best_error) {
2303 best_error = error;
2304 best_fps = std_fps.num;
2305 }
2306 }
2307 if (best_fps)
2308 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2309 best_fps, 12 * 1001, INT_MAX);
2310 }
2311 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2312 if (!st->codec->bits_per_coded_sample)
2313 st->codec->bits_per_coded_sample =
2314 av_get_bits_per_sample(st->codec->codec_id);
2315 // set stream disposition based on audio service type
2316 switch (st->codec->audio_service_type) {
2317 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2318 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2319 break;
2320 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2321 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2322 break;
2323 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2324 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2325 break;
2326 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2327 st->disposition = AV_DISPOSITION_COMMENT;
2328 break;
2329 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2330 st->disposition = AV_DISPOSITION_KARAOKE;
2331 break;
2332 }
2333 }
2334 }
2335
2336 estimate_timings(ic, old_offset);
2337
2338 compute_chapters_end(ic);
2339
2340 find_stream_info_err:
2341 for (i = 0; i < ic->nb_streams; i++) {
2342 ic->streams[i]->codec->thread_count = 0;
2343 av_freep(&ic->streams[i]->info);
2344 }
2345 return ret;
2346 }
2347
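/* Return the first program that contains stream index s, or NULL if the
 * stream does not belong to any program. */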
2348 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2349 {
2350 int i, j;
2351
2352 for (i = 0; i < ic->nb_programs; i++)
2353 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2354 if (ic->programs[i]->stream_index[j] == s)
2355 return ic->programs[i];
2356 return NULL;
2357 }
2358
2359 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2360 int wanted_stream_nb, int related_stream,
2361 AVCodec **decoder_ret, int flags)
2362 {
2363 int i, nb_streams = ic->nb_streams;
2364 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2365 unsigned *program = NULL;
2366 AVCodec *decoder = NULL, *best_decoder = NULL;
2367
2368 if (related_stream >= 0 && wanted_stream_nb < 0) {
2369 AVProgram *p = find_program_from_stream(ic, related_stream);
2370 if (p) {
2371 program = p->stream_index;
2372 nb_streams = p->nb_stream_indexes;
2373 }
2374 }
2375 for (i = 0; i < nb_streams; i++) {
2376 int real_stream_index = program ? program[i] : i;
2377 AVStream *st = ic->streams[real_stream_index];
2378 AVCodecContext *avctx = st->codec;
2379 if (avctx->codec_type != type)
2380 continue;
2381 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2382 continue;
2383 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2384 AV_DISPOSITION_VISUAL_IMPAIRED))
2385 continue;
2386 if (decoder_ret) {
2387 decoder = avcodec_find_decoder(st->codec->codec_id);
2388 if (!decoder) {
2389 if (ret < 0)
2390 ret = AVERROR_DECODER_NOT_FOUND;
2391 continue;
2392 }
2393 }
2394 if (best_count >= st->codec_info_nb_frames)
2395 continue;
2396 best_count = st->codec_info_nb_frames;
2397 ret = real_stream_index;
2398 best_decoder = decoder;
2399 if (program && i == nb_streams - 1 && ret < 0) {
2400 program = NULL;
2401 nb_streams = ic->nb_streams;
2402 /* no related stream found, try again with everything */
2403 i = 0;
2404 }
2405 }
2406 if (decoder_ret)
2407 *decoder_ret = best_decoder;
2408 return ret;
2409 }
2410
2411 /*******************************************************/
2412
2413 int av_read_play(AVFormatContext *s)
2414 {
2415 if (s->iformat->read_play)
2416 return s->iformat->read_play(s);
2417 if (s->pb)
2418 return avio_pause(s->pb, 0);
2419 return AVERROR(ENOSYS);
2420 }
2421
2422 int av_read_pause(AVFormatContext *s)
2423 {
2424 if (s->iformat->read_pause)
2425 return s->iformat->read_pause(s);
2426 if (s->pb)
2427 return avio_pause(s->pb, 1);
2428 return AVERROR(ENOSYS);
2429 }
2430
2431 void avformat_free_context(AVFormatContext *s)
2432 {
2433 int i, j;
2434 AVStream *st;
2435
2436 if (!s)
2437 return;
2438
2439 av_opt_free(s);
2440 if (s->iformat && s->iformat->priv_class && s->priv_data)
2441 av_opt_free(s->priv_data);
2442
2443 for (i = 0; i < s->nb_streams; i++) {
2444 /* free all data in a stream component */
2445 st = s->streams[i];
2446
2447 for (j = 0; j < st->nb_side_data; j++)
2448 av_freep(&st->side_data[j].data);
2449 av_freep(&st->side_data);
2450 st->nb_side_data = 0;
2451
2452 if (st->parser) {
2453 av_parser_close(st->parser);
2454 }
2455 if (st->attached_pic.data)
2456 av_free_packet(&st->attached_pic);
2457 av_dict_free(&st->metadata);
2458 av_freep(&st->probe_data.buf);
2459 av_free(st->index_entries);
2460 av_free(st->codec->extradata);
2461 av_free(st->codec->subtitle_header);
2462 av_free(st->codec);
2463 av_free(st->priv_data);
2464 av_free(st->info);
2465 av_free(st);
2466 }
2467 for (i = s->nb_programs - 1; i >= 0; i--) {
2468 av_dict_free(&s->programs[i]->metadata);
2469 av_freep(&s->programs[i]->stream_index);
2470 av_freep(&s->programs[i]);
2471 }
2472 av_freep(&s->programs);
2473 av_freep(&s->priv_data);
2474 while (s->nb_chapters--) {
2475 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2476 av_free(s->chapters[s->nb_chapters]);
2477 }
2478 av_freep(&s->chapters);
2479 av_dict_free(&s->metadata);
2480 av_freep(&s->streams);
2481 av_freep(&s->internal);
2482 av_free(s);
2483 }
2484
2485 void avformat_close_input(AVFormatContext **ps)
2486 {
2487 AVFormatContext *s = *ps;
2488 AVIOContext *pb = s->pb;
2489
2490 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2491 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2492 pb = NULL;
2493
2494 flush_packet_queue(s);
2495
2496 if (s->iformat)
2497 if (s->iformat->read_close)
2498 s->iformat->read_close(s);
2499
2500 avformat_free_context(s);
2501
2502 *ps = NULL;
2503
2504 avio_close(pb);
2505 }
2506
2507 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2508 {
2509 AVStream *st;
2510 int i;
2511
2512 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2513 sizeof(*s->streams)) < 0) {
2514 s->nb_streams = 0;
2515 return NULL;
2516 }
2517
2518 st = av_mallocz(sizeof(AVStream));
2519 if (!st)
2520 return NULL;
2521 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2522 av_free(st);
2523 return NULL;
2524 }
2525
2526 st->codec = avcodec_alloc_context3(c);
2527 if (!st->codec) {
2528 av_free(st->info);
2529 av_free(st);
2530 return NULL;
2531 }
2532 if (s->iformat) {
2533 /* no default bitrate if decoding */
2534 st->codec->bit_rate = 0;
2535
2536 /* default pts setting is MPEG-like */
2537 avpriv_set_pts_info(st, 33, 1, 90000);
2538 }
2539
2540 st->index = s->nb_streams;
2541 st->start_time = AV_NOPTS_VALUE;
2542 st->duration = AV_NOPTS_VALUE;
2543 /* We set the current DTS to 0 so that formats without any timestamps
2544 * but with durations get some timestamps; formats with some unknown
2545 * timestamps have their first few packets buffered and the
2546 * timestamps corrected before they are returned to the user. */
2547 st->cur_dts = 0;
2548 st->first_dts = AV_NOPTS_VALUE;
2549 st->probe_packets = MAX_PROBE_PACKETS;
2550
2551 st->last_IP_pts = AV_NOPTS_VALUE;
2552 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2553 st->pts_buffer[i] = AV_NOPTS_VALUE;
2554
2555 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2556
2557 st->info->fps_first_dts = AV_NOPTS_VALUE;
2558 st->info->fps_last_dts = AV_NOPTS_VALUE;
2559
2560 s->streams[s->nb_streams++] = st;
2561 return st;
2562 }
2563
2564 AVProgram *av_new_program(AVFormatContext *ac, int id)
2565 {
2566 AVProgram *program = NULL;
2567 int i;
2568
2569 av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
2570
2571 for (i = 0; i < ac->nb_programs; i++)
2572 if (ac->programs[i]->id == id)
2573 program = ac->programs[i];
2574
2575 if (!program) {
2576 program = av_mallocz(sizeof(AVProgram));
2577 if (!program)
2578 return NULL;
2579 dynarray_add(&ac->programs, &ac->nb_programs, program);
2580 program->discard = AVDISCARD_NONE;
2581 }
2582 program->id = id;
2583
2584 return program;
2585 }
2586
2587 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2588 int64_t start, int64_t end, const char *title)
2589 {
2590 AVChapter *chapter = NULL;
2591 int i;
2592
2593 for (i = 0; i < s->nb_chapters; i++)
2594 if (s->chapters[i]->id == id)
2595 chapter = s->chapters[i];
2596
2597 if (!chapter) {
2598 chapter = av_mallocz(sizeof(AVChapter));
2599 if (!chapter)
2600 return NULL;
2601 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2602 }
2603 av_dict_set(&chapter->metadata, "title", title, 0);
2604 chapter->id = id;
2605 chapter->time_base = time_base;
2606 chapter->start = start;
2607 chapter->end = end;
2608
2609 return chapter;
2610 }
2611
2612 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2613 {
2614 int i, j;
2615 AVProgram *program = NULL;
2616
2617 if (idx >= ac->nb_streams) {
2618 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2619 return;
2620 }
2621
2622 for (i = 0; i < ac->nb_programs; i++) {
2623 if (ac->programs[i]->id != progid)
2624 continue;
2625 program = ac->programs[i];
2626 for (j = 0; j < program->nb_stream_indexes; j++)
2627 if (program->stream_index[j] == idx)
2628 return;
2629
2630 if (av_reallocp_array(&program->stream_index,
2631 program->nb_stream_indexes + 1,
2632 sizeof(*program->stream_index)) < 0) {
2633 program->nb_stream_indexes = 0;
2634 return;
2635 }
2636 program->stream_index[program->nb_stream_indexes++] = idx;
2637 return;
2638 }
2639 }
2640
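/* Return the current time as an NTP timestamp in microseconds: the wall
 * clock truncated to millisecond precision plus NTP_OFFSET_US. */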
2641 uint64_t ff_ntp_time(void)
2642 {
2643 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
2644 }
2645
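/* Expand the single %[0-9]*d sequence in path with number, zero-padded to
 * the requested width, and write the result into buf; "%%" yields a literal
 * '%'. Fails if there is no %d, more than one, an unknown %-specifier, or
 * if the result does not fit into buf_size. For example, "img%03d.png" with
 * number 7 produces "img007.png". */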
2646 int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
2647 {
2648 const char *p;
2649 char *q, buf1[20], c;
2650 int nd, len, percentd_found;
2651
2652 q = buf;
2653 p = path;
2654 percentd_found = 0;
2655 for (;;) {
2656 c = *p++;
2657 if (c == '\0')
2658 break;
2659 if (c == '%') {
2660 do {
2661 nd = 0;
2662 while (av_isdigit(*p))
2663 nd = nd * 10 + *p++ - '0';
2664 c = *p++;
2665 } while (av_isdigit(c));
2666
2667 switch (c) {
2668 case '%':
2669 goto addchar;
2670 case 'd':
2671 if (percentd_found)
2672 goto fail;
2673 percentd_found = 1;
2674 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2675 len = strlen(buf1);
2676 if ((q - buf + len) > buf_size - 1)
2677 goto fail;
2678 memcpy(q, buf1, len);
2679 q += len;
2680 break;
2681 default:
2682 goto fail;
2683 }
2684 } else {
2685 addchar:
2686 if ((q - buf) < buf_size - 1)
2687 *q++ = c;
2688 }
2689 }
2690 if (!percentd_found)
2691 goto fail;
2692 *q = '\0';
2693 return 0;
2694 fail:
2695 *q = '\0';
2696 return -1;
2697 }
2698
2699 void av_url_split(char *proto, int proto_size,
2700 char *authorization, int authorization_size,
2701 char *hostname, int hostname_size,
2702 int *port_ptr, char *path, int path_size, const char *url)
2703 {
2704 const char *p, *ls, *at, *col, *brk;
2705
2706 if (port_ptr)
2707 *port_ptr = -1;
2708 if (proto_size > 0)
2709 proto[0] = 0;
2710 if (authorization_size > 0)
2711 authorization[0] = 0;
2712 if (hostname_size > 0)
2713 hostname[0] = 0;
2714 if (path_size > 0)
2715 path[0] = 0;
2716
2717 /* parse protocol */
2718 if ((p = strchr(url, ':'))) {
2719 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
2720 p++; /* skip ':' */
2721 if (*p == '/')
2722 p++;
2723 if (*p == '/')
2724 p++;
2725 } else {
2726 /* no protocol means plain filename */
2727 av_strlcpy(path, url, path_size);
2728 return;
2729 }
2730
2731 /* separate path from hostname */
2732 ls = strchr(p, '/');
2733 if (!ls)
2734 ls = strchr(p, '?');
2735 if (ls)
2736 av_strlcpy(path, ls, path_size);
2737 else
2738 ls = &p[strlen(p)]; // XXX
2739
2740 /* the rest is hostname, use that to parse auth/port */
2741 if (ls != p) {
2742 /* authorization (user[:pass]@hostname) */
2743 if ((at = strchr(p, '@')) && at < ls) {
2744 av_strlcpy(authorization, p,
2745 FFMIN(authorization_size, at + 1 - p));
2746 p = at + 1; /* skip '@' */
2747 }
2748
2749 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
2750 /* [host]:port */
2751 av_strlcpy(hostname, p + 1,
2752 FFMIN(hostname_size, brk - p));
2753 if (brk[1] == ':' && port_ptr)
2754 *port_ptr = atoi(brk + 2);
2755 } else if ((col = strchr(p, ':')) && col < ls) {
2756 av_strlcpy(hostname, p,
2757 FFMIN(col + 1 - p, hostname_size));
2758 if (port_ptr)
2759 *port_ptr = atoi(col + 1);
2760 } else
2761 av_strlcpy(hostname, p,
2762 FFMIN(ls + 1 - p, hostname_size));
2763 }
2764 }
2765
2766 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
2767 {
2768 int i;
2769 static const char hex_table_uc[16] = { '0', '1', '2', '3',
2770 '4', '5', '6', '7',
2771 '8', '9', 'A', 'B',
2772 'C', 'D', 'E', 'F' };
2773 static const char hex_table_lc[16] = { '0', '1', '2', '3',
2774 '4', '5', '6', '7',
2775 '8', '9', 'a', 'b',
2776 'c', 'd', 'e', 'f' };
2777 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
2778
2779 for (i = 0; i < s; i++) {
2780 buff[i * 2] = hex_table[src[i] >> 4];
2781 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
2782 }
2783
2784 return buff;
2785 }
2786
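/* Decode a string of hexadecimal digits into bytes, skipping whitespace and
 * stopping at the first character that is not a hex digit. Returns the
 * number of bytes produced; if data is NULL, only the length is computed. */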
2787 int ff_hex_to_data(uint8_t *data, const char *p)
2788 {
2789 int c, len, v;
2790
2791 len = 0;
2792 v = 1;
2793 for (;;) {
2794 p += strspn(p, SPACE_CHARS);
2795 if (*p == '\0')
2796 break;
2797 c = av_toupper((unsigned char) *p++);
2798 if (c >= '0' && c <= '9')
2799 c = c - '0';
2800 else if (c >= 'A' && c <= 'F')
2801 c = c - 'A' + 10;
2802 else
2803 break;
2804 v = (v << 4) | c;
2805 if (v & 0x100) {
2806 if (data)
2807 data[len] = v;
2808 len++;
2809 v = 1;
2810 }
2811 }
2812 return len;
2813 }
2814
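/* Set the stream time base to pts_num / pts_den reduced to lowest terms,
 * logging when a common factor is removed or the fraction has to be
 * approximated, and ignoring values that reduce to a non-positive
 * numerator or denominator. */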
2815 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
2816 unsigned int pts_num, unsigned int pts_den)
2817 {
2818 AVRational new_tb;
2819 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
2820 if (new_tb.num != pts_num)
2821 av_log(NULL, AV_LOG_DEBUG,
2822 "st:%d removing common factor %d from timebase\n",
2823 s->index, pts_num / new_tb.num);
2824 } else
2825 av_log(NULL, AV_LOG_WARNING,
2826 "st:%d has too large timebase, reducing\n", s->index);
2827
2828 if (new_tb.num <= 0 || new_tb.den <= 0) {
2829 av_log(NULL, AV_LOG_ERROR,
2830 "Ignoring attempt to set invalid timebase for st:%d\n",
2831 s->index);
2832 return;
2833 }
2834 s->time_base = new_tb;
2835 s->pts_wrap_bits = pts_wrap_bits;
2836 }
2837
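/* Parse a string of key=value pairs separated by whitespace or commas. For
 * each key, the callback provides the destination buffer for the value;
 * values may be double-quoted, in which case a backslash escapes the
 * following character. */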
2838 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
2839 void *context)
2840 {
2841 const char *ptr = str;
2842
2843 /* Parse key=value pairs. */
2844 for (;;) {
2845 const char *key;
2846 char *dest = NULL, *dest_end;
2847 int key_len, dest_len = 0;
2848
2849 /* Skip whitespace and potential commas. */
2850 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
2851 ptr++;
2852 if (!*ptr)
2853 break;
2854
2855 key = ptr;
2856
2857 if (!(ptr = strchr(key, '=')))
2858 break;
2859 ptr++;
2860 key_len = ptr - key;
2861
2862 callback_get_buf(context, key, key_len, &dest, &dest_len);
2863 dest_end = dest + dest_len - 1;
2864
2865 if (*ptr == '\"') {
2866 ptr++;
2867 while (*ptr && *ptr != '\"') {
2868 if (*ptr == '\\') {
2869 if (!ptr[1])
2870 break;
2871 if (dest && dest < dest_end)
2872 *dest++ = ptr[1];
2873 ptr += 2;
2874 } else {
2875 if (dest && dest < dest_end)
2876 *dest++ = *ptr;
2877 ptr++;
2878 }
2879 }
2880 if (*ptr == '\"')
2881 ptr++;
2882 } else {
2883 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
2884 if (dest && dest < dest_end)
2885 *dest++ = *ptr;
2886 }
2887 if (dest)
2888 *dest = 0;
2889 }
2890 }
2891
2892 int ff_find_stream_index(AVFormatContext *s, int id)
2893 {
2894 int i;
2895 for (i = 0; i < s->nb_streams; i++)
2896 if (s->streams[i]->id == id)
2897 return i;
2898 return -1;
2899 }
2900
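/* Parse an ISO 8601 date, with either a space or a 'T' between the date and
 * the time, and return it as Unix time in seconds, interpreted as UTC. */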
2901 int64_t ff_iso8601_to_unix_time(const char *datestr)
2902 {
2903 struct tm time1 = { 0 }, time2 = { 0 };
2904 const char *ret1, *ret2;
2905 ret1 = av_small_strptime(datestr, "%Y - %m - %d %T", &time1);
2906 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%T", &time2);
2907 if (ret2 && !ret1)
2908 return av_timegm(&time2);
2909 else
2910 return av_timegm(&time1);
2911 }
2912
2913 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
2914 int std_compliance)
2915 {
2916 if (ofmt) {
2917 if (ofmt->query_codec)
2918 return ofmt->query_codec(codec_id, std_compliance);
2919 else if (ofmt->codec_tag)
2920 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
2921 else if (codec_id == ofmt->video_codec ||
2922 codec_id == ofmt->audio_codec ||
2923 codec_id == ofmt->subtitle_codec)
2924 return 1;
2925 }
2926 return AVERROR_PATCHWELCOME;
2927 }
2928
2929 int avformat_network_init(void)
2930 {
2931 #if CONFIG_NETWORK
2932 int ret;
2933 ff_network_inited_globally = 1;
2934 if ((ret = ff_network_init()) < 0)
2935 return ret;
2936 ff_tls_init();
2937 #endif
2938 return 0;
2939 }
2940
2941 int avformat_network_deinit(void)
2942 {
2943 #if CONFIG_NETWORK
2944 ff_network_close();
2945 ff_tls_deinit();
2946 #endif
2947 return 0;
2948 }
2949
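/* Attach AV_PKT_DATA_PARAM_CHANGE side data to pkt: a little-endian flags
 * word followed by each requested parameter in the order channel count,
 * channel layout, sample rate and width/height; parameters passed as 0 are
 * omitted. */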
2950 int ff_add_param_change(AVPacket *pkt, int32_t channels,
2951 uint64_t channel_layout, int32_t sample_rate,
2952 int32_t width, int32_t height)
2953 {
2954 uint32_t flags = 0;
2955 int size = 4;
2956 uint8_t *data;
2957 if (!pkt)
2958 return AVERROR(EINVAL);
2959 if (channels) {
2960 size += 4;
2961 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
2962 }
2963 if (channel_layout) {
2964 size += 8;
2965 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
2966 }
2967 if (sample_rate) {
2968 size += 4;
2969 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
2970 }
2971 if (width || height) {
2972 size += 8;
2973 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
2974 }
2975 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
2976 if (!data)
2977 return AVERROR(ENOMEM);
2978 bytestream_put_le32(&data, flags);
2979 if (channels)
2980 bytestream_put_le32(&data, channels);
2981 if (channel_layout)
2982 bytestream_put_le64(&data, channel_layout);
2983 if (sample_rate)
2984 bytestream_put_le32(&data, sample_rate);
2985 if (width || height) {
2986 bytestream_put_le32(&data, width);
2987 bytestream_put_le32(&data, height);
2988 }
2989 return 0;
2990 }
2991
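/* Install hard-coded SPS/PPS extradata for AVC-Intra streams, selected by
 * the coded width (1920, 1440 or 1280) and, for 1920, the field order; any
 * existing extradata is replaced. Streams with other widths are left
 * untouched. */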
2992 int ff_generate_avci_extradata(AVStream *st)
2993 {
2994 static const uint8_t avci100_1080p_extradata[] = {
2995 // SPS
2996 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
2997 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
2998 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
2999 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3000 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3001 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3002 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3003 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3004 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3005 // PPS
3006 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3007 0xd0
3008 };
3009 static const uint8_t avci100_1080i_extradata[] = {
3010 // SPS
3011 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3012 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3013 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3014 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3015 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3016 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3017 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3018 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3019 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3020 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3021 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
3022 // PPS
3023 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3024 0xd0
3025 };
3026 static const uint8_t avci50_1080i_extradata[] = {
3027 // SPS
3028 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3029 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3030 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3031 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3032 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3033 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3034 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3035 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3036 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3037 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3038 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
3039 // PPS
3040 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
3041 0x11
3042 };
3043 static const uint8_t avci100_720p_extradata[] = {
3044 // SPS
3045 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3046 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3047 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3048 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3049 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3050 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3051 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3052 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3053 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3054 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
3055 // PPS
3056 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
3057 0x11
3058 };
3059
3060 const uint8_t *data = NULL;
3061 int size = 0;
3062
3063 if (st->codec->width == 1920) {
3064 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
3065 data = avci100_1080p_extradata;
3066 size = sizeof(avci100_1080p_extradata);
3067 } else {
3068 data = avci100_1080i_extradata;
3069 size = sizeof(avci100_1080i_extradata);
3070 }
3071 } else if (st->codec->width == 1440) {
3072 data = avci50_1080i_extradata;
3073 size = sizeof(avci50_1080i_extradata);
3074 } else if (st->codec->width == 1280) {
3075 data = avci100_720p_extradata;
3076 size = sizeof(avci100_720p_extradata);
3077 }
3078
3079 if (!size)
3080 return 0;
3081
3082 av_freep(&st->codec->extradata);
3083 st->codec->extradata_size = 0;
3084 st->codec->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
3085 if (!st->codec->extradata)
3086 return AVERROR(ENOMEM);
3087
3088 memcpy(st->codec->extradata, data, size);
3089 st->codec->extradata_size = size;
3090
3091 return 0;
3092 }
3093
3094 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3095 int *size)