1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #undef NDEBUG
23 #include <assert.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26
27 #include "config.h"
28
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
38
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
41
42 #include "audiointerleave.h"
43 #include "avformat.h"
44 #include "id3v2.h"
45 #include "internal.h"
46 #include "metadata.h"
47 #if CONFIG_NETWORK
48 #include "network.h"
49 #endif
50 #include "riff.h"
51 #include "url.h"
52
53 /**
54 * @file
55 * various utility functions for use within Libav
56 */
57
58 unsigned avformat_version(void)
59 {
60 return LIBAVFORMAT_VERSION_INT;
61 }
62
63 const char *avformat_configuration(void)
64 {
65 return LIBAV_CONFIGURATION;
66 }
67
68 const char *avformat_license(void)
69 {
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
72 }
73
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
76
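/* Rationale for the chunked reads below: a caller (or a corrupted size field
 * in a file) may request a very large packet. Rather than allocating the
 * whole buffer up front, append_packet_chunked() grows the packet and reads
 * at most chunk_size bytes per iteration. For example, a 200 MB request
 * against an input whose remaining size is unknown is served in
 * SANE_CHUNK_SIZE (50 MB) steps, so a truncated or lying input fails after a
 * bounded allocation instead of one huge one. */
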
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
79 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
80 {
81 int64_t chunk_size = size;
82 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
83 int orig_size = pkt->size;
84 int ret = 0;
85
86 do {
87 int prev_size = pkt->size;
88 int read_size;
89
90 /* When the caller requests a lot of data, limit it to the amount
91 * left in file or SANE_CHUNK_SIZE when it is not known. */
92 if (size > SANE_CHUNK_SIZE) {
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
95 }
96 read_size = FFMIN(size, chunk_size);
97
98 ret = av_grow_packet(pkt, read_size);
99 if (ret < 0)
100 break;
101
102 ret = avio_read(s, pkt->data + prev_size, read_size);
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
105 break;
106 }
107
108 size -= read_size;
109 } while (size > 0);
110
111 pkt->pos = orig_pos;
112 if (!pkt->size)
113 av_free_packet(pkt);
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
115 }
116
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
118 {
119 av_init_packet(pkt);
120 pkt->data = NULL;
121 pkt->size = 0;
122 pkt->pos = avio_tell(s);
123
124 return append_packet_chunked(s, pkt, size);
125 }
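
/* Minimal caller-side sketch of av_get_packet() ("pb" is a placeholder for
 * an already opened AVIOContext):
 * @code
 * AVPacket pkt;
 * int ret = av_get_packet(pb, &pkt, 4096);   // read up to 4096 bytes
 * if (ret < 0)
 *     return ret;          // nothing was read, the packet is already freed
 * // ... use pkt.data / pkt.size (may be shorter than requested) ...
 * av_free_packet(&pkt);
 * @endcode
 */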
126
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
128 {
129 if (!pkt->size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
132 }
133
134 int av_filename_number_test(const char *filename)
135 {
136 char buf[1024];
137 return filename &&
138 (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
139 }
140
141 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
142 AVProbeData *pd, int score)
143 {
144 static const struct {
145 const char *name;
146 enum AVCodecID id;
147 enum AVMediaType type;
148 } fmt_id_type[] = {
149 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
150 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
151 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
152 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
153 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
154 { "latm", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
155 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
156 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
157 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
158 { 0 }
159 };
160 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
161
162 if (fmt) {
163 int i;
164 av_log(s, AV_LOG_DEBUG,
165 "Probe with size=%d, packets=%d detected %s with score=%d\n",
166 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
167 fmt->name, score);
168 for (i = 0; fmt_id_type[i].name; i++) {
169 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
170 st->codec->codec_id = fmt_id_type[i].id;
171 st->codec->codec_type = fmt_id_type[i].type;
172 break;
173 }
174 }
175 }
176 return !!fmt;
177 }
178
179 /************************************************************/
180 /* input media file */
181
182 /* Open input file and probe the format if necessary. */
183 static int init_input(AVFormatContext *s, const char *filename,
184 AVDictionary **options)
185 {
186 int ret;
187 AVProbeData pd = { filename, NULL, 0 };
188
189 if (s->pb) {
190 s->flags |= AVFMT_FLAG_CUSTOM_IO;
191 if (!s->iformat)
192 return av_probe_input_buffer(s->pb, &s->iformat, filename,
193 s, 0, s->probesize);
194 else if (s->iformat->flags & AVFMT_NOFILE)
195 return AVERROR(EINVAL);
196 return 0;
197 }
198
199 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
200 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
201 return 0;
202
203 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
204 &s->interrupt_callback, options)) < 0)
205 return ret;
206 if (s->iformat)
207 return 0;
208 return av_probe_input_buffer(s->pb, &s->iformat, filename,
209 s, 0, s->probesize);
210 }
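
/* init_input() above distinguishes three setups:
 *   1. the caller supplied its own AVIOContext in s->pb: mark it as custom
 *      I/O and, if no input format was given, probe that buffer;
 *   2. no I/O is needed: either a caller-supplied AVFMT_NOFILE format, or a
 *      format that could be identified from the filename alone;
 *   3. otherwise: open the URL with avio_open2() and, if the format is still
 *      unknown, probe the opened buffer. */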
211
212 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
213 AVPacketList **plast_pktl)
214 {
215 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
216 if (!pktl)
217 return NULL;
218
219 if (*packet_buffer)
220 (*plast_pktl)->next = pktl;
221 else
222 *packet_buffer = pktl;
223
224 /* Add the packet to the buffered packet list. */
225 *plast_pktl = pktl;
226 pktl->pkt = *pkt;
227 return &pktl->pkt;
228 }
229
230 static int queue_attached_pictures(AVFormatContext *s)
231 {
232 int i;
233 for (i = 0; i < s->nb_streams; i++)
234 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
235 s->streams[i]->discard < AVDISCARD_ALL) {
236 AVPacket copy = s->streams[i]->attached_pic;
237 copy.buf = av_buffer_ref(copy.buf);
238 if (!copy.buf)
239 return AVERROR(ENOMEM);
240
241 add_to_pktbuf(&s->internal->raw_packet_buffer, &copy,
242 &s->internal->raw_packet_buffer_end);
243 }
244 return 0;
245 }
246
247 int avformat_open_input(AVFormatContext **ps, const char *filename,
248 AVInputFormat *fmt, AVDictionary **options)
249 {
250 AVFormatContext *s = *ps;
251 int ret = 0;
252 AVDictionary *tmp = NULL;
253 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
254
255 if (!s && !(s = avformat_alloc_context()))
256 return AVERROR(ENOMEM);
257 if (fmt)
258 s->iformat = fmt;
259
260 if (options)
261 av_dict_copy(&tmp, *options, 0);
262
263 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
264 goto fail;
265
266 if ((ret = init_input(s, filename, &tmp)) < 0)
267 goto fail;
268
269 /* Check filename in case an image number is expected. */
270 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
271 if (!av_filename_number_test(filename)) {
272 ret = AVERROR(EINVAL);
273 goto fail;
274 }
275 }
276
277 s->duration = s->start_time = AV_NOPTS_VALUE;
278 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
279
280 /* Allocate private data. */
281 if (s->iformat->priv_data_size > 0) {
282 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
283 ret = AVERROR(ENOMEM);
284 goto fail;
285 }
286 if (s->iformat->priv_class) {
287 *(const AVClass **) s->priv_data = s->iformat->priv_class;
288 av_opt_set_defaults(s->priv_data);
289 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
290 goto fail;
291 }
292 }
293
294 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
295 if (s->pb)
296 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
297
298 if (s->iformat->read_header)
299 if ((ret = s->iformat->read_header(s)) < 0)
300 goto fail;
301
302 if (id3v2_extra_meta &&
303 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
304 goto fail;
305 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
306
307 if ((ret = queue_attached_pictures(s)) < 0)
308 goto fail;
309
310 if (s->pb && !s->internal->data_offset)
311 s->internal->data_offset = avio_tell(s->pb);
312
313 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
314
315 if (options) {
316 av_dict_free(options);
317 *options = tmp;
318 }
319 *ps = s;
320 return 0;
321
322 fail:
323 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
324 av_dict_free(&tmp);
325 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
326 avio_close(s->pb);
327 avformat_free_context(s);
328 *ps = NULL;
329 return ret;
330 }
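
/* Typical caller-side use of avformat_open_input() (sketch only; the file
 * name, option value and variable names are placeholders):
 * @code
 * AVFormatContext *ic = NULL;           // NULL: let lavf allocate it
 * AVDictionary *opts = NULL;
 * av_dict_set(&opts, "probesize", "1000000", 0);
 * if (avformat_open_input(&ic, "input.ts", NULL, &opts) < 0)
 *     return -1;                        // on failure *ic is set to NULL
 * av_dict_free(&opts);                  // entries left were not recognized
 * // ... avformat_find_stream_info(ic, NULL), av_read_frame(ic, &pkt), ...
 * avformat_close_input(&ic);
 * @endcode
 */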
331
332 /*******************************************************/
333
334 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
335 {
336 if (st->codec->codec_id == AV_CODEC_ID_PROBE) {
337 AVProbeData *pd = &st->probe_data;
338 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
339 --st->probe_packets;
340
341 if (pkt) {
342 int err;
343 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
344 AVPROBE_PADDING_SIZE)) < 0)
345 return err;
346 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
347 pd->buf_size += pkt->size;
348 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
349 } else {
350 st->probe_packets = 0;
351 if (!pd->buf_size) {
352 av_log(s, AV_LOG_ERROR,
353 "nothing to probe for stream %d\n", st->index);
354 return 0;
355 }
356 }
357
358 if (!st->probe_packets ||
359 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
360 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
361 ? AVPROBE_SCORE_MAX / 4 : 0);
362 if (st->codec->codec_id != AV_CODEC_ID_PROBE) {
363 pd->buf_size = 0;
364 av_freep(&pd->buf);
365 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
366 }
367 }
368 }
369 return 0;
370 }
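
/* Note on probe_codec() above: the codec probe is only re-run when it has a
 * realistic chance of succeeding with more data, i.e. when the accumulated
 * probe buffer crosses a power-of-two size boundary (the av_log2() check) or
 * when no more probe packets will arrive; in the latter case the required
 * probe score is dropped to 0 as a last resort. */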
371
372 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
373 {
374 int ret, i, err;
375 AVStream *st;
376
377 for (;;) {
378 AVPacketList *pktl = s->internal->raw_packet_buffer;
379
380 if (pktl) {
381 *pkt = pktl->pkt;
382 st = s->streams[pkt->stream_index];
383 if (st->codec->codec_id != AV_CODEC_ID_PROBE ||
384 !st->probe_packets ||
385 s->internal->raw_packet_buffer_remaining_size < pkt->size) {
386 AVProbeData *pd;
387 if (st->probe_packets)
388 if ((err = probe_codec(s, st, NULL)) < 0)
389 return err;
390 pd = &st->probe_data;
391 av_freep(&pd->buf);
392 pd->buf_size = 0;
393 s->internal->raw_packet_buffer = pktl->next;
394 s->internal->raw_packet_buffer_remaining_size += pkt->size;
395 av_free(pktl);
396 return 0;
397 }
398 }
399
400 pkt->data = NULL;
401 pkt->size = 0;
402 av_init_packet(pkt);
403 ret = s->iformat->read_packet(s, pkt);
404 if (ret < 0) {
405 if (!pktl || ret == AVERROR(EAGAIN))
406 return ret;
407 for (i = 0; i < s->nb_streams; i++) {
408 st = s->streams[i];
409 if (st->probe_packets)
410 if ((err = probe_codec(s, st, NULL)) < 0)
411 return err;
412 }
413 continue;
414 }
415
416 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
417 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
418 av_log(s, AV_LOG_WARNING,
419 "Dropped corrupted packet (stream = %d)\n",
420 pkt->stream_index);
421 av_free_packet(pkt);
422 continue;
423 }
424
425 st = s->streams[pkt->stream_index];
426
427 switch (st->codec->codec_type) {
428 case AVMEDIA_TYPE_VIDEO:
429 if (s->video_codec_id)
430 st->codec->codec_id = s->video_codec_id;
431 break;
432 case AVMEDIA_TYPE_AUDIO:
433 if (s->audio_codec_id)
434 st->codec->codec_id = s->audio_codec_id;
435 break;
436 case AVMEDIA_TYPE_SUBTITLE:
437 if (s->subtitle_codec_id)
438 st->codec->codec_id = s->subtitle_codec_id;
439 break;
440 }
441
442 if (!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
443 !st->probe_packets))
444 return ret;
445
446 add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
447 &s->internal->raw_packet_buffer_end);
448 s->internal->raw_packet_buffer_remaining_size -= pkt->size;
449
450 if ((err = probe_codec(s, st, pkt)) < 0)
451 return err;
452 }
453 }
454
455 /**********************************************************/
456
457 /**
458 * Compute the frame duration in seconds as the fraction *pnum / *pden. Set both to 0 if it is not available.
459 */
460 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
461 AVCodecParserContext *pc, AVPacket *pkt)
462 {
463 AVRational codec_framerate = s->iformat ? st->codec->framerate :
464 av_inv_q(st->codec->time_base);
465 int frame_size;
466
467 *pnum = 0;
468 *pden = 0;
469 switch (st->codec->codec_type) {
470 case AVMEDIA_TYPE_VIDEO:
471 if (st->avg_frame_rate.num) {
472 *pnum = st->avg_frame_rate.den;
473 *pden = st->avg_frame_rate.num;
474 } else if (st->time_base.num * 1000LL > st->time_base.den) {
475 *pnum = st->time_base.num;
476 *pden = st->time_base.den;
477 } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
478 *pnum = codec_framerate.den;
479 *pden = codec_framerate.num;
480 if (pc && pc->repeat_pict) {
481 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
482 *pden /= 1 + pc->repeat_pict;
483 else
484 *pnum *= 1 + pc->repeat_pict;
485 }
486 /* If this codec can be interlaced or progressive then we need
487 * a parser to compute the duration of a packet. If we have
488 * no parser in that case, leave the duration undefined. */
489 if (st->codec->ticks_per_frame > 1 && !pc)
490 *pnum = *pden = 0;
491 }
492 break;
493 case AVMEDIA_TYPE_AUDIO:
494 frame_size = av_get_audio_frame_duration(st->codec, pkt->size);
495 if (frame_size <= 0 || st->codec->sample_rate <= 0)
496 break;
497 *pnum = frame_size;
498 *pden = st->codec->sample_rate;
499 break;
500 default:
501 break;
502 }
503 }
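
/* Example of how the (*pnum, *pden) pair set above turns into a packet
 * duration in compute_pkt_fields(): for a 25 fps video stream with a 1/90000
 * time base, *pnum = 1 and *pden = 25, so
 *     duration = 1 * 90000 / 25 = 3600 stream time-base units per frame.
 * For audio, *pnum is the frame size in samples and *pden the sample rate,
 * which yields the per-packet tick count the same way. */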
504
505 static int is_intra_only(enum AVCodecID id)
506 {
507 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
508 if (!d)
509 return 0;
510 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
511 return 0;
512 return 1;
513 }
514
515 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
516 int64_t dts, int64_t pts)
517 {
518 AVStream *st = s->streams[stream_index];
519 AVPacketList *pktl = s->internal->packet_buffer;
520
521 if (st->first_dts != AV_NOPTS_VALUE ||
522 dts == AV_NOPTS_VALUE ||
523 st->cur_dts == AV_NOPTS_VALUE)
524 return;
525
526 st->first_dts = dts - st->cur_dts;
527 st->cur_dts = dts;
528
529 for (; pktl; pktl = pktl->next) {
530 if (pktl->pkt.stream_index != stream_index)
531 continue;
532 // FIXME: think more about this check
533 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
534 pktl->pkt.pts += st->first_dts;
535
536 if (pktl->pkt.dts != AV_NOPTS_VALUE)
537 pktl->pkt.dts += st->first_dts;
538
539 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
540 st->start_time = pktl->pkt.pts;
541 }
542 if (st->start_time == AV_NOPTS_VALUE)
543 st->start_time = pts;
544 }
545
546 static void update_initial_durations(AVFormatContext *s, AVStream *st,
547 int stream_index, int duration)
548 {
549 AVPacketList *pktl = s->internal->packet_buffer;
550 int64_t cur_dts = 0;
551
552 if (st->first_dts != AV_NOPTS_VALUE) {
553 cur_dts = st->first_dts;
554 for (; pktl; pktl = pktl->next) {
555 if (pktl->pkt.stream_index == stream_index) {
556 if (pktl->pkt.pts != pktl->pkt.dts ||
557 pktl->pkt.dts != AV_NOPTS_VALUE ||
558 pktl->pkt.duration)
559 break;
560 cur_dts -= duration;
561 }
562 }
563 pktl = s->internal->packet_buffer;
564 st->first_dts = cur_dts;
565 } else if (st->cur_dts)
566 return;
567
568 for (; pktl; pktl = pktl->next) {
569 if (pktl->pkt.stream_index != stream_index)
570 continue;
571 if (pktl->pkt.pts == pktl->pkt.dts &&
572 pktl->pkt.dts == AV_NOPTS_VALUE &&
573 !pktl->pkt.duration) {
574 pktl->pkt.dts = cur_dts;
575 if (!st->codec->has_b_frames)
576 pktl->pkt.pts = cur_dts;
577 cur_dts += duration;
578 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
579 pktl->pkt.duration = duration;
580 } else
581 break;
582 }
583 if (st->first_dts == AV_NOPTS_VALUE)
584 st->cur_dts = cur_dts;
585 }
586
587 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
588 AVCodecParserContext *pc, AVPacket *pkt)
589 {
590 int num, den, presentation_delayed, delay, i;
591 int64_t offset;
592
593 if (s->flags & AVFMT_FLAG_NOFILLIN)
594 return;
595
596 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
597 pkt->dts = AV_NOPTS_VALUE;
598
599 /* do we have a video B-frame? */
600 delay = st->codec->has_b_frames;
601 presentation_delayed = 0;
602
603 /* XXX: need has_b_frame, but cannot get it if the codec is
604 * not initialized */
605 if (delay &&
606 pc && pc->pict_type != AV_PICTURE_TYPE_B)
607 presentation_delayed = 1;
608
609 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
610 st->pts_wrap_bits < 63 &&
611 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
612 pkt->dts -= 1LL << st->pts_wrap_bits;
613 }
614
615 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
616 * We take the conservative approach and discard both.
617 * Note: If this is misbehaving for an H.264 file, then possibly
618 * presentation_delayed is not set correctly. */
619 if (delay == 1 && pkt->dts == pkt->pts &&
620 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
621 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
622 pkt->dts = AV_NOPTS_VALUE;
623 }
624
625 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
626 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
627 if (den && num) {
628 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
629 den * (int64_t) st->time_base.num,
630 AV_ROUND_DOWN);
631
632 if (pkt->duration != 0 && s->internal->packet_buffer)
633 update_initial_durations(s, st, pkt->stream_index,
634 pkt->duration);
635 }
636 }
637
638 /* Correct timestamps with byte offset if demuxers only have timestamps
639 * on packet boundaries */
640 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
641 /* this will estimate bitrate based on this frame's duration and size */
642 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
643 if (pkt->pts != AV_NOPTS_VALUE)
644 pkt->pts += offset;
645 if (pkt->dts != AV_NOPTS_VALUE)
646 pkt->dts += offset;
647 }
648
649 /* This may be redundant, but it should not hurt. */
650 if (pkt->dts != AV_NOPTS_VALUE &&
651 pkt->pts != AV_NOPTS_VALUE &&
652 pkt->pts > pkt->dts)
653 presentation_delayed = 1;
654
655 av_log(NULL, AV_LOG_TRACE,
656 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
657 "cur_dts:%"PRId64" st:%d pc:%p\n",
658 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
659 pkt->stream_index, pc);
660 /* Interpolate PTS and DTS if they are not present. We skip H.264
661 * currently because delay and has_b_frames are not reliably set. */
662 if ((delay == 0 || (delay == 1 && pc)) &&
663 st->codec->codec_id != AV_CODEC_ID_H264) {
664 if (presentation_delayed) {
665 /* DTS = decompression timestamp */
666 /* PTS = presentation timestamp */
667 if (pkt->dts == AV_NOPTS_VALUE)
668 pkt->dts = st->last_IP_pts;
669 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
670 if (pkt->dts == AV_NOPTS_VALUE)
671 pkt->dts = st->cur_dts;
672
673 /* This is tricky: the dts must be incremented by the duration
674 * of the frame we are displaying, i.e. the last I- or P-frame. */
675 if (st->last_IP_duration == 0)
676 st->last_IP_duration = pkt->duration;
677 if (pkt->dts != AV_NOPTS_VALUE)
678 st->cur_dts = pkt->dts + st->last_IP_duration;
679 st->last_IP_duration = pkt->duration;
680 st->last_IP_pts = pkt->pts;
681 /* Cannot compute PTS if not present (we can compute it only
682 * by knowing the future). */
683 } else if (pkt->pts != AV_NOPTS_VALUE ||
684 pkt->dts != AV_NOPTS_VALUE ||
685 pkt->duration ||
686 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
687 int duration = pkt->duration;
688 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
689 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
690 if (den && num) {
691 duration = av_rescale_rnd(1,
692 num * (int64_t) st->time_base.den,
693 den * (int64_t) st->time_base.num,
694 AV_ROUND_DOWN);
695 if (duration != 0 && s->internal->packet_buffer)
696 update_initial_durations(s, st, pkt->stream_index,
697 duration);
698 }
699 }
700
701 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
702 duration) {
703 /* presentation is not delayed: PTS and DTS are the same */
704 if (pkt->pts == AV_NOPTS_VALUE)
705 pkt->pts = pkt->dts;
706 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
707 pkt->pts);
708 if (pkt->pts == AV_NOPTS_VALUE)
709 pkt->pts = st->cur_dts;
710 pkt->dts = pkt->pts;
711 if (pkt->pts != AV_NOPTS_VALUE)
712 st->cur_dts = pkt->pts + duration;
713 }
714 }
715 }
716
717 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
718 st->pts_buffer[0] = pkt->pts;
719 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
720 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
721 if (pkt->dts == AV_NOPTS_VALUE)
722 pkt->dts = st->pts_buffer[0];
723 // H.264 was skipped above, so try to set the initial timestamps here.
724 if (st->codec->codec_id == AV_CODEC_ID_H264)
725 // This should happen on the first packet
726 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
727 if (pkt->dts > st->cur_dts)
728 st->cur_dts = pkt->dts;
729 }
730
731 av_log(NULL, AV_LOG_TRACE,
732 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
733 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
734
735 /* update flags */
736 if (is_intra_only(st->codec->codec_id))
737 pkt->flags |= AV_PKT_FLAG_KEY;
738 if (pc)
739 pkt->convergence_duration = pc->convergence_duration;
740 }
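
/* Note on the pts_buffer logic above: the last (delay + 1) PTS values are
 * kept sorted with a small insertion step, so st->pts_buffer[0] is always
 * the smallest of them. With B-frame reordering, the smallest PTS seen so
 * far is a usable guess for the missing DTS of the current packet. */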
741
742 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
743 {
744 while (*pkt_buf) {
745 AVPacketList *pktl = *pkt_buf;
746 *pkt_buf = pktl->next;
747 av_free_packet(&pktl->pkt);
748 av_freep(&pktl);
749 }
750 *pkt_buf_end = NULL;
751 }
752
753 /**
754 * Parse a packet, add all split parts to parse_queue.
755 *
756 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
757 */
758 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
759 {
760 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
761 AVStream *st = s->streams[stream_index];
762 uint8_t *data = pkt ? pkt->data : NULL;
763 int size = pkt ? pkt->size : 0;
764 int ret = 0, got_output = 0;
765
766 if (!pkt) {
767 av_init_packet(&flush_pkt);
768 pkt = &flush_pkt;
769 got_output = 1;
770 }
771
772 while (size > 0 || (pkt == &flush_pkt && got_output)) {
773 int len;
774
775 av_init_packet(&out_pkt);
776 len = av_parser_parse2(st->parser, st->codec,
777 &out_pkt.data, &out_pkt.size, data, size,
778 pkt->pts, pkt->dts, pkt->pos);
779
780 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
781 /* increment read pointer */
782 data += len;
783 size -= len;
784
785 got_output = !!out_pkt.size;
786
787 if (!out_pkt.size)
788 continue;
789
790 if (pkt->side_data) {
791 out_pkt.side_data = pkt->side_data;
792 out_pkt.side_data_elems = pkt->side_data_elems;
793 pkt->side_data = NULL;
794 pkt->side_data_elems = 0;
795 }
796
797 /* set the duration */
798 out_pkt.duration = 0;
799 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
800 if (st->codec->sample_rate > 0) {
801 out_pkt.duration =
802 av_rescale_q_rnd(st->parser->duration,
803 (AVRational) { 1, st->codec->sample_rate },
804 st->time_base,
805 AV_ROUND_DOWN);
806 }
807 }
808
809 out_pkt.stream_index = st->index;
810 out_pkt.pts = st->parser->pts;
811 out_pkt.dts = st->parser->dts;
812 out_pkt.pos = st->parser->pos;
813
814 if (st->parser->key_frame == 1 ||
815 (st->parser->key_frame == -1 &&
816 st->parser->pict_type == AV_PICTURE_TYPE_I))
817 out_pkt.flags |= AV_PKT_FLAG_KEY;
818
819 compute_pkt_fields(s, st, st->parser, &out_pkt);
820
821 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
822 out_pkt.flags & AV_PKT_FLAG_KEY) {
823 ff_reduce_index(s, st->index);
824 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
825 0, 0, AVINDEX_KEYFRAME);
826 }
827
828 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
829 out_pkt.buf = pkt->buf;
830 pkt->buf = NULL;
831 }
832 if ((ret = av_dup_packet(&out_pkt)) < 0)
833 goto fail;
834
835 if (!add_to_pktbuf(&s->internal->parse_queue, &out_pkt, &s->internal->parse_queue_end)) {
836 av_free_packet(&out_pkt);
837 ret = AVERROR(ENOMEM);
838 goto fail;
839 }
840 }
841
842 /* end of the stream => close and free the parser */
843 if (pkt == &flush_pkt) {
844 av_parser_close(st->parser);
845 st->parser = NULL;
846 }
847
848 fail:
849 av_free_packet(pkt);
850 return ret;
851 }
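
/* Note on parse_packet() above: av_parser_parse2() consumes the raw input
 * and may emit zero, one, or several complete frames per call, so a single
 * demuxed packet can turn into any number of entries in parse_queue. A NULL
 * input packet acts as a flush: the parser is drained of buffered data and
 * then closed. */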
852
853 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
854 AVPacketList **pkt_buffer_end,
855 AVPacket *pkt)
856 {
857 AVPacketList *pktl;
858 av_assert0(*pkt_buffer);
859 pktl = *pkt_buffer;
860 *pkt = pktl->pkt;
861 *pkt_buffer = pktl->next;
862 if (!pktl->next)
863 *pkt_buffer_end = NULL;
864 av_freep(&pktl);
865 return 0;
866 }
867
868 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
869 {
870 int ret = 0, i, got_packet = 0;
871 AVDictionary *metadata = NULL;
872
873 av_init_packet(pkt);
874
875 while (!got_packet && !s->internal->parse_queue) {
876 AVStream *st;
877 AVPacket cur_pkt;
878
879 /* read next packet */
880 ret = ff_read_packet(s, &cur_pkt);
881 if (ret < 0) {
882 if (ret == AVERROR(EAGAIN))
883 return ret;
884 /* flush the parsers */
885 for (i = 0; i < s->nb_streams; i++) {
886 st = s->streams[i];
887 if (st->parser && st->need_parsing)
888 parse_packet(s, NULL, st->index);
889 }
890 /* all remaining packets are now in parse_queue =>
891 * really terminate parsing */
892 break;
893 }
894 ret = 0;
895 st = s->streams[cur_pkt.stream_index];
896
897 if (cur_pkt.pts != AV_NOPTS_VALUE &&
898 cur_pkt.dts != AV_NOPTS_VALUE &&
899 cur_pkt.pts < cur_pkt.dts) {
900 av_log(s, AV_LOG_WARNING,
901 "Invalid timestamps stream=%d, pts=%"PRId64", "
902 "dts=%"PRId64", size=%d\n",
903 cur_pkt.stream_index, cur_pkt.pts,
904 cur_pkt.dts, cur_pkt.size);
905 }
906 if (s->debug & FF_FDEBUG_TS)
907 av_log(s, AV_LOG_DEBUG,
908 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
909 "size=%d, duration=%d, flags=%d\n",
910 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
911 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
912
913 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
914 st->parser = av_parser_init(st->codec->codec_id);
915 if (!st->parser)
916 /* no parser available: just output the raw packets */
917 st->need_parsing = AVSTREAM_PARSE_NONE;
918 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
919 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
920 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
921 st->parser->flags |= PARSER_FLAG_ONCE;
922 }
923
924 if (!st->need_parsing || !st->parser) {
925 /* no parsing needed: we just output the packet as is */
926 *pkt = cur_pkt;
927 compute_pkt_fields(s, st, NULL, pkt);
928 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
929 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
930 ff_reduce_index(s, st->index);
931 av_add_index_entry(st, pkt->pos, pkt->dts,
932 0, 0, AVINDEX_KEYFRAME);
933 }
934 got_packet = 1;
935 } else if (st->discard < AVDISCARD_ALL) {
936 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
937 return ret;
938 } else {
939 /* free packet */
940 av_free_packet(&cur_pkt);
941 }
942 }
943
944 if (!got_packet && s->internal->parse_queue)
945 ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);
946
947 av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
948 if (metadata) {
949 s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
950 av_dict_copy(&s->metadata, metadata, 0);
951 av_dict_free(&metadata);
952 av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
953 }
954
955 if (s->debug & FF_FDEBUG_TS)
956 av_log(s, AV_LOG_DEBUG,
957 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
958 "size=%d, duration=%d, flags=%d\n",
959 pkt->stream_index, pkt->pts, pkt->dts,
960 pkt->size, pkt->duration, pkt->flags);
961
962 return ret;
963 }
964
965 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
966 {
967 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
968 int eof = 0;
969
970 if (!genpts)
971 return s->internal->packet_buffer
972 ? read_from_packet_buffer(&s->internal->packet_buffer,
973 &s->internal->packet_buffer_end, pkt)
974 : read_frame_internal(s, pkt);
975
976 for (;;) {
977 int ret;
978 AVPacketList *pktl = s->internal->packet_buffer;
979
980 if (pktl) {
981 AVPacket *next_pkt = &pktl->pkt;
982
983 if (next_pkt->dts != AV_NOPTS_VALUE) {
984 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
985 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
986 if (pktl->pkt.stream_index == next_pkt->stream_index &&
987 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
988 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
989 // not B-frame
990 next_pkt->pts = pktl->pkt.dts;
991 }
992 pktl = pktl->next;
993 }
994 pktl = s->internal->packet_buffer;
995 }
996
997 /* read packet from packet buffer, if there is data */
998 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
999 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1000 return read_from_packet_buffer(&s->internal->packet_buffer,
1001 &s->internal->packet_buffer_end, pkt);
1002 }
1003
1004 ret = read_frame_internal(s, pkt);
1005 if (ret < 0) {
1006 if (pktl && ret != AVERROR(EAGAIN)) {
1007 eof = 1;
1008 continue;
1009 } else
1010 return ret;
1011 }
1012
1013 if (av_dup_packet(add_to_pktbuf(&s->internal->packet_buffer, pkt,
1014 &s->internal->packet_buffer_end)) < 0)
1015 return AVERROR(ENOMEM);
1016 }
1017 }
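
/* Typical demuxing loop built on av_read_frame() (sketch only; "ic" and
 * "wanted_stream" are placeholders):
 * @code
 * AVPacket pkt;
 * while (av_read_frame(ic, &pkt) >= 0) {
 *     if (pkt.stream_index == wanted_stream) {
 *         // ... feed pkt to a decoder or muxer ...
 *     }
 *     av_free_packet(&pkt);
 * }
 * @endcode
 */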
1018
1019 /* XXX: empty the packet queue */
1020 static void flush_packet_queue(AVFormatContext *s)
1021 {
1022 free_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end);
1023 free_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end);
1024 free_packet_buffer(&s->internal->raw_packet_buffer, &s->internal->raw_packet_buffer_end);
1025
1026 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1027 }
1028
1029 /*******************************************************/
1030 /* seek support */
1031
1032 int av_find_default_stream_index(AVFormatContext *s)
1033 {
1034 int first_audio_index = -1;
1035 int i;
1036 AVStream *st;
1037
1038 if (s->nb_streams <= 0)
1039 return -1;
1040 for (i = 0; i < s->nb_streams; i++) {
1041 st = s->streams[i];
1042 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1043 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1044 return i;
1045 }
1046 if (first_audio_index < 0 &&
1047 st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1048 first_audio_index = i;
1049 }
1050 return first_audio_index >= 0 ? first_audio_index : 0;
1051 }
1052
1053 /** Flush the frame reader. */
1054 void ff_read_frame_flush(AVFormatContext *s)
1055 {
1056 AVStream *st;
1057 int i, j;
1058
1059 flush_packet_queue(s);
1060
1061 /* Reset read state for each stream. */
1062 for (i = 0; i < s->nb_streams; i++) {
1063 st = s->streams[i];
1064
1065 if (st->parser) {
1066 av_parser_close(st->parser);
1067 st->parser = NULL;
1068 }
1069 st->last_IP_pts = AV_NOPTS_VALUE;
1070 /* We set the current DTS to an unspecified origin. */
1071 st->cur_dts = AV_NOPTS_VALUE;
1072
1073 st->probe_packets = MAX_PROBE_PACKETS;
1074
1075 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1076 st->pts_buffer[j] = AV_NOPTS_VALUE;
1077 }
1078 }
1079
1080 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1081 {
1082 int i;
1083
1084 for (i = 0; i < s->nb_streams; i++) {
1085 AVStream *st = s->streams[i];
1086
1087 st->cur_dts =
1088 av_rescale(timestamp,
1089 st->time_base.den * (int64_t) ref_st->time_base.num,
1090 st->time_base.num * (int64_t) ref_st->time_base.den);
1091 }
1092 }
1093
1094 void ff_reduce_index(AVFormatContext *s, int stream_index)
1095 {
1096 AVStream *st = s->streams[stream_index];
1097 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1098
1099 if ((unsigned) st->nb_index_entries >= max_entries) {
1100 int i;
1101 for (i = 0; 2 * i < st->nb_index_entries; i++)
1102 st->index_entries[i] = st->index_entries[2 * i];
1103 st->nb_index_entries = i;
1104 }
1105 }
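
/* ff_reduce_index() above keeps the seek index bounded: once the number of
 * entries would exceed s->max_index_size bytes worth of AVIndexEntry, every
 * second entry is dropped, halving the memory use while keeping the index
 * roughly uniform over the file. */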
1106
1107 int ff_add_index_entry(AVIndexEntry **index_entries,
1108 int *nb_index_entries,
1109 unsigned int *index_entries_allocated_size,
1110 int64_t pos, int64_t timestamp,
1111 int size, int distance, int flags)
1112 {
1113 AVIndexEntry *entries, *ie;
1114 int index;
1115
1116 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1117 return -1;
1118
1119 entries = av_fast_realloc(*index_entries,
1120 index_entries_allocated_size,
1121 (*nb_index_entries + 1) *
1122 sizeof(AVIndexEntry));
1123 if (!entries)
1124 return -1;
1125
1126 *index_entries = entries;
1127
1128 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1129 timestamp, AVSEEK_FLAG_ANY);
1130
1131 if (index < 0) {
1132 index = (*nb_index_entries)++;
1133 ie = &entries[index];
1134 assert(index == 0 || ie[-1].timestamp < timestamp);
1135 } else {
1136 ie = &entries[index];
1137 if (ie->timestamp != timestamp) {
1138 if (ie->timestamp <= timestamp)
1139 return -1;
1140 memmove(entries + index + 1, entries + index,
1141 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1142 (*nb_index_entries)++;
1143 } else if (ie->pos == pos && distance < ie->min_distance)
1144 // do not reduce the distance
1145 distance = ie->min_distance;
1146 }
1147
1148 ie->pos = pos;
1149 ie->timestamp = timestamp;
1150 ie->min_distance = distance;
1151 ie->size = size;
1152 ie->flags = flags;
1153
1154 return index;
1155 }
1156
1157 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1158 int size, int distance, int flags)
1159 {
1160 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1161 &st->index_entries_allocated_size, pos,
1162 timestamp, size, distance, flags);
1163 }
1164
1165 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1166 int64_t wanted_timestamp, int flags)
1167 {
1168 int a, b, m;
1169 int64_t timestamp;
1170
1171 a = -1;
1172 b = nb_entries;
1173
1174 // Optimize appending index entries at the end.
1175 if (b && entries[b - 1].timestamp < wanted_timestamp)
1176 a = b - 1;
1177
1178 while (b - a > 1) {
1179 m = (a + b) >> 1;
1180 timestamp = entries[m].timestamp;
1181 if (timestamp >= wanted_timestamp)
1182 b = m;
1183 if (timestamp <= wanted_timestamp)
1184 a = m;
1185 }
1186 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1187
1188 if (!(flags & AVSEEK_FLAG_ANY))
1189 while (m >= 0 && m < nb_entries &&
1190 !(entries[m].flags & AVINDEX_KEYFRAME))
1191 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1192
1193 if (m == nb_entries)
1194 return -1;
1195 return m;
1196 }
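
/* Search semantics illustrated: with index entries at timestamps
 * { 0, 1000, 2000 } and wanted_timestamp = 1500, the binary search leaves
 * a = index of 1000 and b = index of 2000; AVSEEK_FLAG_BACKWARD then picks
 * 1000, otherwise 2000 is picked. Without AVSEEK_FLAG_ANY, the result is
 * additionally moved to the nearest entry flagged AVINDEX_KEYFRAME in the
 * chosen direction. */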
1197
1198 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1199 {
1200 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1201 wanted_timestamp, flags);
1202 }
1203
1204 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1205 int64_t target_ts, int flags)
1206 {
1207 AVInputFormat *avif = s->iformat;
1208 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1209 int64_t ts_min, ts_max, ts;
1210 int index;
1211 int64_t ret;
1212 AVStream *st;
1213
1214 if (stream_index < 0)
1215 return -1;
1216
1217 av_log(s, AV_LOG_TRACE, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1218
1219 ts_max =
1220 ts_min = AV_NOPTS_VALUE;
1221 pos_limit = -1; // GCC falsely says it may be uninitialized.
1222
1223 st = s->streams[stream_index];
1224 if (st->index_entries) {
1225 AVIndexEntry *e;
1226
1227 /* FIXME: Whole function must be checked for non-keyframe entries in
1228 * index case, especially read_timestamp(). */
1229 index = av_index_search_timestamp(st, target_ts,
1230 flags | AVSEEK_FLAG_BACKWARD);
1231 index = FFMAX(index, 0);
1232 e = &st->index_entries[index];
1233
1234 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1235 pos_min = e->pos;
1236 ts_min = e->timestamp;
1237 av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1238 pos_min, ts_min);
1239 } else {
1240 assert(index == 0);
1241 }
1242
1243 index = av_index_search_timestamp(st, target_ts,
1244 flags & ~AVSEEK_FLAG_BACKWARD);
1245 assert(index < st->nb_index_entries);
1246 if (index >= 0) {
1247 e = &st->index_entries[index];
1248 assert(e->timestamp >= target_ts);
1249 pos_max = e->pos;
1250 ts_max = e->timestamp;
1251 pos_limit = pos_max - e->min_distance;
1252 av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1253 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1254 }
1255 }
1256
1257 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1258 ts_min, ts_max, flags, &ts, avif->read_timestamp);
1259 if (pos < 0)
1260 return -1;
1261
1262 /* do the seek */
1263 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1264 return ret;
1265
1266 ff_update_cur_dts(s, st, ts);
1267
1268 return 0;
1269 }
1270
1271 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1272 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1273 int64_t ts_min, int64_t ts_max,
1274 int flags, int64_t *ts_ret,
1275 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1276 int64_t *, int64_t))
1277 {
1278 int64_t pos, ts;
1279 int64_t start_pos, filesize;
1280 int no_change;
1281
1282 av_log(s, AV_LOG_TRACE, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1283
1284 if (ts_min == AV_NOPTS_VALUE) {
1285 pos_min = s->internal->data_offset;
1286 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1287 if (ts_min == AV_NOPTS_VALUE)
1288 return -1;
1289 }
1290
1291 if (ts_max == AV_NOPTS_VALUE) {
1292 int step = 1024;
1293 filesize = avio_size(s->pb);
1294 pos_max = filesize - 1;
1295 do {
1296 pos_max -= step;
1297 ts_max = read_timestamp(s, stream_index, &pos_max,
1298 pos_max + step);
1299 step += step;
1300 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1301 if (ts_max == AV_NOPTS_VALUE)
1302 return -1;
1303
1304 for (;;) {
1305 int64_t tmp_pos = pos_max + 1;
1306 int64_t tmp_ts = read_timestamp(s, stream_index,
1307 &tmp_pos, INT64_MAX);
1308 if (tmp_ts == AV_NOPTS_VALUE)
1309 break;
1310 ts_max = tmp_ts;
1311 pos_max = tmp_pos;
1312 if (tmp_pos >= filesize)
1313 break;
1314 }
1315 pos_limit = pos_max;
1316 }
1317
1318 if (ts_min > ts_max)
1319 return -1;
1320 else if (ts_min == ts_max)
1321 pos_limit = pos_min;
1322
1323 no_change = 0;
1324 while (pos_min < pos_limit) {
1325 av_log(s, AV_LOG_TRACE, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1326 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1327 assert(pos_limit <= pos_max);
1328
1329 if (no_change == 0) {
1330 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1331 // interpolate position (better than plain bisection)
1332 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1333 ts_max - ts_min) +
1334 pos_min - approximate_keyframe_distance;
1335 } else if (no_change == 1) {
1336 // bisection if interpolation did not change min / max pos last time
1337 pos = (pos_min + pos_limit) >> 1;
1338 } else {
1339 /* linear search if bisection failed, can only happen if there
1340 * are very few or no keyframes between min/max */
1341 pos = pos_min;
1342 }
1343 if (pos <= pos_min)
1344 pos = pos_min + 1;
1345 else if (pos > pos_limit)
1346 pos = pos_limit;
1347 start_pos = pos;
1348
1349 // May pass pos_limit instead of -1.
1350 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1351 if (pos == pos_max)
1352 no_change++;
1353 else
1354 no_change = 0;
1355 av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1356 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1357 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1358 pos_limit, start_pos, no_change);
1359 if (ts == AV_NOPTS_VALUE) {
1360 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1361 return -1;
1362 }
1363 assert(ts != AV_NOPTS_VALUE);
1364 if (target_ts <= ts) {
1365 pos_limit = start_pos - 1;
1366 pos_max = pos;
1367 ts_max = ts;
1368 }
1369 if (target_ts >= ts) {
1370 pos_min = pos;
1371 ts_min = ts;
1372 }
1373 }
1374
1375 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1376 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1377 pos_min = pos;
1378 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1379 pos_min++;
1380 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1381 av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1382 pos, ts_min, target_ts, ts_max);
1383 *ts_ret = ts;
1384 return pos;
1385 }
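
/* The position guess used on the first iteration above is a linear
 * interpolation between the known (pos, ts) bounds:
 *
 *     pos ~= pos_min + (target_ts - ts_min) / (ts_max - ts_min)
 *                      * (pos_max - pos_min)
 *            - (pos_max - pos_limit)
 *
 * i.e. assume a constant bitrate between the bounds, then step back by the
 * approximate keyframe distance so the following read_timestamp() lands
 * before the target rather than after it. If that guess stops making
 * progress the search degrades to bisection, and finally to a linear scan
 * from pos_min. */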
1386
1387 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1388 int64_t pos, int flags)
1389 {
1390 int64_t pos_min, pos_max;
1391
1392 pos_min = s->internal->data_offset;
1393 pos_max = avio_size(s->pb) - 1;
1394
1395 if (pos < pos_min)
1396 pos = pos_min;
1397 else if (pos > pos_max)
1398 pos = pos_max;
1399
1400 avio_seek(s->pb, pos, SEEK_SET);
1401
1402 return 0;
1403 }
1404
1405 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1406 int64_t timestamp, int flags)
1407 {
1408 int index;
1409 int64_t ret;
1410 AVStream *st;
1411 AVIndexEntry *ie;
1412
1413 st = s->streams[stream_index];
1414
1415 index = av_index_search_timestamp(st, timestamp, flags);
1416
1417 if (index < 0 && st->nb_index_entries &&
1418 timestamp < st->index_entries[0].timestamp)
1419 return -1;
1420
1421 if (index < 0 || index == st->nb_index_entries - 1) {
1422 AVPacket pkt;
1423
1424 if (st->nb_index_entries) {
1425 assert(st->index_entries);
1426 ie = &st->index_entries[st->nb_index_entries - 1];
1427 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1428 return ret;
1429 ff_update_cur_dts(s, st, ie->timestamp);
1430 } else {
1431 if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
1432 return ret;
1433 }
1434 for (;;) {
1435 int read_status;
1436 do {
1437 read_status = av_read_frame(s, &pkt);
1438 } while (read_status == AVERROR(EAGAIN));
1439 if (read_status < 0)
1440 break;
1441 av_free_packet(&pkt);
1442 if (stream_index == pkt.stream_index)
1443 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1444 break;
1445 }
1446 index = av_index_search_timestamp(st, timestamp, flags);
1447 }
1448 if (index < 0)
1449 return -1;
1450
1451 ff_read_frame_flush(s);
1452 if (s->iformat->read_seek)
1453 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1454 return 0;
1455 ie = &st->index_entries[index];
1456 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1457 return ret;
1458 ff_update_cur_dts(s, st, ie->timestamp);
1459
1460 return 0;
1461 }
1462
1463 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1464 int64_t timestamp, int flags)
1465 {
1466 int ret;
1467 AVStream *st;
1468
1469 if (flags & AVSEEK_FLAG_BYTE) {
1470 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1471 return -1;
1472 ff_read_frame_flush(s);
1473 return seek_frame_byte(s, stream_index, timestamp, flags);
1474 }
1475
1476 if (stream_index < 0) {
1477 stream_index = av_find_default_stream_index(s);
1478 if (stream_index < 0)
1479 return -1;
1480
1481 st = s->streams[stream_index];
1482 /* timestamp for default must be expressed in AV_TIME_BASE units */
1483 timestamp = av_rescale(timestamp, st->time_base.den,
1484 AV_TIME_BASE * (int64_t) st->time_base.num);
1485 }
1486
1487 /* first, we try the format specific seek */
1488 if (s->iformat->read_seek) {
1489 ff_read_frame_flush(s);
1490 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1491 } else
1492 ret = -1;
1493 if (ret >= 0)
1494 return 0;
1495
1496 if (s->iformat->read_timestamp &&
1497 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1498 ff_read_frame_flush(s);
1499 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1500 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1501 ff_read_frame_flush(s);
1502 return seek_frame_generic(s, stream_index, timestamp, flags);
1503 } else
1504 return -1;
1505 }
1506
1507 int av_seek_frame(AVFormatContext *s, int stream_index,
1508 int64_t timestamp, int flags)
1509 {
1510 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1511
1512 if (ret >= 0)
1513 ret = queue_attached_pictures(s);
1514
1515 return ret;
1516 }
1517
1518 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1519 int64_t ts, int64_t max_ts, int flags)
1520 {
1521 if (min_ts > ts || max_ts < ts)
1522 return -1;
1523
1524 if (s->iformat->read_seek2) {
1525 int ret;
1526 ff_read_frame_flush(s);
1527 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1528 ts, max_ts, flags);
1529
1530 if (ret >= 0)
1531 ret = queue_attached_pictures(s);
1532 return ret;
1533 }
1534
1535 if (s->iformat->read_timestamp) {
1536 // try to seek via read_timestamp()
1537 }
1538
1539 // Fall back on old API if new is not implemented but old is.
1540 // Note the old API has somewhat different semantics.
1541 if (s->iformat->read_seek || 1)
1542 return av_seek_frame(s, stream_index, ts,
1543 flags | ((uint64_t) ts - min_ts >
1544 (uint64_t) max_ts - ts
1545 ? AVSEEK_FLAG_BACKWARD : 0));
1546
1547 // try some generic seek like seek_frame_generic() but with new ts semantics
1548 }
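
/* Note on the av_seek_frame() fallback above: the old API only takes a
 * single timestamp, so the direction flag is synthesized from the allowed
 * window: if ts is farther from min_ts than from max_ts, seeking backward is
 * more likely to land inside [min_ts, max_ts], hence AVSEEK_FLAG_BACKWARD is
 * added; otherwise a forward-biased seek is used. */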
1549
1550 /*******************************************************/
1551
1552 /**
1553 * Return TRUE if the file or any of its streams has a known duration.
1554 *
1555 * @return TRUE if a duration is known for the file or at least one stream.
1556 */
1557 static int has_duration(AVFormatContext *ic)
1558 {
1559 int i;
1560 AVStream *st;
1561
1562 for (i = 0; i < ic->nb_streams; i++) {
1563 st = ic->streams[i];
1564 if (st->duration != AV_NOPTS_VALUE)
1565 return 1;
1566 }
1567 if (ic->duration != AV_NOPTS_VALUE)
1568 return 1;
1569 return 0;
1570 }
1571
1572 /**
1573 * Estimate the global stream timings from those of each component stream.
1574 *
1575 * Also computes the global bitrate if possible.
1576 */
1577 static void update_stream_timings(AVFormatContext *ic)
1578 {
1579 int64_t start_time, start_time1, end_time, end_time1;
1580 int64_t duration, duration1, filesize;
1581 int i;
1582 AVStream *st;
1583
1584 start_time = INT64_MAX;
1585 end_time = INT64_MIN;
1586 duration = INT64_MIN;
1587 for (i = 0; i < ic->nb_streams; i++) {
1588 st = ic->streams[i];
1589 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1590 start_time1 = av_rescale_q(st->start_time, st->time_base,
1591 AV_TIME_BASE_Q);
1592 start_time = FFMIN(start_time, start_time1);
1593 if (st->duration != AV_NOPTS_VALUE) {
1594 end_time1 = start_time1 +
1595 av_rescale_q(st->duration, st->time_base,
1596 AV_TIME_BASE_Q);
1597 end_time = FFMAX(end_time, end_time1);
1598 }
1599 }
1600 if (st->duration != AV_NOPTS_VALUE) {
1601 duration1 = av_rescale_q(st->duration, st->time_base,
1602 AV_TIME_BASE_Q);
1603 duration = FFMAX(duration, duration1);
1604 }
1605 }
1606 if (start_time != INT64_MAX) {
1607 ic->start_time = start_time;
1608 if (end_time != INT64_MIN)
1609 duration = FFMAX(duration, end_time - start_time);
1610 }
1611 if (duration != INT64_MIN) {
1612 ic->duration = duration;
1613 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1614 /* compute the bitrate */
1615 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1616 (double) ic->duration;
1617 }
1618 }
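
/* Example of the bitrate estimate above: start_time and duration are kept in
 * AV_TIME_BASE (microsecond) units, so a 10 000 000 byte file with a 60 s
 * duration gives
 *     bit_rate = 10 000 000 * 8 * 1 000 000 / 60 000 000 ~= 1 333 333 b/s. */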
1619
1620 static void fill_all_stream_timings(AVFormatContext *ic)
1621 {
1622 int i;
1623 AVStream *st;
1624
1625 update_stream_timings(ic);
1626 for (i = 0; i < ic->nb_streams; i++) {
1627 st = ic->streams[i];
1628 if (st->start_time == AV_NOPTS_VALUE) {
1629 if (ic->start_time != AV_NOPTS_VALUE)
1630 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1631 st->time_base);
1632 if (ic->duration != AV_NOPTS_VALUE)
1633 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
1634 st->time_base);
1635 }
1636 }
1637 }
1638
1639 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1640 {
1641 int64_t filesize, duration;
1642 int i;
1643 AVStream *st;
1644
1645 /* if bit_rate is already set, we believe it */
1646 if (ic->bit_rate <= 0) {
1647 int bit_rate = 0;
1648 for (i = 0; i < ic->nb_streams; i++) {
1649 st = ic->streams[i];
1650 if (st->codec->bit_rate > 0) {
1651 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1652 bit_rate = 0;
1653 break;
1654 }
1655 bit_rate += st->codec->bit_rate;
1656 }
1657 }
1658 ic->bit_rate = bit_rate;
1659 }
1660
1661 /* if duration is already set, we believe it */
1662 if (ic->duration == AV_NOPTS_VALUE &&
1663 ic->bit_rate != 0) {
1664 filesize = ic->pb ? avio_size(ic->pb) : 0;
1665 if (filesize > 0) {
1666 for (i = 0; i < ic->nb_streams; i++) {
1667 st = ic->streams[i];
1668 duration = av_rescale(8 * filesize, st->time_base.den,
1669 ic->bit_rate *
1670 (int64_t) st->time_base.num);
1671 if (st->duration == AV_NOPTS_VALUE)
1672 st->duration = duration;
1673 }
1674 }
1675 }
1676 }
1677
1678 #define DURATION_MAX_READ_SIZE 250000
1679 #define DURATION_MAX_RETRY 3
1680
1681 /* only usable for MPEG-PS and MPEG-TS streams */
1682 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1683 {
1684 AVPacket pkt1, *pkt = &pkt1;
1685 AVStream *st;
1686 int read_size, i, ret;
1687 int64_t end_time;
1688 int64_t filesize, offset, duration;
1689 int retry = 0;
1690
1691 /* flush packet queue */
1692 flush_packet_queue(ic);
1693
1694 for (i = 0; i < ic->nb_streams; i++) {
1695 st = ic->streams[i];
1696 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1697 av_log(st->codec, AV_LOG_WARNING,
1698 "start time is not set in estimate_timings_from_pts\n");
1699
1700 if (st->parser) {
1701 av_parser_close(st->parser);
1702 st->parser = NULL;
1703 }
1704 }
1705
1706 /* estimate the end time (duration) */
1707 /* XXX: may need to support wrapping */
1708 filesize = ic->pb ? avio_size(ic->pb) : 0;
1709 end_time = AV_NOPTS_VALUE;
1710 do {
1711 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1712 if (offset < 0)
1713 offset = 0;
1714
1715 avio_seek(ic->pb, offset, SEEK_SET);
1716 read_size = 0;
1717 for (;;) {
1718 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1719 break;
1720
1721 do {
1722 ret = ff_read_packet(ic, pkt);
1723 } while (ret == AVERROR(EAGAIN));
1724 if (ret != 0)
1725 break;
1726 read_size += pkt->size;
1727 st = ic->streams[pkt->stream_index];
1728 if (pkt->pts != AV_NOPTS_VALUE &&
1729 (st->start_time != AV_NOPTS_VALUE ||
1730 st->first_dts != AV_NOPTS_VALUE)) {
1731 duration = end_time = pkt->pts;
1732 if (st->start_time != AV_NOPTS_VALUE)
1733 duration -= st->start_time;
1734 else
1735 duration -= st->first_dts;
1736 if (duration < 0)
1737 duration += 1LL << st->pts_wrap_bits;
1738 if (duration > 0) {
1739 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1740 st->duration = duration;
1741 }
1742 }
1743 av_free_packet(pkt);
1744 }
1745 } while (end_time == AV_NOPTS_VALUE &&
1746 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1747 ++retry <= DURATION_MAX_RETRY);
1748
1749 fill_all_stream_timings(ic);
1750
1751 avio_seek(ic->pb, old_offset, SEEK_SET);
1752 for (i = 0; i < ic->nb_streams; i++) {
1753 st = ic->streams[i];
1754 st->cur_dts = st->first_dts;
1755 st->last_IP_pts = AV_NOPTS_VALUE;
1756 }
1757 }
1758
1759 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1760 {
1761 int64_t file_size;
1762
1763 /* get the file size, if possible */
1764 if (ic->iformat->flags & AVFMT_NOFILE) {
1765 file_size = 0;
1766 } else {
1767 file_size = avio_size(ic->pb);
1768 file_size = FFMAX(0, file_size);
1769 }
1770
1771 if ((!strcmp(ic->iformat->name, "mpeg") ||
1772 !strcmp(ic->iformat->name, "mpegts")) &&
1773 file_size && ic->pb->seekable) {
1774 /* get accurate estimate from the PTSes */
1775 estimate_timings_from_pts(ic, old_offset);
1776 } else if (has_duration(ic)) {
1777 /* at least one component has timings - we use them for all
1778 * the components */
1779 fill_all_stream_timings(ic);
1780 } else {
1781 av_log(ic, AV_LOG_WARNING,
1782 "Estimating duration from bitrate, this may be inaccurate\n");
1783 /* less precise: use bitrate info */
1784 estimate_timings_from_bit_rate(ic);
1785 }
1786 update_stream_timings(ic);
1787
1788 {
1789 int i;
1790 AVStream av_unused *st;
1791 for (i = 0; i < ic->nb_streams; i++) {
1792 st = ic->streams[i];
1793 av_log(ic, AV_LOG_TRACE, "%d: start_time: %0.3f duration: %0.3f\n", i,
1794 (double) st->start_time / AV_TIME_BASE,
1795 (double) st->duration / AV_TIME_BASE);
1796 }
1797 av_log(ic, AV_LOG_TRACE,
1798 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1799 (double) ic->start_time / AV_TIME_BASE,
1800 (double) ic->duration / AV_TIME_BASE,
1801 ic->bit_rate / 1000);
1802 }
1803 }
1804
1805 static int has_codec_parameters(AVStream *st)
1806 {
1807 AVCodecContext *avctx = st->codec;
1808 int val;
1809
1810 switch (avctx->codec_type) {
1811 case AVMEDIA_TYPE_AUDIO:
1812 val = avctx->sample_rate && avctx->channels;
1813 if (st->info->found_decoder >= 0 &&
1814 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1815 return 0;
1816 break;
1817 case AVMEDIA_TYPE_VIDEO:
1818 val = avctx->width;
1819 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1820 return 0;
1821 break;
1822 default:
1823 val = 1;
1824 break;
1825 }
1826 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
1827 }
1828
1829 static int has_decode_delay_been_guessed(AVStream *st)
1830 {
1831 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1832 st->info->nb_decoded_frames >= 6;
1833 }
1834
1835 /* Return 1 if decoded data was returned, 0 if it was not, or a negative error code. */
1836 static int try_decode_frame(AVStream *st, AVPacket *avpkt,
1837 AVDictionary **options)
1838 {
1839 const AVCodec *codec;
1840 int got_picture = 1, ret = 0;
1841 AVFrame *frame = av_frame_alloc();
1842 AVPacket pkt = *avpkt;
1843
1844 if (!frame)
1845 return AVERROR(ENOMEM);
1846
1847 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
1848 AVDictionary *thread_opt = NULL;
1849
1850 codec = st->codec->codec ? st->codec->codec
1851 : avcodec_find_decoder(st->codec->codec_id);
1852
1853 if (!codec) {
1854 st->info->found_decoder = -1;
1855 ret = -1;
1856 goto fail;
1857 }
1858
1859 /* Force thread count to 1 since the H.264 decoder will not extract
1860 * SPS and PPS to extradata during multi-threaded decoding. */
1861 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1862 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
1863 if (!options)
1864 av_dict_free(&thread_opt);
1865 if (ret < 0) {
1866 st->info->found_decoder = -1;
1867 goto fail;
1868 }
1869 st->info->found_decoder = 1;
1870 } else if (!st->info->found_decoder)
1871 st->info->found_decoder = 1;
1872
1873 if (st->info->found_decoder < 0) {
1874 ret = -1;
1875 goto fail;
1876 }
1877
1878 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1879 ret >= 0 &&
1880 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1881 (!st->codec_info_nb_frames &&
1882 (st->codec->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
1883 got_picture = 0;
1884 switch (st->codec->codec_type) {
1885 case AVMEDIA_TYPE_VIDEO:
1886 ret = avcodec_decode_video2(st->codec, frame,
1887 &got_picture, &pkt);
1888 break;
1889 case AVMEDIA_TYPE_AUDIO:
1890 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
1891 break;
1892 default:
1893 break;
1894 }
1895 if (ret >= 0) {
1896 if (got_picture)
1897 st->info->nb_decoded_frames++;
1898 pkt.data += ret;
1899 pkt.size -= ret;
1900 ret = got_picture;
1901 }
1902 }
1903
1904 fail:
1905 av_frame_free(&frame);
1906 return ret;
1907 }
1908
1909 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1910 {
1911 while (tags->id != AV_CODEC_ID_NONE) {
1912 if (tags->id == id)
1913 return tags->tag;
1914 tags++;
1915 }
1916 return 0;
1917 }
1918
1919 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1920 {
1921 int i;
1922 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1923 if (tag == tags[i].tag)
1924 return tags[i].id;
1925 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1926 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1927 return tags[i].id;
1928 return AV_CODEC_ID_NONE;
1929 }
1930
1931 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
1932 {
1933 if (flt) {
1934 switch (bps) {
1935 case 32:
1936 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
1937 case 64:
1938 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
1939 default:
1940 return AV_CODEC_ID_NONE;
1941 }
1942 } else {
1943 bps >>= 3;
1944 if (sflags & (1 << (bps - 1))) {
1945 switch (bps) {
1946 case 1:
1947 return AV_CODEC_ID_PCM_S8;
1948 case 2:
1949 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
1950 case 3:
1951 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
1952 case 4:
1953 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
1954 default:
1955 return AV_CODEC_ID_NONE;
1956 }
1957 } else {
1958 switch (bps) {
1959 case 1:
1960 return AV_CODEC_ID_PCM_U8;
1961 case 2:
1962 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
1963 case 3:
1964 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
1965 case 4:
1966 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
1967 default:
1968 return AV_CODEC_ID_NONE;
1969 }
1970 }
1971 }
1972 }
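/* Illustrative example of the mapping performed above (argument values are
 * assumptions, not taken from any caller): bits per sample, a float flag,
 * a big-endian flag, and a bitmask whose bit (bytes - 1) marks that sample
 * width as signed.
 * @code
 * // 16-bit signed little-endian PCM -> AV_CODEC_ID_PCM_S16LE
 * enum AVCodecID id  = ff_get_pcm_codec_id(16, 0, 0, 1 << 1);
 * // 32-bit float big-endian PCM -> AV_CODEC_ID_PCM_F32BE
 * enum AVCodecID idf = ff_get_pcm_codec_id(32, 1, 1, 0);
 * @endcode
 */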
1973
1974 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
1975 {
1976 int i;
1977 for (i = 0; tags && tags[i]; i++) {
1978 int tag = ff_codec_get_tag(tags[i], id);
1979 if (tag)
1980 return tag;
1981 }
1982 return 0;
1983 }
1984
1985 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
1986 {
1987 int i;
1988 for (i = 0; tags && tags[i]; i++) {
1989 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
1990 if (id != AV_CODEC_ID_NONE)
1991 return id;
1992 }
1993 return AV_CODEC_ID_NONE;
1994 }
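/* Usage sketch (assumes an already opened AVFormatContext named "s"; the
 * tag value is illustrative): looking up a container tag for a codec ID and
 * back again through the (de)muxer's codec_tag tables.
 * @code
 * unsigned int   tag = av_codec_get_tag(s->oformat->codec_tag, AV_CODEC_ID_H264);
 * enum AVCodecID id  = av_codec_get_id(s->iformat->codec_tag,
 *                                      MKTAG('a', 'v', 'c', '1'));
 * @endcode
 * ff_codec_get_id() first tries an exact tag match and then falls back to a
 * case-insensitive comparison via avpriv_toupper4().
 */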
1995
1996 static void compute_chapters_end(AVFormatContext *s)
1997 {
1998 unsigned int i, j;
1999 int64_t max_time = s->duration +
2000 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2001
2002 for (i = 0; i < s->nb_chapters; i++)
2003 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2004 AVChapter *ch = s->chapters[i];
2005 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2006 ch->time_base)
2007 : INT64_MAX;
2008
2009 for (j = 0; j < s->nb_chapters; j++) {
2010 AVChapter *ch1 = s->chapters[j];
2011 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2012 ch->time_base);
2013 if (j != i && next_start > ch->start && next_start < end)
2014 end = next_start;
2015 }
2016 ch->end = (end == INT64_MAX) ? ch->start : end;
2017 }
2018 }
2019
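/* For i < 60 * 12 the returned numerator, over the 12 * 1001 denominator
 * used when this table is consulted below, gives the frame rate
 * (i + 1) / 12, i.e. every multiple of 1/12 fps up to 60 fps; the remaining
 * indices add the NTSC-style rates 24000/1001, 30000/1001, 60000/1001,
 * 12000/1001 and 15000/1001. */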
2020 static int get_std_framerate(int i)
2021 {
2022 if (i < 60 * 12)
2023 return (i + 1) * 1001;
2024 else
2025 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
2026 }
2027
2028 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2029 {
2030 int i, count, ret, read_size, j;
2031 AVStream *st;
2032 AVPacket pkt1, *pkt;
2033 int64_t old_offset = avio_tell(ic->pb);
2034 // new streams might appear, no options for those
2035 int orig_nb_streams = ic->nb_streams;
2036
2037 for (i = 0; i < ic->nb_streams; i++) {
2038 const AVCodec *codec;
2039 AVDictionary *thread_opt = NULL;
2040 st = ic->streams[i];
2041
2042 // Only needed for the parser->split() extradata extraction further down.
2043 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2044 st->parser = av_parser_init(st->codec->codec_id);
2045 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2046 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2047 }
2048 codec = st->codec->codec ? st->codec->codec
2049 : avcodec_find_decoder(st->codec->codec_id);
2050
2051 /* Force thread count to 1 since the H.264 decoder will not extract
2052 * SPS and PPS to extradata during multi-threaded decoding. */
2053 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2054
2055 /* Ensure that subtitle_header is properly set. */
2056 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2057 && codec && !st->codec->codec)
2058 avcodec_open2(st->codec, codec,
2059 options ? &options[i] : &thread_opt);
2060
2061 // Try to just open decoders, in case this is enough to get parameters.
2062 if (!has_codec_parameters(st)) {
2063 if (codec && !st->codec->codec)
2064 avcodec_open2(st->codec, codec,
2065 options ? &options[i] : &thread_opt);
2066 }
2067 if (!options)
2068 av_dict_free(&thread_opt);
2069 }
2070
2071 for (i = 0; i < ic->nb_streams; i++) {
2072 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2073 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
2074 }
2075
2076 count = 0;
2077 read_size = 0;
2078 for (;;) {
2079 if (ff_check_interrupt(&ic->interrupt_callback)) {
2080 ret = AVERROR_EXIT;
2081 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2082 break;
2083 }
2084
2085 /* check if one codec still needs to be handled */
2086 for (i = 0; i < ic->nb_streams; i++) {
2087 int fps_analyze_framecount = 20;
2088
2089 st = ic->streams[i];
2090 if (!has_codec_parameters(st))
2091 break;
2092 /* If the timebase is coarse (like the usual millisecond precision
2093 * of mkv), we need to analyze more frames to reliably arrive at
2094 * the correct fps. */
2095 if (av_q2d(st->time_base) > 0.0005)
2096 fps_analyze_framecount *= 2;
2097 if (ic->fps_probe_size >= 0)
2098 fps_analyze_framecount = ic->fps_probe_size;
2099 /* variable fps and no guess at the real fps */
2100 if (!st->avg_frame_rate.num &&
2101 st->codec_info_nb_frames < fps_analyze_framecount &&
2102 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2103 break;
2104 if (st->parser && st->parser->parser->split &&
2105 !st->codec->extradata)
2106 break;
2107 if (st->first_dts == AV_NOPTS_VALUE &&
2108 st->codec_info_nb_frames < ic->max_ts_probe &&
2109 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2110 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2111 break;
2112 }
2113 if (i == ic->nb_streams) {
2114 /* NOTE: If the format has no header, then we need to read some
2115 * packets to get most of the streams, so we cannot stop here. */
2116 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2117 /* If we found the info for all the codecs, we can stop. */
2118 ret = count;
2119 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2120 break;
2121 }
2122 }
2123 /* We did not get all the codec info, but we read too much data. */
2124 if (read_size >= ic->probesize) {
2125 ret = count;
2126 av_log(ic, AV_LOG_DEBUG,
2127 "Probe buffer size limit %d reached\n", ic->probesize);
2128 break;
2129 }
2130
2131 /* NOTE: A new stream can be added here if the file has no header
2132 * (AVFMTCTX_NOHEADER). */
2133 ret = read_frame_internal(ic, &pkt1);
2134 if (ret == AVERROR(EAGAIN))
2135 continue;
2136
2137 if (ret < 0) {
2138 /* EOF or error */
2139 AVPacket empty_pkt = { 0 };
2140 int err = 0;
2141 av_init_packet(&empty_pkt);
2142
2143 /* We did not get all the codec parameters before EOF. */
2144 ret = -1;
2145 for (i = 0; i < ic->nb_streams; i++) {
2146 st = ic->streams[i];
2147
2148 /* flush the decoders */
2149 if (st->info->found_decoder == 1) {
2150 do {
2151 err = try_decode_frame(st, &empty_pkt,
2152 (options && i < orig_nb_streams)
2153 ? &options[i] : NULL);
2154 } while (err > 0 && !has_codec_parameters(st));
2155 }
2156
2157 if (err < 0) {
2158 av_log(ic, AV_LOG_WARNING,
2159 "decoding for stream %d failed\n", st->index);
2160 } else if (!has_codec_parameters(st)) {
2161 char buf[256];
2162 avcodec_string(buf, sizeof(buf), st->codec, 0);
2163 av_log(ic, AV_LOG_WARNING,
2164 "Could not find codec parameters (%s)\n", buf);
2165 } else {
2166 ret = 0;
2167 }
2168 }
2169 break;
2170 }
2171
2172 if (ic->flags & AVFMT_FLAG_NOBUFFER) {
2173 pkt = &pkt1;
2174 } else {
2175 pkt = add_to_pktbuf(&ic->internal->packet_buffer, &pkt1,
2176 &ic->internal->packet_buffer_end);
2177 if ((ret = av_dup_packet(pkt)) < 0)
2178 goto find_stream_info_err;
2179 }
2180
2181 read_size += pkt->size;
2182
2183 st = ic->streams[pkt->stream_index];
2184 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2185 /* check for non-increasing dts */
2186 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2187 st->info->fps_last_dts >= pkt->dts) {
2188 av_log(ic, AV_LOG_WARNING,
2189 "Non-increasing DTS in stream %d: packet %d with DTS "
2190 "%"PRId64", packet %d with DTS %"PRId64"\n",
2191 st->index, st->info->fps_last_dts_idx,
2192 st->info->fps_last_dts, st->codec_info_nb_frames,
2193 pkt->dts);
2194 st->info->fps_first_dts =
2195 st->info->fps_last_dts = AV_NOPTS_VALUE;
2196 }
2197 /* Check for a discontinuity in dts. If the difference in dts
2198 * is more than 1000 times the average packet duration in the
2199 * sequence, we treat it as a discontinuity. */
2200 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2201 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2202 (pkt->dts - st->info->fps_last_dts) / 1000 >
2203 (st->info->fps_last_dts - st->info->fps_first_dts) /
2204 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2205 av_log(ic, AV_LOG_WARNING,
2206 "DTS discontinuity in stream %d: packet %d with DTS "
2207 "%"PRId64", packet %d with DTS %"PRId64"\n",
2208 st->index, st->info->fps_last_dts_idx,
2209 st->info->fps_last_dts, st->codec_info_nb_frames,
2210 pkt->dts);
2211 st->info->fps_first_dts =
2212 st->info->fps_last_dts = AV_NOPTS_VALUE;
2213 }
2214
2215 /* update stored dts values */
2216 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2217 st->info->fps_first_dts = pkt->dts;
2218 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2219 }
2220 st->info->fps_last_dts = pkt->dts;
2221 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2222
2223 /* check max_analyze_duration */
2224 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2225 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2226 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2227 ic->max_analyze_duration);
2228 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2229 av_packet_unref(pkt);
2230 break;
2231 }
2232 }
2233 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
2234 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
2235 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2236 st->codec->extradata_size = i;
2237 st->codec->extradata = av_mallocz(st->codec->extradata_size +
2238 AV_INPUT_BUFFER_PADDING_SIZE);
2239 if (!st->codec->extradata)
2240 return AVERROR(ENOMEM);
2241 memcpy(st->codec->extradata, pkt->data,
2242 st->codec->extradata_size);
2243 }
2244 }
2245
2246 /* If still no information, we try to open the codec and to
2247 * decompress the frame. We try to avoid that in most cases as
2248 * it takes longer and uses more memory. For MPEG-4, we need to
2249 * decompress for QuickTime.
2250 *
2251 * If AV_CODEC_CAP_CHANNEL_CONF is set, this will force decoding of at
2252 * least one frame of codec data; this makes sure the codec initializes
2253 * the channel configuration and does not just trust the values from
2254 * the container. */
2255 try_decode_frame(st, pkt,
2256 (options && i < orig_nb_streams) ? &options[i] : NULL);
2257
2258 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2259 av_packet_unref(pkt);
2260
2261 st->codec_info_nb_frames++;
2262 count++;
2263 }
2264
2265 // close codecs which were opened in try_decode_frame()
2266 for (i = 0; i < ic->nb_streams; i++) {
2267 st = ic->streams[i];
2268 avcodec_close(st->codec);
2269 }
2270 for (i = 0; i < ic->nb_streams; i++) {
2271 st = ic->streams[i];
2272 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2273 /* estimate average framerate if not set by demuxer */
2274 if (!st->avg_frame_rate.num &&
2275 st->info->fps_last_dts != st->info->fps_first_dts) {
2276 int64_t delta_dts = st->info->fps_last_dts -
2277 st->info->fps_first_dts;
2278 int delta_packets = st->info->fps_last_dts_idx -
2279 st->info->fps_first_dts_idx;
2280 int best_fps = 0;
2281 double best_error = 0.01;
2282
2283 if (delta_dts >= INT64_MAX / st->time_base.num ||
2284 delta_packets >= INT64_MAX / st->time_base.den ||
2285 delta_dts < 0)
2286 continue;
2287 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2288 delta_packets * (int64_t) st->time_base.den,
2289 delta_dts * (int64_t) st->time_base.num, 60000);
2290
2291 /* Round guessed framerate to a "standard" framerate if it's
2292 * within 1% of the original estimate. */
2293 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2294 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2295 double error = fabs(av_q2d(st->avg_frame_rate) /
2296 av_q2d(std_fps) - 1);
2297
2298 if (error < best_error) {
2299 best_error = error;
2300 best_fps = std_fps.num;
2301 }
2302 }
2303 if (best_fps)
2304 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2305 best_fps, 12 * 1001, INT_MAX);
2306 }
2307 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2308 if (!st->codec->bits_per_coded_sample)
2309 st->codec->bits_per_coded_sample =
2310 av_get_bits_per_sample(st->codec->codec_id);
2311 // set stream disposition based on audio service type
2312 switch (st->codec->audio_service_type) {
2313 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2314 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2315 break;
2316 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2317 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2318 break;
2319 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2320 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2321 break;
2322 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2323 st->disposition = AV_DISPOSITION_COMMENT;
2324 break;
2325 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2326 st->disposition = AV_DISPOSITION_KARAOKE;
2327 break;
2328 }
2329 }
2330 }
2331
2332 estimate_timings(ic, old_offset);
2333
2334 compute_chapters_end(ic);
2335
2336 find_stream_info_err:
2337 for (i = 0; i < ic->nb_streams; i++) {
2338 ic->streams[i]->codec->thread_count = 0;
2339 av_freep(&ic->streams[i]->info);
2340 }
2341 return ret;
2342 }
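/* Minimal usage sketch (file name and variable names are illustrative only):
 * probing an input so that the AVStream/AVCodecContext fields needed for
 * decoding are filled in before the caller starts reading packets.
 * @code
 * AVFormatContext *fmt = NULL;
 * int err = avformat_open_input(&fmt, "input.mkv", NULL, NULL);
 * if (err < 0)
 *     return err;
 * err = avformat_find_stream_info(fmt, NULL);
 * if (err < 0) {
 *     avformat_close_input(&fmt);
 *     return err;
 * }
 * av_dump_format(fmt, 0, "input.mkv", 0);
 * @endcode
 */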
2343
2344 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2345 {
2346 int i, j;
2347
2348 for (i = 0; i < ic->nb_programs; i++)
2349 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2350 if (ic->programs[i]->stream_index[j] == s)
2351 return ic->programs[i];
2352 return NULL;
2353 }
2354
2355 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2356 int wanted_stream_nb, int related_stream,
2357 AVCodec **decoder_ret, int flags)
2358 {
2359 int i, nb_streams = ic->nb_streams;
2360 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2361 unsigned *program = NULL;
2362 AVCodec *decoder = NULL, *best_decoder = NULL;
2363
2364 if (related_stream >= 0 && wanted_stream_nb < 0) {
2365 AVProgram *p = find_program_from_stream(ic, related_stream);
2366 if (p) {
2367 program = p->stream_index;
2368 nb_streams = p->nb_stream_indexes;
2369 }
2370 }
2371 for (i = 0; i < nb_streams; i++) {
2372 int real_stream_index = program ? program[i] : i;
2373 AVStream *st = ic->streams[real_stream_index];
2374 AVCodecContext *avctx = st->codec;
2375 if (avctx->codec_type != type)
2376 continue;
2377 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2378 continue;
2379 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2380 AV_DISPOSITION_VISUAL_IMPAIRED))
2381 continue;
2382 if (decoder_ret) {
2383 decoder = avcodec_find_decoder(st->codec->codec_id);
2384 if (!decoder) {
2385 if (ret < 0)
2386 ret = AVERROR_DECODER_NOT_FOUND;
2387 continue;
2388 }
2389 }
2390 if (best_count >= st->codec_info_nb_frames)
2391 continue;
2392 best_count = st->codec_info_nb_frames;
2393 ret = real_stream_index;
2394 best_decoder = decoder;
2395 if (program && i == nb_streams - 1 && ret < 0) {
2396 program = NULL;
2397 nb_streams = ic->nb_streams;
2398 /* no related stream found, try again with everything */
2399 i = 0;
2400 }
2401 }
2402 if (decoder_ret)
2403 *decoder_ret = best_decoder;
2404 return ret;
2405 }
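/* Usage sketch (assumes an AVFormatContext "fmt" already probed with
 * avformat_find_stream_info()): selecting the "best" video stream and the
 * decoder for it in one call.
 * @code
 * AVCodec *dec = NULL;
 * int idx = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 * if (idx < 0)
 *     return idx; // AVERROR_STREAM_NOT_FOUND or AVERROR_DECODER_NOT_FOUND
 * AVStream *video = fmt->streams[idx];
 * @endcode
 * Streams flagged as hearing- or visually-impaired renditions are skipped,
 * and among the remaining candidates the one with the most frames seen
 * during probing (codec_info_nb_frames) wins.
 */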
2406
2407 /*******************************************************/
2408
2409 int av_read_play(AVFormatContext *s)
2410 {
2411 if (s->iformat->read_play)
2412 return s->iformat->read_play(s);
2413 if (s->pb)
2414 return avio_pause(s->pb, 0);
2415 return AVERROR(ENOSYS);
2416 }
2417
2418 int av_read_pause(AVFormatContext *s)
2419 {
2420 if (s->iformat->read_pause)
2421 return s->iformat->read_pause(s);
2422 if (s->pb)
2423 return avio_pause(s->pb, 1);
2424 return AVERROR(ENOSYS);
2425 }
2426
2427 void avformat_free_context(AVFormatContext *s)
2428 {
2429 int i, j;
2430 AVStream *st;
2431
2432 if (!s)
2433 return;
2434
2435 av_opt_free(s);
2436 if (s->iformat && s->iformat->priv_class && s->priv_data)
2437 av_opt_free(s->priv_data);
2438
2439 for (i = 0; i < s->nb_streams; i++) {
2440 /* free all data in a stream component */
2441 st = s->streams[i];
2442
2443 for (j = 0; j < st->nb_side_data; j++)
2444 av_freep(&st->side_data[j].data);
2445 av_freep(&st->side_data);
2446 st->nb_side_data = 0;
2447
2448 if (st->parser) {
2449 av_parser_close(st->parser);
2450 }
2451 if (st->attached_pic.data)
2452 av_free_packet(&st->attached_pic);
2453 av_dict_free(&st->metadata);
2454 av_freep(&st->probe_data.buf);
2455 av_free(st->index_entries);
2456 av_free(st->codec->extradata);
2457 av_free(st->codec->subtitle_header);
2458 av_free(st->codec);
2459 av_free(st->priv_data);
2460 av_free(st->info);
2461 av_free(st);
2462 }
2463 for (i = s->nb_programs - 1; i >= 0; i--) {
2464 av_dict_free(&s->programs[i]->metadata);
2465 av_freep(&s->programs[i]->stream_index);
2466 av_freep(&s->programs[i]);
2467 }
2468 av_freep(&s->programs);
2469 av_freep(&s->priv_data);
2470 while (s->nb_chapters--) {
2471 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2472 av_free(s->chapters[s->nb_chapters]);
2473 }
2474 av_freep(&s->chapters);
2475 av_dict_free(&s->metadata);
2476 av_freep(&s->streams);
2477 av_freep(&s->internal);
2478 av_free(s);
2479 }
2480
2481 void avformat_close_input(AVFormatContext **ps)
2482 {
2483 AVFormatContext *s = *ps;
2484 AVIOContext *pb = s->pb;
2485
2486 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2487 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2488 pb = NULL;
2489
2490 flush_packet_queue(s);
2491
2492 if (s->iformat)
2493 if (s->iformat->read_close)
2494 s->iformat->read_close(s);
2495
2496 avformat_free_context(s);
2497
2498 *ps = NULL;
2499
2500 avio_close(pb);
2501 }
2502
2503 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2504 {
2505 AVStream *st;
2506 int i;
2507
2508 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2509 sizeof(*s->streams)) < 0) {
2510 s->nb_streams = 0;
2511 return NULL;
2512 }
2513
2514 st = av_mallocz(sizeof(AVStream));
2515 if (!st)
2516 return NULL;
2517 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2518 av_free(st);
2519 return NULL;
2520 }
2521
2522 st->codec = avcodec_alloc_context3(c);
2523 if (!st->codec) {
2524 av_free(st->info);
2525 av_free(st);
2526 return NULL;
2527 }
2528 if (s->iformat) {
2529 /* no default bitrate if decoding */
2530 st->codec->bit_rate = 0;
2531
2532 /* default pts setting is MPEG-like */
2533 avpriv_set_pts_info(st, 33, 1, 90000);
2534 }
2535
2536 st->index = s->nb_streams;
2537 st->start_time = AV_NOPTS_VALUE;
2538 st->duration = AV_NOPTS_VALUE;
2539 /* We set the current DTS to 0 so that formats without any timestamps
2540 * but with durations get some timestamps; formats with some unknown
2541 * timestamps have their first few packets buffered and the
2542 * timestamps corrected before they are returned to the user. */
2543 st->cur_dts = 0;
2544 st->first_dts = AV_NOPTS_VALUE;
2545 st->probe_packets = MAX_PROBE_PACKETS;
2546
2547 st->last_IP_pts = AV_NOPTS_VALUE;
2548 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2549 st->pts_buffer[i] = AV_NOPTS_VALUE;
2550
2551 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2552
2553 st->info->fps_first_dts = AV_NOPTS_VALUE;
2554 st->info->fps_last_dts = AV_NOPTS_VALUE;
2555
2556 s->streams[s->nb_streams++] = st;
2557 return st;
2558 }
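/* Muxing-side sketch (output name and codec settings are illustrative):
 * adding a stream to an output context before avformat_write_header().
 * @code
 * AVFormatContext *oc = NULL;
 * avformat_alloc_output_context2(&oc, NULL, NULL, "out.mkv");
 * AVStream *st = avformat_new_stream(oc, NULL);
 * if (!st)
 *     return AVERROR(ENOMEM);
 * st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
 * st->codec->codec_id   = AV_CODEC_ID_MPEG4;
 * st->codec->width      = 640;
 * st->codec->height     = 480;
 * st->codec->time_base  = (AVRational) { 1, 25 };
 * @endcode
 * st->index is set to the position of the new stream in s->streams[].
 */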
2559
2560 AVProgram *av_new_program(AVFormatContext *ac, int id)
2561 {
2562 AVProgram *program = NULL;
2563 int i;
2564
2565 av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
2566
2567 for (i = 0; i < ac->nb_programs; i++)
2568 if (ac->programs[i]->id == id)
2569 program = ac->programs[i];
2570
2571 if (!program) {
2572 program = av_mallocz(sizeof(AVProgram));
2573 if (!program)
2574 return NULL;
2575 dynarray_add(&ac->programs, &ac->nb_programs, program);
2576 program->discard = AVDISCARD_NONE;
2577 }
2578 program->id = id;
2579
2580 return program;
2581 }
2582
2583 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2584 int64_t start, int64_t end, const char *title)
2585 {
2586 AVChapter *chapter = NULL;
2587 int i;
2588
2589 for (i = 0; i < s->nb_chapters; i++)
2590 if (s->chapters[i]->id == id)
2591 chapter = s->chapters[i];
2592
2593 if (!chapter) {
2594 chapter = av_mallocz(sizeof(AVChapter));
2595 if (!chapter)
2596 return NULL;
2597 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2598 }
2599 av_dict_set(&chapter->metadata, "title", title, 0);
2600 chapter->id = id;
2601 chapter->time_base = time_base;
2602 chapter->start = start;
2603 chapter->end = end;
2604
2605 return chapter;
2606 }
2607
2608 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2609 {
2610 int i, j;
2611 AVProgram *program = NULL;
2612
2613 if (idx >= ac->nb_streams) {
2614 av_log(ac, AV_LOG_ERROR, "stream index %u is not valid\n", idx);
2615 return;
2616 }
2617
2618 for (i = 0; i < ac->nb_programs; i++) {
2619 if (ac->programs[i]->id != progid)
2620 continue;
2621 program = ac->programs[i];
2622 for (j = 0; j < program->nb_stream_indexes; j++)
2623 if (program->stream_index[j] == idx)
2624 return;
2625
2626 if (av_reallocp_array(&program->stream_index,
2627 program->nb_stream_indexes + 1,
2628 sizeof(*program->stream_index)) < 0) {
2629 program->nb_stream_indexes = 0;
2630 return;
2631 }
2632 program->stream_index[program->nb_stream_indexes++] = idx;
2633 return;
2634 }
2635 }
2636
2637 uint64_t ff_ntp_time(void)
2638 {
2639 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
2640 }
2641
2642 int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
2643 {
2644 const char *p;
2645 char *q, buf1[20], c;
2646 int nd, len, percentd_found;
2647
2648 q = buf;
2649 p = path;
2650 percentd_found = 0;
2651 for (;;) {
2652 c = *p++;
2653 if (c == '\0')
2654 break;
2655 if (c == '%') {
2656 do {
2657 nd = 0;
2658 while (av_isdigit(*p))
2659 nd = nd * 10 + *p++ - '0';
2660 c = *p++;
2661 } while (av_isdigit(c));
2662
2663 switch (c) {
2664 case '%':
2665 goto addchar;
2666 case 'd':
2667 if (percentd_found)
2668 goto fail;
2669 percentd_found = 1;
2670 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2671 len = strlen(buf1);
2672 if ((q - buf + len) > buf_size - 1)
2673 goto fail;
2674 memcpy(q, buf1, len);
2675 q += len;
2676 break;
2677 default:
2678 goto fail;
2679 }
2680 } else {
2681 addchar:
2682 if ((q - buf) < buf_size - 1)
2683 *q++ = c;
2684 }
2685 }
2686 if (!percentd_found)
2687 goto fail;
2688 *q = '\0';
2689 return 0;
2690 fail:
2691 *q = '\0';
2692 return -1;
2693 }
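/* Example (illustrative pattern and frame number): expanding the single %d,
 * optionally with a width such as %04d, in an image-sequence pattern.
 * Exactly one %d is allowed; otherwise the function fails with -1.
 * @code
 * char name[1024];
 * av_get_frame_filename(name, sizeof(name), "frame-%04d.png", 7);
 * // name now holds "frame-0007.png"
 * @endcode
 */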
2694
2695 void av_url_split(char *proto, int proto_size,
2696 char *authorization, int authorization_size,
2697 char *hostname, int hostname_size,
2698 int *port_ptr, char *path, int path_size, const char *url)
2699 {
2700 const char *p, *ls, *at, *col, *brk;
2701
2702 if (port_ptr)
2703 *port_ptr = -1;
2704 if (proto_size > 0)
2705 proto[0] = 0;
2706 if (authorization_size > 0)
2707 authorization[0] = 0;
2708 if (hostname_size > 0)
2709 hostname[0] = 0;
2710 if (path_size > 0)
2711 path[0] = 0;
2712
2713 /* parse protocol */
2714 if ((p = strchr(url, ':'))) {
2715 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
2716 p++; /* skip ':' */
2717 if (*p == '/')
2718 p++;
2719 if (*p == '/')
2720 p++;
2721 } else {
2722 /* no protocol means plain filename */
2723 av_strlcpy(path, url, path_size);
2724 return;
2725 }
2726
2727 /* separate path from hostname */
2728 ls = strchr(p, '/');
2729 if (!ls)
2730 ls = strchr(p, '?');
2731 if (ls)
2732 av_strlcpy(path, ls, path_size);
2733 else
2734 ls = &p[strlen(p)]; // XXX
2735
2736 /* the rest is hostname, use that to parse auth/port */
2737 if (ls != p) {
2738 /* authorization (user[:pass]@hostname) */
2739 if ((at = strchr(p, '@')) && at < ls) {
2740 av_strlcpy(authorization, p,
2741 FFMIN(authorization_size, at + 1 - p));
2742 p = at + 1; /* skip '@' */
2743 }
2744
2745 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
2746 /* [host]:port */
2747 av_strlcpy(hostname, p + 1,
2748 FFMIN(hostname_size, brk - p));
2749 if (brk[1] == ':' && port_ptr)
2750 *port_ptr = atoi(brk + 2);
2751 } else if ((col = strchr(p, ':')) && col < ls) {
2752 av_strlcpy(hostname, p,
2753 FFMIN(col + 1 - p, hostname_size));
2754 if (port_ptr)
2755 *port_ptr = atoi(col + 1);
2756 } else
2757 av_strlcpy(hostname, p,
2758 FFMIN(ls + 1 - p, hostname_size));
2759 }
2760 }
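/* Example (illustrative URL): how the individual components come out.
 * @code
 * char proto[16], auth[64], host[128], path[256];
 * int port;
 * av_url_split(proto, sizeof(proto), auth, sizeof(auth),
 *              host, sizeof(host), &port, path, sizeof(path),
 *              "rtsp://user:pass@example.com:554/stream?x=1");
 * // proto = "rtsp", auth = "user:pass", host = "example.com",
 * // port = 554, path = "/stream?x=1"
 * @endcode
 * Components that are not present are returned as empty strings and the
 * port as -1.
 */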
2761
2762 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
2763 {
2764 int i;
2765 static const char hex_table_uc[16] = { '0', '1', '2', '3',
2766 '4', '5', '6', '7',
2767 '8', '9', 'A', 'B',
2768 'C', 'D', 'E', 'F' };
2769 static const char hex_table_lc[16] = { '0', '1', '2', '3',
2770 '4', '5', '6', '7',
2771 '8', '9', 'a', 'b',
2772 'c', 'd', 'e', 'f' };
2773 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
2774
2775 for (i = 0; i < s; i++) {
2776 buff[i * 2] = hex_table[src[i] >> 4];
2777 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
2778 }
2779
2780 return buff;
2781 }
2782
2783 int ff_hex_to_data(uint8_t *data, const char *p)
2784 {
2785 int c, len, v;
2786
2787 len = 0;
2788 v = 1;
2789 for (;;) {
2790 p += strspn(p, SPACE_CHARS);
2791 if (*p == '\0')
2792 break;
2793 c = av_toupper((unsigned char) *p++);
2794 if (c >= '0' && c <= '9')
2795 c = c - '0';
2796 else if (c >= 'A' && c <= 'F')
2797 c = c - 'A' + 10;
2798 else
2799 break;
2800 v = (v << 4) | c;
2801 if (v & 0x100) {
2802 if (data)
2803 data[len] = v;
2804 len++;
2805 v = 1;
2806 }
2807 }
2808 return len;
2809 }
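/* Round-trip sketch for the two hex helpers above (buffer sizes are the
 * caller's responsibility; note that ff_data_to_hex() does not write a
 * terminating NUL).
 * @code
 * uint8_t bin[4];
 * char    hex[2 * sizeof(bin) + 1];
 * int n = ff_hex_to_data(bin, "DEADBEEF");   // n == 4, bin = DE AD BE EF
 * ff_data_to_hex(hex, bin, n, 0);            // "DEADBEEF" (upper case)
 * hex[2 * n] = '\0';
 * @endcode
 * Passing NULL as the data pointer to ff_hex_to_data() only counts the
 * bytes, which is useful for sizing the destination buffer.
 */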
2810
2811 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
2812 unsigned int pts_num, unsigned int pts_den)
2813 {
2814 AVRational new_tb;
2815 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
2816 if (new_tb.num != pts_num)
2817 av_log(NULL, AV_LOG_DEBUG,
2818 "st:%d removing common factor %d from timebase\n",
2819 s->index, pts_num / new_tb.num);
2820 } else
2821 av_log(NULL, AV_LOG_WARNING,
2822 "st:%d has too large timebase, reducing\n", s->index);
2823
2824 if (new_tb.num <= 0 || new_tb.den <= 0) {
2825 av_log(NULL, AV_LOG_ERROR,
2826 "Ignoring attempt to set invalid timebase for st:%d\n",
2827 s->index);
2828 return;
2829 }
2830 s->time_base = new_tb;
2831 s->pts_wrap_bits = pts_wrap_bits;
2832 }
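/* Example (illustrative values): demuxers call this to declare the stream
 * time base; common factors between pts_num and pts_den are removed first.
 * @code
 * avpriv_set_pts_info(st, 33, 1, 90000);   // 90 kHz MPEG-style clock
 * avpriv_set_pts_info(st, 64, 2, 180000);  // reduced to the same 1/90000
 * @endcode
 */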
2833
2834 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
2835 void *context)
2836 {
2837 const char *ptr = str;
2838
2839 /* Parse key=value pairs. */
2840 for (;;) {
2841 const char *key;
2842 char *dest = NULL, *dest_end;
2843 int key_len, dest_len = 0;
2844
2845 /* Skip whitespace and potential commas. */
2846 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
2847 ptr++;
2848 if (!*ptr)
2849 break;
2850
2851 key = ptr;
2852
2853 if (!(ptr = strchr(key, '=')))
2854 break;
2855 ptr++;
2856 key_len = ptr - key;
2857
2858 callback_get_buf(context, key, key_len, &dest, &dest_len);
2859 dest_end = dest + dest_len - 1;
2860
2861 if (*ptr == '\"') {
2862 ptr++;
2863 while (*ptr && *ptr != '\"') {
2864 if (*ptr == '\\') {
2865 if (!ptr[1])
2866 break;
2867 if (dest && dest < dest_end)
2868 *dest++ = ptr[1];
2869 ptr += 2;
2870 } else {
2871 if (dest && dest < dest_end)
2872 *dest++ = *ptr;
2873 ptr++;
2874 }
2875 }
2876 if (*ptr == '\"')
2877 ptr++;
2878 } else {
2879 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
2880 if (dest && dest < dest_end)
2881 *dest++ = *ptr;
2882 }
2883 if (dest)
2884 *dest = 0;
2885 }
2886 }
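/* Sketch of a callback for the parser above (the context struct and key
 * names are made up for illustration): for every key=value or key="value"
 * pair, the callback picks a destination buffer and the parser copies the
 * value into it, handling backslash escapes inside quoted values. Note that
 * the key passed to the callback still includes the trailing '='.
 * @code
 * static void get_buf(void *ctx, const char *key, int key_len,
 *                     char **dest, int *dest_len)
 * {
 *     struct { char user[64]; char realm[64]; } *p = ctx;
 *     if (!strncmp(key, "username=", key_len)) {
 *         *dest     = p->user;
 *         *dest_len = sizeof(p->user);
 *     } else if (!strncmp(key, "realm=", key_len)) {
 *         *dest     = p->realm;
 *         *dest_len = sizeof(p->realm);
 *     }
 * }
 * // ff_parse_key_value("username=\"joe\", realm=example", get_buf, &my_ctx);
 * @endcode
 */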
2887
2888 int ff_find_stream_index(AVFormatContext *s, int id)
2889 {
2890 int i;
2891 for (i = 0; i < s->nb_streams; i++)
2892 if (s->streams[i]->id == id)
2893 return i;
2894 return -1;
2895 }
2896
2897 int64_t ff_iso8601_to_unix_time(const char *datestr)
2898 {
2899 struct tm time1 = { 0 }, time2 = { 0 };
2900 const char *ret1, *ret2;
2901 ret1 = av_small_strptime(datestr, "%Y - %m - %d %T", &time1);
2902 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%T", &time2);
2903 if (ret2 && !ret1)
2904 return av_timegm(&time2);
2905 else
2906 return av_timegm(&time1);
2907 }
2908
2909 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
2910 int std_compliance)
2911 {
2912 if (ofmt) {
2913 if (ofmt->query_codec)
2914 return ofmt->query_codec(codec_id, std_compliance);
2915 else if (ofmt->codec_tag)
2916 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
2917 else if (codec_id == ofmt->video_codec ||
2918 codec_id == ofmt->audio_codec ||
2919 codec_id == ofmt->subtitle_codec)
2920 return 1;
2921 }
2922 return AVERROR_PATCHWELCOME;
2923 }
2924
2925 int avformat_network_init(void)
2926 {
2927 #if CONFIG_NETWORK
2928 int ret;
2929 ff_network_inited_globally = 1;
2930 if ((ret = ff_network_init()) < 0)
2931 return ret;
2932 ff_tls_init();
2933 #endif
2934 return 0;
2935 }
2936
2937 int avformat_network_deinit(void)
2938 {
2939 #if CONFIG_NETWORK
2940 ff_network_close();
2941 ff_tls_deinit();
2942 #endif
2943 return 0;
2944 }
2945
2946 int ff_add_param_change(AVPacket *pkt, int32_t channels,
2947 uint64_t channel_layout, int32_t sample_rate,
2948 int32_t width, int32_t height)
2949 {
2950 uint32_t flags = 0;
2951 int size = 4;
2952 uint8_t *data;
2953 if (!pkt)
2954 return AVERROR(EINVAL);
2955 if (channels) {
2956 size += 4;
2957 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
2958 }
2959 if (channel_layout) {
2960 size += 8;
2961 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
2962 }
2963 if (sample_rate) {
2964 size += 4;
2965 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
2966 }
2967 if (width || height) {
2968 size += 8;
2969 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
2970 }
2971 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
2972 if (!data)
2973 return AVERROR(ENOMEM);
2974 bytestream_put_le32(&data, flags);
2975 if (channels)
2976 bytestream_put_le32(&data, channels);
2977 if (channel_layout)
2978 bytestream_put_le64(&data, channel_layout);
2979 if (sample_rate)
2980 bytestream_put_le32(&data, sample_rate);
2981 if (width || height) {
2982 bytestream_put_le32(&data, width);
2983 bytestream_put_le32(&data, height);
2984 }
2985 return 0;
2986 }
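/* The resulting AV_PKT_DATA_PARAM_CHANGE side data is laid out exactly as
 * written above: a little-endian 32-bit flags word, followed only by the
 * fields whose flag bits are set, in this order:
 *   u32 flags
 *   u32 channels          (AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
 *   u64 channel_layout    (AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
 *   u32 sample_rate       (AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
 *   u32 width, u32 height (AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
 */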
2987
2988 int ff_generate_avci_extradata(AVStream *st)
2989 {
2990 static const uint8_t avci100_1080p_extradata[] = {
2991 // SPS
2992 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
2993 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
2994 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
2995 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
2996 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
2997 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
2998 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
2999 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3000 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3001 // PPS
3002 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3003 0xd0
3004 };
3005 static const uint8_t avci100_1080i_extradata[] = {
3006 // SPS
3007 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3008 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3009 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3010 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3011 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3012 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3013 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3014 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3015 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3016 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3017 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
3018 // PPS
3019 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3020 0xd0
3021 };
3022 static const uint8_t avci50_1080i_extradata[] = {
3023 // SPS
3024 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3025 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3026 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3027 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3028 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3029 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3030 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3031 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3032 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3033 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3034 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
3035 // PPS
3036 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
3037 0x11
3038 };
3039 static const uint8_t avci100_720p_extradata[] = {
3040 // SPS
3041 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3042 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3043 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3044 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3045 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3046 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3047 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3048 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3049 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3050 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
3051 // PPS
3052 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
3053 0x11
3054 };
3055
3056 const uint8_t *data = NULL;
3057 int size = 0;
3058
3059 if (st->codec->width == 1920) {
3060 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
3061 data = avci100_1080p_extradata;
3062 size = sizeof(avci100_1080p_extradata);
3063 } else {
3064 data = avci100_1080i_extradata;
3065 size = sizeof(avci100_1080i_extradata);
3066 }
3067 } else if (st->codec->width == 1440) {
3068 data = avci50_1080i_extradata;
3069 size = sizeof(avci50_1080i_extradata);
3070 } else if (st->codec->width == 1280) {
3071 data = avci100_720p_extradata;
3072 size = sizeof(avci100_720p_extradata);
3073 }
3074
3075 if (!size)
3076 return 0;
3077
3078 av_freep(&st->codec->extradata);
3079 st->codec->extradata_size = 0;
3080 st->codec->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
3081 if (!st->codec->extradata)
3082 return AVERROR(ENOMEM);
3083
3084 memcpy(st->codec->extradata, data, size);
3085 st->codec->extradata_size = size;
3086
3087 return 0;
3088 }
3089
3090 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3091 int *size)
3092 {
3093 int i;
3094
3095 for (i = 0; i < st->nb_side_data; i++) {
3096 if (st->side_data[i].type == type) {
3097 if (size)
3098 *size = st->side_data[i].size;
3099 return st->side_data[i].data;
3100 }
3101 }
3102 return NULL;
3103 }
3104
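/* Usage sketch for the stream-level side data helpers (the side data type
 * and size are illustrative): ff_stream_new_side_data() below either
 * replaces an existing entry of the same type or appends a new one, and
 * av_stream_get_side_data() above retrieves it again.
 * @code
 * uint8_t *matrix = ff_stream_new_side_data(st, AV_PKT_DATA_DISPLAYMATRIX,
 *                                           9 * sizeof(int32_t));
 * if (!matrix)
 *     return AVERROR(ENOMEM);
 * // ... fill in the 3x3 display matrix ...
 * int size;
 * uint8_t *sd = av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, &size);
 * @endcode
 */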
3105 uint8_t *ff_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type,
3106 int size)
3107 {
3108 AVPacketSideData *sd, *tmp;
3109 int i;
3110 uint8_t *data = av_malloc(size);
3111
3112 if (!data)
3113 return NULL;
3114
3115 for (i = 0; i < st->nb_side_data; i++) {
3116 sd = &st->side_data[i];
3117
3118 if (sd->type == type) {
3119 av_freep(&sd->data);
3120 sd->data = data;
3121 sd->size = size;
3122 return sd->data;
3123 }
3124 }
3125
3126 tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
3127 if (!tmp) {
3128 av_freep(&data);
3129 return NULL;
3130 }
3131
3132 st->side_data = tmp;
3133 st->nb_side_data++;
3134
3135 sd = &st->side_data[st->nb_side_data - 1];
3136 sd->type = type;
3137 sd->data = data;
3138 sd->size = size;
3139 return data;
3140 }