[libav.git] / libavformat / utils.c
1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #undef NDEBUG
23 #include <assert.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26
27 #include "config.h"
28
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
38
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
41
42 #include "audiointerleave.h"
43 #include "avformat.h"
44 #include "id3v2.h"
45 #include "internal.h"
46 #include "metadata.h"
47 #if CONFIG_NETWORK
48 #include "network.h"
49 #endif
50 #include "riff.h"
51 #include "url.h"
52
53 /**
54 * @file
55 * various utility functions for use within Libav
56 */
57
58 unsigned avformat_version(void)
59 {
60 return LIBAVFORMAT_VERSION_INT;
61 }
62
63 const char *avformat_configuration(void)
64 {
65 return LIBAV_CONFIGURATION;
66 }
67
68 const char *avformat_license(void)
69 {
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
72 }
73
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
76
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
79 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
80 {
81 int64_t chunk_size = size;
82 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
83 int orig_size = pkt->size;
84 int ret = 0;
85
86 do {
87 int prev_size = pkt->size;
88 int read_size;
89
90 /* When the caller requests a lot of data, limit it to the amount
91 * left in the file, or to SANE_CHUNK_SIZE when that is not known. */
92 if (size > SANE_CHUNK_SIZE) {
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
95 }
96 read_size = FFMIN(size, chunk_size);
97
98 ret = av_grow_packet(pkt, read_size);
99 if (ret < 0)
100 break;
101
102 ret = avio_read(s, pkt->data + prev_size, read_size);
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
105 break;
106 }
107
108 size -= read_size;
109 } while (size > 0);
110
111 pkt->pos = orig_pos;
112 if (!pkt->size)
113 av_packet_unref(pkt);
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
115 }
116
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
118 {
119 av_init_packet(pkt);
120 pkt->data = NULL;
121 pkt->size = 0;
122 pkt->pos = avio_tell(s);
123
124 return append_packet_chunked(s, pkt, size);
125 }
126
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
128 {
129 if (!pkt->size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
132 }
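
/* Hedged usage sketch (added for illustration, not part of the original
 * file): pulling a fixed-size block from an already-opened AVIOContext with
 * av_get_packet(). The 4096-byte request is an arbitrary example value;
 * append_packet_chunked() above caps oversized requests internally. */
static int read_block_example(AVIOContext *pb)
{
    AVPacket pkt;
    int ret = av_get_packet(pb, &pkt, 4096); /* allocates and fills pkt */

    if (ret < 0)
        return ret;              /* I/O error, nothing was read */
    /* ... consume pkt.data / pkt.size here ... */
    av_packet_unref(&pkt);       /* the caller owns the packet and must unref it */
    return 0;
}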
133
134 int av_filename_number_test(const char *filename)
135 {
136 char buf[1024];
137 return filename &&
138 (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
139 }
140
141 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
142 AVProbeData *pd, int score)
143 {
144 static const struct {
145 const char *name;
146 enum AVCodecID id;
147 enum AVMediaType type;
148 } fmt_id_type[] = {
149 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
150 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
151 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
152 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
153 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
154 { "latm", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
155 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
156 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
157 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
158 { 0 }
159 };
160 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
161
162 if (fmt) {
163 int i;
164 av_log(s, AV_LOG_DEBUG,
165 "Probe with size=%d, packets=%d detected %s with score=%d\n",
166 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
167 fmt->name, score);
168 for (i = 0; fmt_id_type[i].name; i++) {
169 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
170 st->codecpar->codec_id = fmt_id_type[i].id;
171 st->codecpar->codec_type = fmt_id_type[i].type;
172 #if FF_API_LAVF_AVCTX
173 FF_DISABLE_DEPRECATION_WARNINGS
174 st->codec->codec_type = st->codecpar->codec_type;
175 st->codec->codec_id = st->codecpar->codec_id;
176 FF_ENABLE_DEPRECATION_WARNINGS
177 #endif
178 break;
179 }
180 }
181 }
182 return !!fmt;
183 }
184
185 /************************************************************/
186 /* input media file */
187
188 /* Open input file and probe the format if necessary. */
189 static int init_input(AVFormatContext *s, const char *filename,
190 AVDictionary **options)
191 {
192 int ret;
193 AVProbeData pd = { filename, NULL, 0 };
194
195 if (s->pb) {
196 s->flags |= AVFMT_FLAG_CUSTOM_IO;
197 if (!s->iformat)
198 return av_probe_input_buffer(s->pb, &s->iformat, filename,
199 s, 0, s->probesize);
200 else if (s->iformat->flags & AVFMT_NOFILE)
201 return AVERROR(EINVAL);
202 return 0;
203 }
204
205 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
206 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
207 return 0;
208
209 ret = s->io_open(s, &s->pb, filename, AVIO_FLAG_READ, options);
210 if (ret < 0)
211 return ret;
212 if (s->iformat)
213 return 0;
214 return av_probe_input_buffer(s->pb, &s->iformat, filename,
215 s, 0, s->probesize);
216 }
217
218 static int add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
219 AVPacketList **plast_pktl, int ref)
220 {
221 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
222 int ret;
223
224 if (!pktl)
225 return AVERROR(ENOMEM);
226
227 if (ref) {
228 if ((ret = av_packet_ref(&pktl->pkt, pkt)) < 0) {
229 av_free(pktl);
230 return ret;
231 }
232 } else {
233 pktl->pkt = *pkt;
234 }
235
236 if (*packet_buffer)
237 (*plast_pktl)->next = pktl;
238 else
239 *packet_buffer = pktl;
240
241 /* Add the packet to the buffered packet list. */
242 *plast_pktl = pktl;
243 return 0;
244 }
245
246 static int queue_attached_pictures(AVFormatContext *s)
247 {
248 int i, ret;
249 for (i = 0; i < s->nb_streams; i++)
250 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
251 s->streams[i]->discard < AVDISCARD_ALL) {
252
253 ret = add_to_pktbuf(&s->internal->raw_packet_buffer,
254 &s->streams[i]->attached_pic,
255 &s->internal->raw_packet_buffer_end, 1);
256 if (ret < 0)
257 return ret;
258 }
259 return 0;
260 }
261
262 #if FF_API_LAVF_AVCTX
263 FF_DISABLE_DEPRECATION_WARNINGS
264 static int update_stream_avctx(AVFormatContext *s)
265 {
266 int i, ret;
267 for (i = 0; i < s->nb_streams; i++) {
268 AVStream *st = s->streams[i];
269
270 if (!st->internal->need_codec_update)
271 continue;
272
273 ret = avcodec_parameters_to_context(st->codec, st->codecpar);
274 if (ret < 0)
275 return ret;
276
277 st->internal->need_codec_update = 0;
278 }
279 return 0;
280 }
281 FF_ENABLE_DEPRECATION_WARNINGS
282 #endif
283
284 int avformat_open_input(AVFormatContext **ps, const char *filename,
285 AVInputFormat *fmt, AVDictionary **options)
286 {
287 AVFormatContext *s = *ps;
288 int i, ret = 0;
289 AVDictionary *tmp = NULL;
290 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
291
292 if (!s && !(s = avformat_alloc_context()))
293 return AVERROR(ENOMEM);
294 if (fmt)
295 s->iformat = fmt;
296
297 if (options)
298 av_dict_copy(&tmp, *options, 0);
299
300 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
301 goto fail;
302
303 if ((ret = init_input(s, filename, &tmp)) < 0)
304 goto fail;
305
306 /* Check filename in case an image number is expected. */
307 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
308 if (!av_filename_number_test(filename)) {
309 ret = AVERROR(EINVAL);
310 goto fail;
311 }
312 }
313
314 s->duration = s->start_time = AV_NOPTS_VALUE;
315 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
316
317 /* Allocate private data. */
318 if (s->iformat->priv_data_size > 0) {
319 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
320 ret = AVERROR(ENOMEM);
321 goto fail;
322 }
323 if (s->iformat->priv_class) {
324 *(const AVClass **) s->priv_data = s->iformat->priv_class;
325 av_opt_set_defaults(s->priv_data);
326 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
327 goto fail;
328 }
329 }
330
331 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
332 if (s->pb)
333 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
334
335 if (s->iformat->read_header)
336 if ((ret = s->iformat->read_header(s)) < 0)
337 goto fail;
338
339 if (id3v2_extra_meta &&
340 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
341 goto fail;
342 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
343
344 if ((ret = queue_attached_pictures(s)) < 0)
345 goto fail;
346
347 if (s->pb && !s->internal->data_offset)
348 s->internal->data_offset = avio_tell(s->pb);
349
350 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
351
352 #if FF_API_LAVF_AVCTX
353 update_stream_avctx(s);
354 #endif
355
356 for (i = 0; i < s->nb_streams; i++)
357 s->streams[i]->internal->orig_codec_id = s->streams[i]->codecpar->codec_id;
358
359 if (options) {
360 av_dict_free(options);
361 *options = tmp;
362 }
363 *ps = s;
364 return 0;
365
366 fail:
367 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
368 av_dict_free(&tmp);
369 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
370 avio_close(s->pb);
371 avformat_free_context(s);
372 *ps = NULL;
373 return ret;
374 }
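
/* Hedged usage sketch (added for illustration, not part of the original
 * source): the typical open -> read -> close cycle driven by
 * avformat_open_input() above and av_read_frame() further down in this
 * file. The URL is whatever the caller passes in. */
static int demux_example(const char *url)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;
    int ret;

    if ((ret = avformat_open_input(&ic, url, NULL, NULL)) < 0)
        return ret;
    /* Optional, but most callers fill in the stream parameters next. */
    if ((ret = avformat_find_stream_info(ic, NULL)) < 0)
        goto end;

    while ((ret = av_read_frame(ic, &pkt)) >= 0)
        av_packet_unref(&pkt);   /* caller owns each packet; consume, then unref */
    if (ret == AVERROR_EOF)
        ret = 0;                 /* normal end of stream */

end:
    avformat_close_input(&ic);
    return ret;
}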
375
376 /*******************************************************/
377
378 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
379 {
380 if (st->codecpar->codec_id == AV_CODEC_ID_PROBE) {
381 AVProbeData *pd = &st->probe_data;
382 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
383 --st->probe_packets;
384
385 if (pkt) {
386 int err;
387 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
388 AVPROBE_PADDING_SIZE)) < 0)
389 return err;
390 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
391 pd->buf_size += pkt->size;
392 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
393 } else {
394 st->probe_packets = 0;
395 if (!pd->buf_size) {
396 av_log(s, AV_LOG_ERROR,
397 "nothing to probe for stream %d\n", st->index);
398 return 0;
399 }
400 }
401
402 if (!st->probe_packets ||
403 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
404 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
405 ? AVPROBE_SCORE_MAX / 4 : 0);
406 if (st->codecpar->codec_id != AV_CODEC_ID_PROBE) {
407 pd->buf_size = 0;
408 av_freep(&pd->buf);
409 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
410 }
411 }
412 }
413 return 0;
414 }
415
416 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
417 {
418 int ret, i, err;
419 AVStream *st;
420
421 for (;;) {
422 AVPacketList *pktl = s->internal->raw_packet_buffer;
423
424 if (pktl) {
425 *pkt = pktl->pkt;
426 st = s->streams[pkt->stream_index];
427 if (st->codecpar->codec_id != AV_CODEC_ID_PROBE ||
428 !st->probe_packets ||
429 s->internal->raw_packet_buffer_remaining_size < pkt->size) {
430 AVProbeData *pd;
431 if (st->probe_packets)
432 if ((err = probe_codec(s, st, NULL)) < 0)
433 return err;
434 pd = &st->probe_data;
435 av_freep(&pd->buf);
436 pd->buf_size = 0;
437 s->internal->raw_packet_buffer = pktl->next;
438 s->internal->raw_packet_buffer_remaining_size += pkt->size;
439 av_free(pktl);
440 return 0;
441 }
442 }
443
444 pkt->data = NULL;
445 pkt->size = 0;
446 av_init_packet(pkt);
447 ret = s->iformat->read_packet(s, pkt);
448 if (ret < 0) {
449 if (!pktl || ret == AVERROR(EAGAIN))
450 return ret;
451 for (i = 0; i < s->nb_streams; i++) {
452 st = s->streams[i];
453 if (st->probe_packets)
454 if ((err = probe_codec(s, st, NULL)) < 0)
455 return err;
456 }
457 continue;
458 }
459
460 if (!pkt->buf) {
461 AVPacket tmp = { 0 };
462 ret = av_packet_ref(&tmp, pkt);
463 if (ret < 0)
464 return ret;
465 *pkt = tmp;
466 }
467
468 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
469 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
470 av_log(s, AV_LOG_WARNING,
471 "Dropped corrupted packet (stream = %d)\n",
472 pkt->stream_index);
473 av_packet_unref(pkt);
474 continue;
475 }
476
477 st = s->streams[pkt->stream_index];
478
479 switch (st->codecpar->codec_type) {
480 case AVMEDIA_TYPE_VIDEO:
481 if (s->video_codec_id)
482 st->codecpar->codec_id = s->video_codec_id;
483 break;
484 case AVMEDIA_TYPE_AUDIO:
485 if (s->audio_codec_id)
486 st->codecpar->codec_id = s->audio_codec_id;
487 break;
488 case AVMEDIA_TYPE_SUBTITLE:
489 if (s->subtitle_codec_id)
490 st->codecpar->codec_id = s->subtitle_codec_id;
491 break;
492 }
493
494 if (!pktl && (st->codecpar->codec_id != AV_CODEC_ID_PROBE ||
495 !st->probe_packets))
496 return ret;
497
498 err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
499 &s->internal->raw_packet_buffer_end, 0);
500 if (err)
501 return err;
502 s->internal->raw_packet_buffer_remaining_size -= pkt->size;
503
504 if ((err = probe_codec(s, st, pkt)) < 0)
505 return err;
506 }
507 }
508
509 /**********************************************************/
510
511 /**
512 * Compute the frame duration in seconds, returned as the fraction *pnum / *pden. Both are set to 0 if the duration is not available.
513 */
514 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
515 AVCodecParserContext *pc, AVPacket *pkt)
516 {
517 AVRational codec_framerate = s->iformat ? st->internal->avctx->framerate :
518 (AVRational){ 0, 1 };
519 int frame_size;
520
521 *pnum = 0;
522 *pden = 0;
523 switch (st->codecpar->codec_type) {
524 case AVMEDIA_TYPE_VIDEO:
525 if (st->avg_frame_rate.num) {
526 *pnum = st->avg_frame_rate.den;
527 *pden = st->avg_frame_rate.num;
528 } else if (st->time_base.num * 1000LL > st->time_base.den) {
529 *pnum = st->time_base.num;
530 *pden = st->time_base.den;
531 } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
532 *pnum = codec_framerate.den;
533 *pden = codec_framerate.num;
534 if (pc && pc->repeat_pict) {
535 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
536 *pden /= 1 + pc->repeat_pict;
537 else
538 *pnum *= 1 + pc->repeat_pict;
539 }
540 /* If this codec can be interlaced or progressive then we need
541 * a parser to compute the duration of a packet, so if we have
542 * no parser in that case, leave the duration undefined. */
543 if (st->internal->avctx->ticks_per_frame > 1 && !pc)
544 *pnum = *pden = 0;
545 }
546 break;
547 case AVMEDIA_TYPE_AUDIO:
548 frame_size = av_get_audio_frame_duration2(st->codecpar, pkt->size);
549 if (frame_size <= 0 || st->codecpar->sample_rate <= 0)
550 break;
551 *pnum = frame_size;
552 *pden = st->codecpar->sample_rate;
553 break;
554 default:
555 break;
556 }
557 }
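
/* Hedged sketch (illustration only): turning the *pnum / *pden pair computed
 * above into a packet duration expressed in the stream time base, mirroring
 * what compute_pkt_fields() does below. */
static int64_t duration_in_time_base_example(AVStream *st, int num, int den)
{
    if (!num || !den)
        return 0;                /* duration unknown */
    /* duration in seconds is num / den; divide by the time base to get ticks */
    return av_rescale_rnd(1, num * (int64_t) st->time_base.den,
                          den * (int64_t) st->time_base.num, AV_ROUND_DOWN);
}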
558
559 static int is_intra_only(enum AVCodecID id)
560 {
561 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
562 if (!d)
563 return 0;
564 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
565 return 0;
566 return 1;
567 }
568
569 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
570 int64_t dts, int64_t pts)
571 {
572 AVStream *st = s->streams[stream_index];
573 AVPacketList *pktl = s->internal->packet_buffer;
574
575 if (st->first_dts != AV_NOPTS_VALUE ||
576 dts == AV_NOPTS_VALUE ||
577 st->cur_dts == AV_NOPTS_VALUE)
578 return;
579
580 st->first_dts = dts - st->cur_dts;
581 st->cur_dts = dts;
582
583 for (; pktl; pktl = pktl->next) {
584 if (pktl->pkt.stream_index != stream_index)
585 continue;
586 // FIXME: think more about this check
587 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
588 pktl->pkt.pts += st->first_dts;
589
590 if (pktl->pkt.dts != AV_NOPTS_VALUE)
591 pktl->pkt.dts += st->first_dts;
592
593 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
594 st->start_time = pktl->pkt.pts;
595 }
596 if (st->start_time == AV_NOPTS_VALUE)
597 st->start_time = pts;
598 }
599
600 static void update_initial_durations(AVFormatContext *s, AVStream *st,
601 int stream_index, int duration)
602 {
603 AVPacketList *pktl = s->internal->packet_buffer;
604 int64_t cur_dts = 0;
605
606 if (st->first_dts != AV_NOPTS_VALUE) {
607 cur_dts = st->first_dts;
608 for (; pktl; pktl = pktl->next) {
609 if (pktl->pkt.stream_index == stream_index) {
610 if (pktl->pkt.pts != pktl->pkt.dts ||
611 pktl->pkt.dts != AV_NOPTS_VALUE ||
612 pktl->pkt.duration)
613 break;
614 cur_dts -= duration;
615 }
616 }
617 pktl = s->internal->packet_buffer;
618 st->first_dts = cur_dts;
619 } else if (st->cur_dts)
620 return;
621
622 for (; pktl; pktl = pktl->next) {
623 if (pktl->pkt.stream_index != stream_index)
624 continue;
625 if (pktl->pkt.pts == pktl->pkt.dts &&
626 pktl->pkt.dts == AV_NOPTS_VALUE &&
627 !pktl->pkt.duration) {
628 pktl->pkt.dts = cur_dts;
629 if (!st->internal->avctx->has_b_frames)
630 pktl->pkt.pts = cur_dts;
631 cur_dts += duration;
632 if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
633 pktl->pkt.duration = duration;
634 } else
635 break;
636 }
637 if (st->first_dts == AV_NOPTS_VALUE)
638 st->cur_dts = cur_dts;
639 }
640
641 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
642 AVCodecParserContext *pc, AVPacket *pkt)
643 {
644 int num, den, presentation_delayed, delay, i;
645 int64_t offset;
646
647 if (s->flags & AVFMT_FLAG_NOFILLIN)
648 return;
649
650 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
651 pkt->dts = AV_NOPTS_VALUE;
652
653 /* do we have a video B-frame? */
654 delay = st->internal->avctx->has_b_frames;
655 presentation_delayed = 0;
656
657 /* XXX: need has_b_frames, but cannot get it if the codec is
658 * not initialized */
659 if (delay &&
660 pc && pc->pict_type != AV_PICTURE_TYPE_B)
661 presentation_delayed = 1;
662
663 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
664 st->pts_wrap_bits < 63 &&
665 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
666 pkt->dts -= 1LL << st->pts_wrap_bits;
667 }
668
669 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
670 * We take the conservative approach and discard both.
671 * Note: If this is misbehaving for an H.264 file, then possibly
672 * presentation_delayed is not set correctly. */
673 if (delay == 1 && pkt->dts == pkt->pts &&
674 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
675 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
676 pkt->dts = AV_NOPTS_VALUE;
677 }
678
679 if (pkt->duration == 0 && st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
680 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
681 if (den && num) {
682 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
683 den * (int64_t) st->time_base.num,
684 AV_ROUND_DOWN);
685
686 if (pkt->duration != 0 && s->internal->packet_buffer)
687 update_initial_durations(s, st, pkt->stream_index,
688 pkt->duration);
689 }
690 }
691
692 /* Correct timestamps with byte offset if demuxers only have timestamps
693 * on packet boundaries */
694 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
695 /* this will estimate bitrate based on this frame's duration and size */
696 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
697 if (pkt->pts != AV_NOPTS_VALUE)
698 pkt->pts += offset;
699 if (pkt->dts != AV_NOPTS_VALUE)
700 pkt->dts += offset;
701 }
702
703 /* This may be redundant, but it should not hurt. */
704 if (pkt->dts != AV_NOPTS_VALUE &&
705 pkt->pts != AV_NOPTS_VALUE &&
706 pkt->pts > pkt->dts)
707 presentation_delayed = 1;
708
709 av_log(NULL, AV_LOG_TRACE,
710 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
711 "cur_dts:%"PRId64" st:%d pc:%p\n",
712 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
713 pkt->stream_index, pc);
714 /* Interpolate PTS and DTS if they are not present. We skip H.264
715 * currently because delay and has_b_frames are not reliably set. */
716 if ((delay == 0 || (delay == 1 && pc)) &&
717 st->codecpar->codec_id != AV_CODEC_ID_H264) {
718 if (presentation_delayed) {
719 /* DTS = decompression timestamp */
720 /* PTS = presentation timestamp */
721 if (pkt->dts == AV_NOPTS_VALUE)
722 pkt->dts = st->last_IP_pts;
723 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
724 if (pkt->dts == AV_NOPTS_VALUE)
725 pkt->dts = st->cur_dts;
726
727 /* This is tricky: the dts must be incremented by the duration
728 * of the frame we are displaying, i.e. the last I- or P-frame. */
729 if (st->last_IP_duration == 0)
730 st->last_IP_duration = pkt->duration;
731 if (pkt->dts != AV_NOPTS_VALUE)
732 st->cur_dts = pkt->dts + st->last_IP_duration;
733 st->last_IP_duration = pkt->duration;
734 st->last_IP_pts = pkt->pts;
735 /* Cannot compute PTS if not present (we can compute it only
736 * by knowing the future). */
737 } else if (pkt->pts != AV_NOPTS_VALUE ||
738 pkt->dts != AV_NOPTS_VALUE ||
739 pkt->duration ||
740 st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
741 int duration = pkt->duration;
742 if (!duration && st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
743 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
744 if (den && num) {
745 duration = av_rescale_rnd(1,
746 num * (int64_t) st->time_base.den,
747 den * (int64_t) st->time_base.num,
748 AV_ROUND_DOWN);
749 if (duration != 0 && s->internal->packet_buffer)
750 update_initial_durations(s, st, pkt->stream_index,
751 duration);
752 }
753 }
754
755 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
756 duration) {
757 /* presentation is not delayed: PTS and DTS are the same */
758 if (pkt->pts == AV_NOPTS_VALUE)
759 pkt->pts = pkt->dts;
760 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
761 pkt->pts);
762 if (pkt->pts == AV_NOPTS_VALUE)
763 pkt->pts = st->cur_dts;
764 pkt->dts = pkt->pts;
765 if (pkt->pts != AV_NOPTS_VALUE)
766 st->cur_dts = pkt->pts + duration;
767 }
768 }
769 }
770
771 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
772 st->pts_buffer[0] = pkt->pts;
773 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
774 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
775 if (pkt->dts == AV_NOPTS_VALUE)
776 pkt->dts = st->pts_buffer[0];
777 // We skipped it above so we try here.
778 if (st->codecpar->codec_id == AV_CODEC_ID_H264)
779 // This should happen on the first packet
780 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
781 if (pkt->dts > st->cur_dts)
782 st->cur_dts = pkt->dts;
783 }
784
785 av_log(NULL, AV_LOG_TRACE,
786 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
787 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
788
789 /* update flags */
790 if (is_intra_only(st->codecpar->codec_id))
791 pkt->flags |= AV_PKT_FLAG_KEY;
792 #if FF_API_CONVERGENCE_DURATION
793 FF_DISABLE_DEPRECATION_WARNINGS
794 if (pc)
795 pkt->convergence_duration = pc->convergence_duration;
796 FF_ENABLE_DEPRECATION_WARNINGS
797 #endif
798 }
799
800 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
801 {
802 while (*pkt_buf) {
803 AVPacketList *pktl = *pkt_buf;
804 *pkt_buf = pktl->next;
805 av_packet_unref(&pktl->pkt);
806 av_freep(&pktl);
807 }
808 *pkt_buf_end = NULL;
809 }
810
811 /**
812 * Parse a packet, add all split parts to parse_queue.
813 *
814 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
815 */
816 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
817 {
818 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
819 AVStream *st = s->streams[stream_index];
820 uint8_t *data = pkt ? pkt->data : NULL;
821 int size = pkt ? pkt->size : 0;
822 int ret = 0, got_output = 0;
823
824 if (!pkt) {
825 av_init_packet(&flush_pkt);
826 pkt = &flush_pkt;
827 got_output = 1;
828 }
829
830 while (size > 0 || (pkt == &flush_pkt && got_output)) {
831 int len;
832
833 av_init_packet(&out_pkt);
834 len = av_parser_parse2(st->parser, st->internal->avctx,
835 &out_pkt.data, &out_pkt.size, data, size,
836 pkt->pts, pkt->dts, pkt->pos);
837
838 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
839 /* increment read pointer */
840 data += len;
841 size -= len;
842
843 got_output = !!out_pkt.size;
844
845 if (!out_pkt.size)
846 continue;
847
848 if (pkt->side_data) {
849 out_pkt.side_data = pkt->side_data;
850 out_pkt.side_data_elems = pkt->side_data_elems;
851 pkt->side_data = NULL;
852 pkt->side_data_elems = 0;
853 }
854
855 /* set the duration */
856 out_pkt.duration = 0;
857 if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
858 if (st->internal->avctx->sample_rate > 0) {
859 out_pkt.duration =
860 av_rescale_q_rnd(st->parser->duration,
861 (AVRational) { 1, st->internal->avctx->sample_rate },
862 st->time_base,
863 AV_ROUND_DOWN);
864 }
865 }
866
867 out_pkt.stream_index = st->index;
868 out_pkt.pts = st->parser->pts;
869 out_pkt.dts = st->parser->dts;
870 out_pkt.pos = st->parser->pos;
871
872 if (st->parser->key_frame == 1 ||
873 (st->parser->key_frame == -1 &&
874 st->parser->pict_type == AV_PICTURE_TYPE_I))
875 out_pkt.flags |= AV_PKT_FLAG_KEY;
876
877 compute_pkt_fields(s, st, st->parser, &out_pkt);
878
879 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
880 out_pkt.flags & AV_PKT_FLAG_KEY) {
881 ff_reduce_index(s, st->index);
882 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
883 0, 0, AVINDEX_KEYFRAME);
884 }
885
886 if ((ret = add_to_pktbuf(&s->internal->parse_queue, &out_pkt,
887 &s->internal->parse_queue_end,
888 1))) {
889 av_packet_unref(&out_pkt);
890 goto fail;
891 }
892 }
893
894 /* end of the stream => close and free the parser */
895 if (pkt == &flush_pkt) {
896 av_parser_close(st->parser);
897 st->parser = NULL;
898 }
899
900 fail:
901 av_packet_unref(pkt);
902 return ret;
903 }
904
905 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
906 AVPacketList **pkt_buffer_end,
907 AVPacket *pkt)
908 {
909 AVPacketList *pktl;
910 av_assert0(*pkt_buffer);
911 pktl = *pkt_buffer;
912 *pkt = pktl->pkt;
913 *pkt_buffer = pktl->next;
914 if (!pktl->next)
915 *pkt_buffer_end = NULL;
916 av_freep(&pktl);
917 return 0;
918 }
919
920 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
921 {
922 int ret = 0, i, got_packet = 0;
923 AVDictionary *metadata = NULL;
924
925 av_init_packet(pkt);
926
927 while (!got_packet && !s->internal->parse_queue) {
928 AVStream *st;
929 AVPacket cur_pkt;
930
931 /* read next packet */
932 ret = ff_read_packet(s, &cur_pkt);
933 if (ret < 0) {
934 if (ret == AVERROR(EAGAIN))
935 return ret;
936 /* flush the parsers */
937 for (i = 0; i < s->nb_streams; i++) {
938 st = s->streams[i];
939 if (st->parser && st->need_parsing)
940 parse_packet(s, NULL, st->index);
941 }
942 /* all remaining packets are now in parse_queue =>
943 * really terminate parsing */
944 break;
945 }
946 ret = 0;
947 st = s->streams[cur_pkt.stream_index];
948
949 if (cur_pkt.pts != AV_NOPTS_VALUE &&
950 cur_pkt.dts != AV_NOPTS_VALUE &&
951 cur_pkt.pts < cur_pkt.dts) {
952 av_log(s, AV_LOG_WARNING,
953 "Invalid timestamps stream=%d, pts=%"PRId64", "
954 "dts=%"PRId64", size=%d\n",
955 cur_pkt.stream_index, cur_pkt.pts,
956 cur_pkt.dts, cur_pkt.size);
957 }
958 if (s->debug & FF_FDEBUG_TS)
959 av_log(s, AV_LOG_DEBUG,
960 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
961 "size=%d, duration=%"PRId64", flags=%d\n",
962 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
963 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
964
965 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
966 st->parser = av_parser_init(st->codecpar->codec_id);
967 if (!st->parser)
968 /* no parser available: just output the raw packets */
969 st->need_parsing = AVSTREAM_PARSE_NONE;
970 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
971 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
972 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
973 st->parser->flags |= PARSER_FLAG_ONCE;
974 }
975
976 if (!st->need_parsing || !st->parser) {
977 /* no parsing needed: we just output the packet as is */
978 *pkt = cur_pkt;
979 compute_pkt_fields(s, st, NULL, pkt);
980 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
981 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
982 ff_reduce_index(s, st->index);
983 av_add_index_entry(st, pkt->pos, pkt->dts,
984 0, 0, AVINDEX_KEYFRAME);
985 }
986 got_packet = 1;
987 } else if (st->discard < AVDISCARD_ALL) {
988 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
989 return ret;
990 } else {
991 /* free packet */
992 av_packet_unref(&cur_pkt);
993 }
994 }
995
996 if (!got_packet && s->internal->parse_queue)
997 ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);
998
999 av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
1000 if (metadata) {
1001 s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
1002 av_dict_copy(&s->metadata, metadata, 0);
1003 av_dict_free(&metadata);
1004 av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
1005 }
1006
1007 #if FF_API_LAVF_AVCTX
1008 update_stream_avctx(s);
1009 #endif
1010
1011 if (s->debug & FF_FDEBUG_TS)
1012 av_log(s, AV_LOG_DEBUG,
1013 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
1014 "size=%d, duration=%"PRId64", flags=%d\n",
1015 pkt->stream_index, pkt->pts, pkt->dts,
1016 pkt->size, pkt->duration, pkt->flags);
1017
1018 return ret;
1019 }
1020
1021 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1022 {
1023 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1024 int eof = 0;
1025
1026 if (!genpts)
1027 return s->internal->packet_buffer
1028 ? read_from_packet_buffer(&s->internal->packet_buffer,
1029 &s->internal->packet_buffer_end, pkt)
1030 : read_frame_internal(s, pkt);
1031
1032 for (;;) {
1033 int ret;
1034 AVPacketList *pktl = s->internal->packet_buffer;
1035
1036 if (pktl) {
1037 AVPacket *next_pkt = &pktl->pkt;
1038
1039 if (next_pkt->dts != AV_NOPTS_VALUE) {
1040 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1041 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1042 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1043 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
1044 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
1045 // not B-frame
1046 next_pkt->pts = pktl->pkt.dts;
1047 }
1048 pktl = pktl->next;
1049 }
1050 pktl = s->internal->packet_buffer;
1051 }
1052
1053 /* read packet from packet buffer, if there is data */
1054 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1055 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1056 return read_from_packet_buffer(&s->internal->packet_buffer,
1057 &s->internal->packet_buffer_end, pkt);
1058 }
1059
1060 ret = read_frame_internal(s, pkt);
1061 if (ret < 0) {
1062 if (pktl && ret != AVERROR(EAGAIN)) {
1063 eof = 1;
1064 continue;
1065 } else
1066 return ret;
1067 }
1068
1069 ret = add_to_pktbuf(&s->internal->packet_buffer, pkt,
1070 &s->internal->packet_buffer_end, 1);
1071 if (ret < 0)
1072 return ret;
1073 }
1074 }
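
/* Note added for illustration (not part of the original file): the
 * missing-timestamp interpolation above only runs when the caller enables
 * AVFMT_FLAG_GENPTS before pulling packets, e.g.
 *
 *     ic->flags |= AVFMT_FLAG_GENPTS;
 *     while (av_read_frame(ic, &pkt) >= 0)
 *         av_packet_unref(&pkt);
 */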
1075
1076 /* XXX: suppress the packet queue */
1077 static void flush_packet_queue(AVFormatContext *s)
1078 {
1079 free_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end);
1080 free_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end);
1081 free_packet_buffer(&s->internal->raw_packet_buffer, &s->internal->raw_packet_buffer_end);
1082
1083 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1084 }
1085
1086 /*******************************************************/
1087 /* seek support */
1088
1089 int av_find_default_stream_index(AVFormatContext *s)
1090 {
1091 int first_audio_index = -1;
1092 int i;
1093 AVStream *st;
1094
1095 if (s->nb_streams <= 0)
1096 return -1;
1097 for (i = 0; i < s->nb_streams; i++) {
1098 st = s->streams[i];
1099 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1100 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1101 return i;
1102 }
1103 if (first_audio_index < 0 &&
1104 st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
1105 first_audio_index = i;
1106 }
1107 return first_audio_index >= 0 ? first_audio_index : 0;
1108 }
1109
1110 /** Flush the frame reader. */
1111 void ff_read_frame_flush(AVFormatContext *s)
1112 {
1113 AVStream *st;
1114 int i, j;
1115
1116 flush_packet_queue(s);
1117
1118 /* Reset read state for each stream. */
1119 for (i = 0; i < s->nb_streams; i++) {
1120 st = s->streams[i];
1121
1122 if (st->parser) {
1123 av_parser_close(st->parser);
1124 st->parser = NULL;
1125 }
1126 st->last_IP_pts = AV_NOPTS_VALUE;
1127 /* We set the current DTS to an unspecified origin. */
1128 st->cur_dts = AV_NOPTS_VALUE;
1129
1130 st->probe_packets = MAX_PROBE_PACKETS;
1131
1132 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1133 st->pts_buffer[j] = AV_NOPTS_VALUE;
1134 }
1135 }
1136
1137 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1138 {
1139 int i;
1140
1141 for (i = 0; i < s->nb_streams; i++) {
1142 AVStream *st = s->streams[i];
1143
1144 st->cur_dts =
1145 av_rescale(timestamp,
1146 st->time_base.den * (int64_t) ref_st->time_base.num,
1147 st->time_base.num * (int64_t) ref_st->time_base.den);
1148 }
1149 }
1150
1151 void ff_reduce_index(AVFormatContext *s, int stream_index)
1152 {
1153 AVStream *st = s->streams[stream_index];
1154 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1155
1156 if ((unsigned) st->nb_index_entries >= max_entries) {
1157 int i;
1158 for (i = 0; 2 * i < st->nb_index_entries; i++)
1159 st->index_entries[i] = st->index_entries[2 * i];
1160 st->nb_index_entries = i;
1161 }
1162 }
1163
1164 int ff_add_index_entry(AVIndexEntry **index_entries,
1165 int *nb_index_entries,
1166 unsigned int *index_entries_allocated_size,
1167 int64_t pos, int64_t timestamp,
1168 int size, int distance, int flags)
1169 {
1170 AVIndexEntry *entries, *ie;
1171 int index;
1172
1173 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1174 return -1;
1175
1176 entries = av_fast_realloc(*index_entries,
1177 index_entries_allocated_size,
1178 (*nb_index_entries + 1) *
1179 sizeof(AVIndexEntry));
1180 if (!entries)
1181 return -1;
1182
1183 *index_entries = entries;
1184
1185 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1186 timestamp, AVSEEK_FLAG_ANY);
1187
1188 if (index < 0) {
1189 index = (*nb_index_entries)++;
1190 ie = &entries[index];
1191 assert(index == 0 || ie[-1].timestamp < timestamp);
1192 } else {
1193 ie = &entries[index];
1194 if (ie->timestamp != timestamp) {
1195 if (ie->timestamp <= timestamp)
1196 return -1;
1197 memmove(entries + index + 1, entries + index,
1198 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1199 (*nb_index_entries)++;
1200 } else if (ie->pos == pos && distance < ie->min_distance)
1201 // do not reduce the distance
1202 distance = ie->min_distance;
1203 }
1204
1205 ie->pos = pos;
1206 ie->timestamp = timestamp;
1207 ie->min_distance = distance;
1208 ie->size = size;
1209 ie->flags = flags;
1210
1211 return index;
1212 }
1213
1214 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1215 int size, int distance, int flags)
1216 {
1217 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1218 &st->index_entries_allocated_size, pos,
1219 timestamp, size, distance, flags);
1220 }
1221
1222 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1223 int64_t wanted_timestamp, int flags)
1224 {
1225 int a, b, m;
1226 int64_t timestamp;
1227
1228 a = -1;
1229 b = nb_entries;
1230
1231 // Optimize appending index entries at the end.
1232 if (b && entries[b - 1].timestamp < wanted_timestamp)
1233 a = b - 1;
1234
1235 while (b - a > 1) {
1236 m = (a + b) >> 1;
1237 timestamp = entries[m].timestamp;
1238 if (timestamp >= wanted_timestamp)
1239 b = m;
1240 if (timestamp <= wanted_timestamp)
1241 a = m;
1242 }
1243 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1244
1245 if (!(flags & AVSEEK_FLAG_ANY))
1246 while (m >= 0 && m < nb_entries &&
1247 !(entries[m].flags & AVINDEX_KEYFRAME))
1248 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1249
1250 if (m == nb_entries)
1251 return -1;
1252 return m;
1253 }
1254
1255 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1256 {
1257 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1258 wanted_timestamp, flags);
1259 }
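
/* Hedged sketch (illustration only): locating the last keyframe entry at or
 * before a wanted timestamp, the same way seek_frame_generic() below walks
 * the index. */
static int64_t keyframe_pos_example(AVStream *st, int64_t wanted_ts)
{
    int idx = av_index_search_timestamp(st, wanted_ts, AVSEEK_FLAG_BACKWARD);

    if (idx < 0)
        return -1;               /* no keyframe entry at or before wanted_ts */
    return st->index_entries[idx].pos;
}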
1260
1261 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1262 int64_t target_ts, int flags)
1263 {
1264 AVInputFormat *avif = s->iformat;
1265 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1266 int64_t ts_min, ts_max, ts;
1267 int index;
1268 int64_t ret;
1269 AVStream *st;
1270
1271 if (stream_index < 0)
1272 return -1;
1273
1274 av_log(s, AV_LOG_TRACE, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1275
1276 ts_max =
1277 ts_min = AV_NOPTS_VALUE;
1278 pos_limit = -1; // GCC falsely says it may be uninitialized.
1279
1280 st = s->streams[stream_index];
1281 if (st->index_entries) {
1282 AVIndexEntry *e;
1283
1284 /* FIXME: Whole function must be checked for non-keyframe entries in
1285 * index case, especially read_timestamp(). */
1286 index = av_index_search_timestamp(st, target_ts,
1287 flags | AVSEEK_FLAG_BACKWARD);
1288 index = FFMAX(index, 0);
1289 e = &st->index_entries[index];
1290
1291 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1292 pos_min = e->pos;
1293 ts_min = e->timestamp;
1294 av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1295 pos_min, ts_min);
1296 } else {
1297 assert(index == 0);
1298 }
1299
1300 index = av_index_search_timestamp(st, target_ts,
1301 flags & ~AVSEEK_FLAG_BACKWARD);
1302 assert(index < st->nb_index_entries);
1303 if (index >= 0) {
1304 e = &st->index_entries[index];
1305 assert(e->timestamp >= target_ts);
1306 pos_max = e->pos;
1307 ts_max = e->timestamp;
1308 pos_limit = pos_max - e->min_distance;
1309 av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1310 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1311 }
1312 }
1313
1314 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1315 ts_min, ts_max, flags, &ts, avif->read_timestamp);
1316 if (pos < 0)
1317 return -1;
1318
1319 /* do the seek */
1320 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1321 return ret;
1322
1323 ff_update_cur_dts(s, st, ts);
1324
1325 return 0;
1326 }
1327
1328 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1329 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1330 int64_t ts_min, int64_t ts_max,
1331 int flags, int64_t *ts_ret,
1332 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1333 int64_t *, int64_t))
1334 {
1335 int64_t pos, ts;
1336 int64_t start_pos, filesize;
1337 int no_change;
1338
1339 av_log(s, AV_LOG_TRACE, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1340
1341 if (ts_min == AV_NOPTS_VALUE) {
1342 pos_min = s->internal->data_offset;
1343 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1344 if (ts_min == AV_NOPTS_VALUE)
1345 return -1;
1346 }
1347
1348 if (ts_max == AV_NOPTS_VALUE) {
1349 int step = 1024;
1350 filesize = avio_size(s->pb);
1351 pos_max = filesize - 1;
1352 do {
1353 pos_max -= step;
1354 ts_max = read_timestamp(s, stream_index, &pos_max,
1355 pos_max + step);
1356 step += step;
1357 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1358 if (ts_max == AV_NOPTS_VALUE)
1359 return -1;
1360
1361 for (;;) {
1362 int64_t tmp_pos = pos_max + 1;
1363 int64_t tmp_ts = read_timestamp(s, stream_index,
1364 &tmp_pos, INT64_MAX);
1365 if (tmp_ts == AV_NOPTS_VALUE)
1366 break;
1367 ts_max = tmp_ts;
1368 pos_max = tmp_pos;
1369 if (tmp_pos >= filesize)
1370 break;
1371 }
1372 pos_limit = pos_max;
1373 }
1374
1375 if (ts_min > ts_max)
1376 return -1;
1377 else if (ts_min == ts_max)
1378 pos_limit = pos_min;
1379
1380 no_change = 0;
1381 while (pos_min < pos_limit) {
1382 av_log(s, AV_LOG_TRACE, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1383 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1384 assert(pos_limit <= pos_max);
1385
1386 if (no_change == 0) {
1387 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1388 // interpolate position (better than plain bisection)
1389 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1390 ts_max - ts_min) +
1391 pos_min - approximate_keyframe_distance;
1392 } else if (no_change == 1) {
1393 // bisection if interpolation did not change min / max pos last time
1394 pos = (pos_min + pos_limit) >> 1;
1395 } else {
1396 /* linear search if bisection failed, can only happen if there
1397 * are very few or no keyframes between min/max */
1398 pos = pos_min;
1399 }
1400 if (pos <= pos_min)
1401 pos = pos_min + 1;
1402 else if (pos > pos_limit)
1403 pos = pos_limit;
1404 start_pos = pos;
1405
1406 // May pass pos_limit instead of -1.
1407 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1408 if (pos == pos_max)
1409 no_change++;
1410 else
1411 no_change = 0;
1412 av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1413 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1414 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1415 pos_limit, start_pos, no_change);
1416 if (ts == AV_NOPTS_VALUE) {
1417 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1418 return -1;
1419 }
1420 assert(ts != AV_NOPTS_VALUE);
1421 if (target_ts <= ts) {
1422 pos_limit = start_pos - 1;
1423 pos_max = pos;
1424 ts_max = ts;
1425 }
1426 if (target_ts >= ts) {
1427 pos_min = pos;
1428 ts_min = ts;
1429 }
1430 }
1431
1432 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1433 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1434 pos_min = pos;
1435 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1436 pos_min++;
1437 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1438 av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1439 pos, ts_min, target_ts, ts_max);
1440 *ts_ret = ts;
1441 return pos;
1442 }
1443
1444 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1445 int64_t pos, int flags)
1446 {
1447 int64_t pos_min, pos_max;
1448
1449 pos_min = s->internal->data_offset;
1450 pos_max = avio_size(s->pb) - 1;
1451
1452 if (pos < pos_min)
1453 pos = pos_min;
1454 else if (pos > pos_max)
1455 pos = pos_max;
1456
1457 avio_seek(s->pb, pos, SEEK_SET);
1458
1459 return 0;
1460 }
1461
1462 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1463 int64_t timestamp, int flags)
1464 {
1465 int index;
1466 int64_t ret;
1467 AVStream *st;
1468 AVIndexEntry *ie;
1469
1470 st = s->streams[stream_index];
1471
1472 index = av_index_search_timestamp(st, timestamp, flags);
1473
1474 if (index < 0 && st->nb_index_entries &&
1475 timestamp < st->index_entries[0].timestamp)
1476 return -1;
1477
1478 if (index < 0 || index == st->nb_index_entries - 1) {
1479 AVPacket pkt;
1480
1481 if (st->nb_index_entries) {
1482 assert(st->index_entries);
1483 ie = &st->index_entries[st->nb_index_entries - 1];
1484 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1485 return ret;
1486 ff_update_cur_dts(s, st, ie->timestamp);
1487 } else {
1488 if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
1489 return ret;
1490 }
1491 for (;;) {
1492 int read_status;
1493 do {
1494 read_status = av_read_frame(s, &pkt);
1495 } while (read_status == AVERROR(EAGAIN));
1496 if (read_status < 0)
1497 break;
1498 av_packet_unref(&pkt);
1499 if (stream_index == pkt.stream_index)
1500 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1501 break;
1502 }
1503 index = av_index_search_timestamp(st, timestamp, flags);
1504 }
1505 if (index < 0)
1506 return -1;
1507
1508 ff_read_frame_flush(s);
1509 if (s->iformat->read_seek)
1510 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1511 return 0;
1512 ie = &st->index_entries[index];
1513 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1514 return ret;
1515 ff_update_cur_dts(s, st, ie->timestamp);
1516
1517 return 0;
1518 }
1519
1520 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1521 int64_t timestamp, int flags)
1522 {
1523 int ret;
1524 AVStream *st;
1525
1526 if (flags & AVSEEK_FLAG_BYTE) {
1527 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1528 return -1;
1529 ff_read_frame_flush(s);
1530 return seek_frame_byte(s, stream_index, timestamp, flags);
1531 }
1532
1533 if (stream_index < 0) {
1534 stream_index = av_find_default_stream_index(s);
1535 if (stream_index < 0)
1536 return -1;
1537
1538 st = s->streams[stream_index];
1539 /* timestamp for default must be expressed in AV_TIME_BASE units */
1540 timestamp = av_rescale(timestamp, st->time_base.den,
1541 AV_TIME_BASE * (int64_t) st->time_base.num);
1542 }
1543
1544 /* first, we try the format-specific seek */
1545 if (s->iformat->read_seek) {
1546 ff_read_frame_flush(s);
1547 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1548 } else
1549 ret = -1;
1550 if (ret >= 0)
1551 return 0;
1552
1553 if (s->iformat->read_timestamp &&
1554 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1555 ff_read_frame_flush(s);
1556 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1557 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1558 ff_read_frame_flush(s);
1559 return seek_frame_generic(s, stream_index, timestamp, flags);
1560 } else
1561 return -1;
1562 }
1563
1564 int av_seek_frame(AVFormatContext *s, int stream_index,
1565 int64_t timestamp, int flags)
1566 {
1567 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1568
1569 if (ret >= 0)
1570 ret = queue_attached_pictures(s);
1571
1572 return ret;
1573 }
1574
1575 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1576 int64_t ts, int64_t max_ts, int flags)
1577 {
1578 if (min_ts > ts || max_ts < ts)
1579 return -1;
1580
1581 if (s->iformat->read_seek2) {
1582 int ret;
1583 ff_read_frame_flush(s);
1584 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1585 ts, max_ts, flags);
1586
1587 if (ret >= 0)
1588 ret = queue_attached_pictures(s);
1589 return ret;
1590 }
1591
1592 if (s->iformat->read_timestamp) {
1593 // try to seek via read_timestamp()
1594 }
1595
1596 // Fall back on old API if new is not implemented but old is.
1597 // Note the old API has somewhat different semantics.
1598 if (s->iformat->read_seek || 1)
1599 return av_seek_frame(s, stream_index, ts,
1600 flags | ((uint64_t) ts - min_ts >
1601 (uint64_t) max_ts - ts
1602 ? AVSEEK_FLAG_BACKWARD : 0));
1603
1604 // try some generic seek like seek_frame_generic() but with new ts semantics
1605 }
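
/* Hedged usage sketch (not part of the original file): seeking to a position
 * given in seconds. With stream_index = -1 the timestamp must be in
 * AV_TIME_BASE units, as rescaled in seek_frame_internal() above; the
 * BACKWARD flag requests the closest point at or before the target. */
static int seek_to_seconds_example(AVFormatContext *ic, double seconds)
{
    int64_t ts = (int64_t) (seconds * AV_TIME_BASE);

    return av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD);
}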
1606
1607 /*******************************************************/
1608
1609 /**
1610 * Return TRUE if the format context has an accurate duration in at least one of its streams or at the container level.
1611 *
1612 * @return TRUE if at least one component has an accurate duration.
1613 */
1614 static int has_duration(AVFormatContext *ic)
1615 {
1616 int i;
1617 AVStream *st;
1618
1619 for (i = 0; i < ic->nb_streams; i++) {
1620 st = ic->streams[i];
1621 if (st->duration != AV_NOPTS_VALUE)
1622 return 1;
1623 }
1624 if (ic->duration != AV_NOPTS_VALUE)
1625 return 1;
1626 return 0;
1627 }
1628
1629 /**
1630 * Estimate the container timings from those of each component stream.
1631 *
1632 * Also computes the global bitrate if possible.
1633 */
1634 static void update_stream_timings(AVFormatContext *ic)
1635 {
1636 int64_t start_time, start_time1, end_time, end_time1;
1637 int64_t duration, duration1, filesize;
1638 int i;
1639 AVStream *st;
1640
1641 start_time = INT64_MAX;
1642 end_time = INT64_MIN;
1643 duration = INT64_MIN;
1644 for (i = 0; i < ic->nb_streams; i++) {
1645 st = ic->streams[i];
1646 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1647 start_time1 = av_rescale_q(st->start_time, st->time_base,
1648 AV_TIME_BASE_Q);
1649 start_time = FFMIN(start_time, start_time1);
1650 if (st->duration != AV_NOPTS_VALUE) {
1651 end_time1 = start_time1 +
1652 av_rescale_q(st->duration, st->time_base,
1653 AV_TIME_BASE_Q);
1654 end_time = FFMAX(end_time, end_time1);
1655 }
1656 }
1657 if (st->duration != AV_NOPTS_VALUE) {
1658 duration1 = av_rescale_q(st->duration, st->time_base,
1659 AV_TIME_BASE_Q);
1660 duration = FFMAX(duration, duration1);
1661 }
1662 }
1663 if (start_time != INT64_MAX) {
1664 ic->start_time = start_time;
1665 if (end_time != INT64_MIN)
1666 duration = FFMAX(duration, end_time - start_time);
1667 }
1668 if (duration != INT64_MIN) {
1669 ic->duration = duration;
1670 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1671 /* compute the bitrate */
1672 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1673 (double) ic->duration;
1674 }
1675 }
1676
1677 static void fill_all_stream_timings(AVFormatContext *ic)
1678 {
1679 int i;
1680 AVStream *st;
1681
1682 update_stream_timings(ic);
1683 for (i = 0; i < ic->nb_streams; i++) {
1684 st = ic->streams[i];
1685 if (st->start_time == AV_NOPTS_VALUE) {
1686 if (ic->start_time != AV_NOPTS_VALUE)
1687 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1688 st->time_base);
1689 if (ic->duration != AV_NOPTS_VALUE)
1690 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
1691 st->time_base);
1692 }
1693 }
1694 }
1695
1696 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1697 {
1698 int64_t filesize, duration;
1699 int i;
1700 AVStream *st;
1701
1702 /* if bit_rate is already set, we believe it */
1703 if (ic->bit_rate <= 0) {
1704 int bit_rate = 0;
1705 for (i = 0; i < ic->nb_streams; i++) {
1706 st = ic->streams[i];
1707 if (st->codecpar->bit_rate > 0) {
1708 if (INT_MAX - st->codecpar->bit_rate < bit_rate) {
1709 bit_rate = 0;
1710 break;
1711 }
1712 bit_rate += st->codecpar->bit_rate;
1713 }
1714 }
1715 ic->bit_rate = bit_rate;
1716 }
1717
1718 /* if duration is already set, we believe it */
1719 if (ic->duration == AV_NOPTS_VALUE &&
1720 ic->bit_rate != 0) {
1721 filesize = ic->pb ? avio_size(ic->pb) : 0;
1722 if (filesize > 0) {
1723 for (i = 0; i < ic->nb_streams; i++) {
1724 st = ic->streams[i];
1725 duration = av_rescale(8 * filesize, st->time_base.den,
1726 ic->bit_rate *
1727 (int64_t) st->time_base.num);
1728 if (st->duration == AV_NOPTS_VALUE)
1729 st->duration = duration;
1730 }
1731 }
1732 }
1733 }
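
/* Worked example of the estimate above (added for illustration): a 10 MB
 * file (filesize = 10,000,000 bytes) with ic->bit_rate = 1,000,000 b/s gives
 * 8 * 10,000,000 / 1,000,000 = 80 seconds, which av_rescale() then expresses
 * in each stream's own time base. */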
1734
1735 #define DURATION_MAX_READ_SIZE 250000
1736 #define DURATION_MAX_RETRY 3
1737
1738 /* only usable for MPEG-PS streams */
1739 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1740 {
1741 AVPacket pkt1, *pkt = &pkt1;
1742 AVStream *st;
1743 int read_size, i, ret;
1744 int64_t end_time;
1745 int64_t filesize, offset, duration;
1746 int retry = 0;
1747
1748 /* flush packet queue */
1749 flush_packet_queue(ic);
1750
1751 for (i = 0; i < ic->nb_streams; i++) {
1752 st = ic->streams[i];
1753 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1754 av_log(ic, AV_LOG_WARNING,
1755 "start time is not set in estimate_timings_from_pts\n");
1756
1757 if (st->parser) {
1758 av_parser_close(st->parser);
1759 st->parser = NULL;
1760 }
1761 }
1762
1763 /* estimate the end time (duration) */
1764 /* XXX: may need to support wrapping */
1765 filesize = ic->pb ? avio_size(ic->pb) : 0;
1766 end_time = AV_NOPTS_VALUE;
1767 do {
1768 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1769 if (offset < 0)
1770 offset = 0;
1771
1772 avio_seek(ic->pb, offset, SEEK_SET);
1773 read_size = 0;
1774 for (;;) {
1775 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1776 break;
1777
1778 do {
1779 ret = ff_read_packet(ic, pkt);
1780 } while (ret == AVERROR(EAGAIN));
1781 if (ret != 0)
1782 break;
1783 read_size += pkt->size;
1784 st = ic->streams[pkt->stream_index];
1785 if (pkt->pts != AV_NOPTS_VALUE &&
1786 (st->start_time != AV_NOPTS_VALUE ||
1787 st->first_dts != AV_NOPTS_VALUE)) {
1788 duration = end_time = pkt->pts;
1789 if (st->start_time != AV_NOPTS_VALUE)
1790 duration -= st->start_time;
1791 else
1792 duration -= st->first_dts;
1793 if (duration < 0)
1794 duration += 1LL << st->pts_wrap_bits;
1795 if (duration > 0) {
1796 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1797 st->duration = duration;
1798 }
1799 }
1800 av_packet_unref(pkt);
1801 }
1802 } while (end_time == AV_NOPTS_VALUE &&
1803 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1804 ++retry <= DURATION_MAX_RETRY);
1805
1806 fill_all_stream_timings(ic);
1807
1808 avio_seek(ic->pb, old_offset, SEEK_SET);
1809 for (i = 0; i < ic->nb_streams; i++) {
1810 st = ic->streams[i];
1811 st->cur_dts = st->first_dts;
1812 st->last_IP_pts = AV_NOPTS_VALUE;
1813 }
1814 }
1815
1816 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1817 {
1818 int64_t file_size;
1819
1820 /* get the file size, if possible */
1821 if (ic->iformat->flags & AVFMT_NOFILE) {
1822 file_size = 0;
1823 } else {
1824 file_size = avio_size(ic->pb);
1825 file_size = FFMAX(0, file_size);
1826 }
1827
1828 if ((!strcmp(ic->iformat->name, "mpeg") ||
1829 !strcmp(ic->iformat->name, "mpegts")) &&
1830 file_size && ic->pb->seekable) {
1831 /* get an accurate estimate from the PTS values */
1832 estimate_timings_from_pts(ic, old_offset);
1833 } else if (has_duration(ic)) {
1834 /* at least one component has timings - we use them for all
1835 * the components */
1836 fill_all_stream_timings(ic);
1837 } else {
1838 av_log(ic, AV_LOG_WARNING,
1839 "Estimating duration from bitrate, this may be inaccurate\n");
1840 /* less precise: use bitrate info */
1841 estimate_timings_from_bit_rate(ic);
1842 }
1843 update_stream_timings(ic);
1844
1845 {
1846 int i;
1847 AVStream av_unused *st;
1848 for (i = 0; i < ic->nb_streams; i++) {
1849 st = ic->streams[i];
1850 av_log(ic, AV_LOG_TRACE, "%d: start_time: %0.3f duration: %0.3f\n", i,
1851 (double) st->start_time / AV_TIME_BASE,
1852 (double) st->duration / AV_TIME_BASE);
1853 }
1854 av_log(ic, AV_LOG_TRACE,
1855 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1856 (double) ic->start_time / AV_TIME_BASE,
1857 (double) ic->duration / AV_TIME_BASE,
1858 ic->bit_rate / 1000);
1859 }
1860 }
1861
1862 static int has_codec_parameters(AVStream *st)
1863 {
1864 AVCodecContext *avctx = st->internal->avctx;
1865 int val;
1866
1867 switch (avctx->codec_type) {
1868 case AVMEDIA_TYPE_AUDIO:
1869 val = avctx->sample_rate && avctx->channels;
1870 if (st->info->found_decoder >= 0 &&
1871 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1872 return 0;
1873 break;
1874 case AVMEDIA_TYPE_VIDEO:
1875 val = avctx->width;
1876 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1877 return 0;
1878 break;
1879 default:
1880 val = 1;
1881 break;
1882 }
1883 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
1884 }
1885
1886 static int has_decode_delay_been_guessed(AVStream *st)
1887 {
1888 return st->internal->avctx->codec_id != AV_CODEC_ID_H264 ||
1889 st->info->nb_decoded_frames >= 6;
1890 }
1891
1892 /* Return 1 if decoded data was returned, 0 if not, or a negative error code. */
1893 static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt,
1894 AVDictionary **options)
1895 {
1896 AVCodecContext *avctx = st->internal->avctx;
1897 const AVCodec *codec;
1898 int got_picture = 1, ret = 0;
1899 AVFrame *frame = av_frame_alloc();
1900 AVPacket pkt = *avpkt;
1901
1902 if (!frame)
1903 return AVERROR(ENOMEM);
1904
1905 if (!avcodec_is_open(avctx) && !st->info->found_decoder) {
1906 AVDictionary *thread_opt = NULL;
1907
1908 #if FF_API_LAVF_AVCTX
1909 FF_DISABLE_DEPRECATION_WARNINGS
1910 codec = st->codec->codec ? st->codec->codec
1911 : avcodec_find_decoder(st->codecpar->codec_id);
1912 FF_ENABLE_DEPRECATION_WARNINGS
1913 #else
1914 codec = avcodec_find_decoder(st->codecpar->codec_id);
1915 #endif
1916
1917 if (!codec) {
1918 st->info->found_decoder = -1;
1919 ret = -1;
1920 goto fail;
1921 }
1922
1923 /* Force thread count to 1 since the H.264 decoder will not extract
1924 * SPS and PPS to extradata during multi-threaded decoding. */
1925 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1926 ret = avcodec_open2(avctx, codec, options ? options : &thread_opt);
1927 if (!options)
1928 av_dict_free(&thread_opt);
1929 if (ret < 0) {
1930 st->info->found_decoder = -1;
1931 goto fail;
1932 }
1933 st->info->found_decoder = 1;
1934 } else if (!st->info->found_decoder)
1935 st->info->found_decoder = 1;
1936
1937 if (st->info->found_decoder < 0) {
1938 ret = -1;
1939 goto fail;
1940 }
1941
1942 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1943 ret >= 0 &&
1944 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1945 (!st->codec_info_nb_frames &&
1946 (avctx->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
1947 got_picture = 0;
1948 switch (avctx->codec_type) {
1949 case AVMEDIA_TYPE_VIDEO:
1950 ret = avcodec_decode_video2(avctx, frame,
1951 &got_picture, &pkt);
1952 break;
1953 case AVMEDIA_TYPE_AUDIO:
1954 ret = avcodec_decode_audio4(avctx, frame, &got_picture, &pkt);
1955 break;
1956 default:
1957 break;
1958 }
1959 if (ret >= 0) {
1960 if (got_picture)
1961 st->info->nb_decoded_frames++;
1962 pkt.data += ret;
1963 pkt.size -= ret;
1964 ret = got_picture;
1965 }
1966 }
1967
1968 fail:
1969 av_frame_free(&frame);
1970 return ret;
1971 }
1972
1973 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1974 {
1975 while (tags->id != AV_CODEC_ID_NONE) {
1976 if (tags->id == id)
1977 return tags->tag;
1978 tags++;
1979 }
1980 return 0;
1981 }
1982
1983 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1984 {
1985 int i;
1986 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1987 if (tag == tags[i].tag)
1988 return tags[i].id;
1989 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1990 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1991 return tags[i].id;
1992 return AV_CODEC_ID_NONE;
1993 }
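/* Example (illustrative only, not used in this file): with a tag table such
 * as ff_codec_bmp_tags from riff.h,
 *
 *     enum AVCodecID id  = ff_codec_get_id(ff_codec_bmp_tags, MKTAG('H', '2', '6', '4'));
 *     unsigned int   tag = ff_codec_get_tag(ff_codec_bmp_tags, AV_CODEC_ID_H264);
 *
 * should yield id == AV_CODEC_ID_H264 and the table's preferred fourcc for
 * H.264. ff_codec_get_id() falls back to a case-insensitive comparison via
 * avpriv_toupper4() when the exact tag is not found. */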
1994
1995 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
1996 {
1997 if (flt) {
1998 switch (bps) {
1999 case 32:
2000 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2001 case 64:
2002 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2003 default:
2004 return AV_CODEC_ID_NONE;
2005 }
2006 } else {
2007 bps >>= 3;
2008 if (sflags & (1 << (bps - 1))) {
2009 switch (bps) {
2010 case 1:
2011 return AV_CODEC_ID_PCM_S8;
2012 case 2:
2013 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2014 case 3:
2015 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2016 case 4:
2017 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2018 default:
2019 return AV_CODEC_ID_NONE;
2020 }
2021 } else {
2022 switch (bps) {
2023 case 1:
2024 return AV_CODEC_ID_PCM_U8;
2025 case 2:
2026 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2027 case 3:
2028 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2029 case 4:
2030 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2031 default:
2032 return AV_CODEC_ID_NONE;
2033 }
2034 }
2035 }
2036 }
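/* Example (illustrative only): the usual PCM mappings look like
 *
 *     ff_get_pcm_codec_id(16, 0, 0, ~0);  // -> AV_CODEC_ID_PCM_S16LE
 *     ff_get_pcm_codec_id(16, 0, 1, ~0);  // -> AV_CODEC_ID_PCM_S16BE
 *     ff_get_pcm_codec_id( 8, 0, 0,  0);  // -> AV_CODEC_ID_PCM_U8
 *     ff_get_pcm_codec_id(32, 1, 0,  0);  // -> AV_CODEC_ID_PCM_F32LE
 *
 * sflags is a bitmask with bit (bps / 8 - 1) set for the sample sizes that
 * should be treated as signed; the float path ignores it. */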
2037
2038 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
2039 {
2040 int i;
2041 for (i = 0; tags && tags[i]; i++) {
2042 int tag = ff_codec_get_tag(tags[i], id);
2043 if (tag)
2044 return tag;
2045 }
2046 return 0;
2047 }
2048
2049 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2050 {
2051 int i;
2052 for (i = 0; tags && tags[i]; i++) {
2053 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2054 if (id != AV_CODEC_ID_NONE)
2055 return id;
2056 }
2057 return AV_CODEC_ID_NONE;
2058 }
2059
2060 static void compute_chapters_end(AVFormatContext *s)
2061 {
2062 unsigned int i, j;
2063 int64_t max_time = s->duration +
2064 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2065
2066 for (i = 0; i < s->nb_chapters; i++)
2067 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2068 AVChapter *ch = s->chapters[i];
2069 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2070 ch->time_base)
2071 : INT64_MAX;
2072
2073 for (j = 0; j < s->nb_chapters; j++) {
2074 AVChapter *ch1 = s->chapters[j];
2075 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2076 ch->time_base);
2077 if (j != i && next_start > ch->start && next_start < end)
2078 end = next_start;
2079 }
2080 ch->end = (end == INT64_MAX) ? ch->start : end;
2081 }
2082 }
2083
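/* Return the numerator of the i-th "standard" frame rate, meant to be paired
 * with a denominator of 12 * 1001 (see the avg_frame_rate rounding in
 * avformat_find_stream_info() below): entries 0..719 cover n/12 fps for
 * n = 1..720 (i.e. up to 60 fps in steps of 1/12), and the remaining entries
 * cover the NTSC-style rates 24000/1001, 30000/1001, 60000/1001, 12000/1001
 * and 15000/1001. */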
2084 static int get_std_framerate(int i)
2085 {
2086 if (i < 60 * 12)
2087 return (i + 1) * 1001;
2088 else
2089 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
2090 }
2091
2092 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2093 {
2094 int i, count, ret, read_size, j;
2095 AVStream *st;
2096 AVCodecContext *avctx;
2097 AVPacket pkt1, *pkt;
2098 int64_t old_offset = avio_tell(ic->pb);
2099 // new streams might appear; no options will be applied to them
2100 int orig_nb_streams = ic->nb_streams;
2101
2102 for (i = 0; i < ic->nb_streams; i++) {
2103 const AVCodec *codec;
2104 AVDictionary *thread_opt = NULL;
2105 st = ic->streams[i];
2106 avctx = st->internal->avctx;
2107
2108 // only needed here for the parser->split() extradata extraction below
2109 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2110 st->parser = av_parser_init(st->codecpar->codec_id);
2111 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2112 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2113 }
2114
2115 /* check if the caller has overridden the codec id */
2116 #if FF_API_LAVF_AVCTX
2117 FF_DISABLE_DEPRECATION_WARNINGS
2118 if (st->codec->codec_id != st->internal->orig_codec_id) {
2119 st->codecpar->codec_id = st->codec->codec_id;
2120 st->codecpar->codec_type = st->codec->codec_type;
2121 st->internal->orig_codec_id = st->codec->codec_id;
2122 }
2123 FF_ENABLE_DEPRECATION_WARNINGS
2124 #endif
2125 if (st->codecpar->codec_id != st->internal->orig_codec_id)
2126 st->internal->orig_codec_id = st->codecpar->codec_id;
2127
2128 ret = avcodec_parameters_to_context(avctx, st->codecpar);
2129 if (ret < 0)
2130 goto find_stream_info_err;
2131 if (st->codecpar->codec_id != AV_CODEC_ID_PROBE &&
2132 st->codecpar->codec_id != AV_CODEC_ID_NONE)
2133 st->internal->avctx_inited = 1;
2134
2135 #if FF_API_LAVF_AVCTX
2136 FF_DISABLE_DEPRECATION_WARNINGS
2137 codec = st->codec->codec ? st->codec->codec
2138 : avcodec_find_decoder(st->codecpar->codec_id);
2139 FF_ENABLE_DEPRECATION_WARNINGS
2140 #else
2141 codec = avcodec_find_decoder(st->codecpar->codec_id);
2142 #endif
2143
2144 /* Force thread count to 1 since the H.264 decoder will not extract
2145 * SPS and PPS to extradata during multi-threaded decoding. */
2146 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2147
2148 /* Ensure that subtitle_header is properly set. */
2149 if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE
2150 && codec && !avctx->codec)
2151 avcodec_open2(avctx, codec,
2152 options ? &options[i] : &thread_opt);
2153
2154 // Try to just open decoders, in case this is enough to get parameters.
2155 if (!has_codec_parameters(st)) {
2156 if (codec && !avctx->codec)
2157 avcodec_open2(avctx, codec,
2158 options ? &options[i] : &thread_opt);
2159 }
2160 if (!options)
2161 av_dict_free(&thread_opt);
2162 }
2163
2164 for (i = 0; i < ic->nb_streams; i++) {
2165 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2166 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
2167 }
2168
2169 count = 0;
2170 read_size = 0;
2171 for (;;) {
2172 if (ff_check_interrupt(&ic->interrupt_callback)) {
2173 ret = AVERROR_EXIT;
2174 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2175 break;
2176 }
2177
2178 /* check if one codec still needs to be handled */
2179 for (i = 0; i < ic->nb_streams; i++) {
2180 int fps_analyze_framecount = 20;
2181
2182 st = ic->streams[i];
2183 if (!has_codec_parameters(st))
2184 break;
2185 /* If the timebase is coarse (like the usual millisecond precision
2186 * of mkv), we need to analyze more frames to reliably arrive at
2187 * the correct fps. */
2188 if (av_q2d(st->time_base) > 0.0005)
2189 fps_analyze_framecount *= 2;
2190 if (ic->fps_probe_size >= 0)
2191 fps_analyze_framecount = ic->fps_probe_size;
2192 /* variable fps and no guess at the real fps */
2193 if (!st->avg_frame_rate.num &&
2194 st->codec_info_nb_frames < fps_analyze_framecount &&
2195 st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
2196 break;
2197 if (st->parser && st->parser->parser->split &&
2198 !st->codecpar->extradata)
2199 break;
2200 if (st->first_dts == AV_NOPTS_VALUE &&
2201 st->codec_info_nb_frames < ic->max_ts_probe &&
2202 (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
2203 st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO))
2204 break;
2205 }
2206 if (i == ic->nb_streams) {
2207 /* NOTE: If the format has no header, then we need to read some
2208 * packets to get most of the streams, so we cannot stop here. */
2209 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2210 /* If we found the info for all the codecs, we can stop. */
2211 ret = count;
2212 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2213 break;
2214 }
2215 }
2216 /* We did not get all the codec info, but we read too much data. */
2217 if (read_size >= ic->probesize) {
2218 ret = count;
2219 av_log(ic, AV_LOG_DEBUG,
2220 "Probe buffer size limit %d reached\n", ic->probesize);
2221 break;
2222 }
2223
2224 /* NOTE: A new stream can be added here if the file has no header
2225 * (AVFMTCTX_NOHEADER). */
2226 ret = read_frame_internal(ic, &pkt1);
2227 if (ret == AVERROR(EAGAIN))
2228 continue;
2229
2230 if (ret < 0) {
2231 /* EOF or error */
2232 AVPacket empty_pkt = { 0 };
2233 int err = 0;
2234 av_init_packet(&empty_pkt);
2235
2236 /* We may have reached EOF without getting all the codec parameters. */
2237 ret = -1;
2238 for (i = 0; i < ic->nb_streams; i++) {
2239 st = ic->streams[i];
2240
2241 /* flush the decoders */
2242 if (st->info->found_decoder == 1) {
2243 do {
2244 err = try_decode_frame(ic, st, &empty_pkt,
2245 (options && i < orig_nb_streams)
2246 ? &options[i] : NULL);
2247 } while (err > 0 && !has_codec_parameters(st));
2248 }
2249
2250 if (err < 0) {
2251 av_log(ic, AV_LOG_WARNING,
2252 "decoding for stream %d failed\n", st->index);
2253 } else if (!has_codec_parameters(st)) {
2254 char buf[256];
2255 avcodec_string(buf, sizeof(buf), st->internal->avctx, 0);
2256 av_log(ic, AV_LOG_WARNING,
2257 "Could not find codec parameters (%s)\n", buf);
2258 } else {
2259 ret = 0;
2260 }
2261 }
2262 break;
2263 }
2264
2265 pkt = &pkt1;
2266
2267 if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) {
2268 ret = add_to_pktbuf(&ic->internal->packet_buffer, pkt,
2269 &ic->internal->packet_buffer_end, 0);
2270 if (ret < 0)
2271 goto find_stream_info_err;
2272 }
2273
2274 read_size += pkt->size;
2275
2276 st = ic->streams[pkt->stream_index];
2277 avctx = st->internal->avctx;
2278 if (!st->internal->avctx_inited) {
2279 ret = avcodec_parameters_to_context(avctx, st->codecpar);
2280 if (ret < 0)
2281 goto find_stream_info_err;
2282 st->internal->avctx_inited = 1;
2283 }
2284
2285 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2286 /* check for non-increasing dts */
2287 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2288 st->info->fps_last_dts >= pkt->dts) {
2289 av_log(ic, AV_LOG_WARNING,
2290 "Non-increasing DTS in stream %d: packet %d with DTS "
2291 "%"PRId64", packet %d with DTS %"PRId64"\n",
2292 st->index, st->info->fps_last_dts_idx,
2293 st->info->fps_last_dts, st->codec_info_nb_frames,
2294 pkt->dts);
2295 st->info->fps_first_dts =
2296 st->info->fps_last_dts = AV_NOPTS_VALUE;
2297 }
2298 /* Check for a discontinuity in dts. If the difference in dts
2299 * is more than 1000 times the average packet duration in the
2300 * sequence, we treat it as a discontinuity. */
2301 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2302 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2303 (pkt->dts - st->info->fps_last_dts) / 1000 >
2304 (st->info->fps_last_dts - st->info->fps_first_dts) /
2305 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2306 av_log(ic, AV_LOG_WARNING,
2307 "DTS discontinuity in stream %d: packet %d with DTS "
2308 "%"PRId64", packet %d with DTS %"PRId64"\n",
2309 st->index, st->info->fps_last_dts_idx,
2310 st->info->fps_last_dts, st->codec_info_nb_frames,
2311 pkt->dts);
2312 st->info->fps_first_dts =
2313 st->info->fps_last_dts = AV_NOPTS_VALUE;
2314 }
2315
2316 /* update stored dts values */
2317 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2318 st->info->fps_first_dts = pkt->dts;
2319 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2320 }
2321 st->info->fps_last_dts = pkt->dts;
2322 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2323
2324 /* check max_analyze_duration */
2325 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2326 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2327 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2328 ic->max_analyze_duration);
2329 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2330 av_packet_unref(pkt);
2331 break;
2332 }
2333 }
2334 if (st->parser && st->parser->parser->split && !avctx->extradata) {
2335 int i = st->parser->parser->split(avctx, pkt->data, pkt->size);
2336 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2337 avctx->extradata_size = i;
2338 avctx->extradata = av_mallocz(avctx->extradata_size +
2339 AV_INPUT_BUFFER_PADDING_SIZE);
2340 if (!avctx->extradata) {
2341 ret = AVERROR(ENOMEM);
goto find_stream_info_err;
}
2342 memcpy(avctx->extradata, pkt->data,
2343 avctx->extradata_size);
2344 }
2345 }
2346
2347 /* If we still have no information, try to open the codec and
2348 * decompress the frame. We try to avoid that in most cases as
2349 * it takes longer and uses more memory. For MPEG-4, we need to
2350 * decompress for QuickTime.
2351 *
2352 * If AV_CODEC_CAP_CHANNEL_CONF is set, this will force decoding of at
2353 * least one frame of codec data; this makes sure the codec initializes
2354 * the channel configuration and does not just trust the values from
2355 * the container. */
2356 try_decode_frame(ic, st, pkt,
2357 (options && i < orig_nb_streams) ? &options[i] : NULL);
2358
2359 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2360 av_packet_unref(pkt);
2361
2362 st->codec_info_nb_frames++;
2363 count++;
2364 }
2365
2366 // close codecs which were opened in try_decode_frame()
2367 for (i = 0; i < ic->nb_streams; i++) {
2368 st = ic->streams[i];
2369 avcodec_close(st->internal->avctx);
2370 }
2371 for (i = 0; i < ic->nb_streams; i++) {
2372 st = ic->streams[i];
2373 avctx = st->internal->avctx;
2374 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2375 /* estimate average framerate if not set by demuxer */
2376 if (!st->avg_frame_rate.num &&
2377 st->info->fps_last_dts != st->info->fps_first_dts) {
2378 int64_t delta_dts = st->info->fps_last_dts -
2379 st->info->fps_first_dts;
2380 int delta_packets = st->info->fps_last_dts_idx -
2381 st->info->fps_first_dts_idx;
2382 int best_fps = 0;
2383 double best_error = 0.01;
2384
2385 if (delta_dts >= INT64_MAX / st->time_base.num ||
2386 delta_packets >= INT64_MAX / st->time_base.den ||
2387 delta_dts < 0)
2388 continue;
2389 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2390 delta_packets * (int64_t) st->time_base.den,
2391 delta_dts * (int64_t) st->time_base.num, 60000);
2392
2393 /* Round guessed framerate to a "standard" framerate if it's
2394 * within 1% of the original estimate. */
2395 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2396 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2397 double error = fabs(av_q2d(st->avg_frame_rate) /
2398 av_q2d(std_fps) - 1);
2399
2400 if (error < best_error) {
2401 best_error = error;
2402 best_fps = std_fps.num;
2403 }
2404 }
2405 if (best_fps)
2406 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2407 best_fps, 12 * 1001, INT_MAX);
2408 }
2409 } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2410 if (!avctx->bits_per_coded_sample)
2411 avctx->bits_per_coded_sample =
2412 av_get_bits_per_sample(avctx->codec_id);
2413 // set stream disposition based on audio service type
2414 switch (avctx->audio_service_type) {
2415 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2416 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2417 break;
2418 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2419 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2420 break;
2421 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2422 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2423 break;
2424 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2425 st->disposition = AV_DISPOSITION_COMMENT;
2426 break;
2427 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2428 st->disposition = AV_DISPOSITION_KARAOKE;
2429 break;
2430 }
2431 }
2432 }
2433
2434 compute_chapters_end(ic);
2435
2436 /* update the stream parameters from the internal codec contexts */
2437 for (i = 0; i < ic->nb_streams; i++) {
2438 st = ic->streams[i];
2439 if (!st->internal->avctx_inited)
2440 continue;
2441
2442 ret = avcodec_parameters_from_context(st->codecpar, st->internal->avctx);
2443 if (ret < 0)
2444 goto find_stream_info_err;
2445
2446 #if FF_API_LAVF_AVCTX
2447 FF_DISABLE_DEPRECATION_WARNINGS
2448 ret = avcodec_parameters_to_context(st->codec, st->codecpar);
2449 if (ret < 0)
2450 goto find_stream_info_err;
2451
2452 if (st->internal->avctx->subtitle_header) {
2453 st->codec->subtitle_header = av_malloc(st->internal->avctx->subtitle_header_size);
2454 if (!st->codec->subtitle_header) {
2455 ret = AVERROR(ENOMEM);
goto find_stream_info_err;
}
2456 st->codec->subtitle_header_size = st->internal->avctx->subtitle_header_size;
2457 memcpy(st->codec->subtitle_header, st->internal->avctx->subtitle_header,
2458 st->codec->subtitle_header_size);
2459 }
2460 FF_ENABLE_DEPRECATION_WARNINGS
2461 #endif
2462
2463 st->internal->avctx_inited = 0;
2464 }
2465
2466 estimate_timings(ic, old_offset);
2467
2468 find_stream_info_err:
2469 for (i = 0; i < ic->nb_streams; i++) {
2470 av_freep(&ic->streams[i]->info);
2471 }
2472 return ret;
2473 }
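/* Minimal caller-side sketch (not part of this file) of the usual probing
 * sequence; "input.mkv" is just a placeholder name:
 *
 *     AVFormatContext *fmt = NULL;
 *     int ret = avformat_open_input(&fmt, "input.mkv", NULL, NULL);
 *     if (ret < 0)
 *         return ret;
 *     ret = avformat_find_stream_info(fmt, NULL);
 *     if (ret < 0) {
 *         avformat_close_input(&fmt);
 *         return ret;
 *     }
 *     av_dump_format(fmt, 0, "input.mkv", 0);
 *     avformat_close_input(&fmt);
 *
 * Per-stream codec options can be passed through the second argument as an
 * array of nb_streams dictionaries, matching the options parameter above. */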
2474
2475 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2476 {
2477 int i, j;
2478
2479 for (i = 0; i < ic->nb_programs; i++)
2480 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2481 if (ic->programs[i]->stream_index[j] == s)
2482 return ic->programs[i];
2483 return NULL;
2484 }
2485
2486 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2487 int wanted_stream_nb, int related_stream,
2488 AVCodec **decoder_ret, int flags)
2489 {
2490 int i, nb_streams = ic->nb_streams;
2491 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2492 unsigned *program = NULL;
2493 AVCodec *decoder = NULL, *best_decoder = NULL;
2494
2495 if (related_stream >= 0 && wanted_stream_nb < 0) {
2496 AVProgram *p = find_program_from_stream(ic, related_stream);
2497 if (p) {
2498 program = p->stream_index;
2499 nb_streams = p->nb_stream_indexes;
2500 }
2501 }
2502 for (i = 0; i < nb_streams; i++) {
2503 int real_stream_index = program ? program[i] : i;
2504 AVStream *st = ic->streams[real_stream_index];
2505 AVCodecParameters *par = st->codecpar;
2506 if (par->codec_type != type)
2507 continue;
2508 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2509 continue;
2510 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2511 AV_DISPOSITION_VISUAL_IMPAIRED))
2512 continue;
2513 if (decoder_ret) {
2514 decoder = avcodec_find_decoder(par->codec_id);
2515 if (!decoder) {
2516 if (ret < 0)
2517 ret = AVERROR_DECODER_NOT_FOUND;
2518 continue;
2519 }
2520 }
2521 if (best_count >= st->codec_info_nb_frames)
2522 continue;
2523 best_count = st->codec_info_nb_frames;
2524 ret = real_stream_index;
2525 best_decoder = decoder;
2526 if (program && i == nb_streams - 1 && ret < 0) {
2527 program = NULL;
2528 nb_streams = ic->nb_streams;
2529 /* no related stream found, try again with everything */
2530 i = 0;
2531 }
2532 }
2533 if (decoder_ret)
2534 *decoder_ret = best_decoder;
2535 return ret;
2536 }
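/* Illustrative call (not part of this file; ic is an already opened
 * AVFormatContext): pick the "best" video stream and its decoder in one go,
 * with -1/-1 meaning no preferred and no related stream.
 *
 *     AVCodec *dec = NULL;
 *     int idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (idx < 0)
 *         return idx;  // AVERROR_STREAM_NOT_FOUND or AVERROR_DECODER_NOT_FOUND
 *
 * Streams flagged as hearing or visually impaired are skipped, and the stream
 * with the most frames seen during probing wins. */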
2537
2538 /*******************************************************/
2539
2540 int av_read_play(AVFormatContext *s)
2541 {
2542 if (s->iformat->read_play)
2543 return s->iformat->read_play(s);
2544 if (s->pb)
2545 return avio_pause(s->pb, 0);
2546 return AVERROR(ENOSYS);
2547 }
2548
2549 int av_read_pause(AVFormatContext *s)
2550 {
2551 if (s->iformat->read_pause)
2552 return s->iformat->read_pause(s);
2553 if (s->pb)
2554 return avio_pause(s->pb, 1);
2555 return AVERROR(ENOSYS);
2556 }
2557
2558 static void free_stream(AVStream **pst)
2559 {
2560 AVStream *st = *pst;
2561 int i;
2562
2563 if (!st)
2564 return;
2565
2566 for (i = 0; i < st->nb_side_data; i++)
2567 av_freep(&st->side_data[i].data);
2568 av_freep(&st->side_data);
2569
2570 if (st->parser)
2571 av_parser_close(st->parser);
2572
2573 if (st->attached_pic.data)
2574 av_packet_unref(&st->attached_pic);
2575
2576 if (st->internal) {
2577 avcodec_free_context(&st->internal->avctx);
2578 }
2579 av_freep(&st->internal);
2580
2581 av_dict_free(&st->metadata);
2582 avcodec_parameters_free(&st->codecpar);
2583 av_freep(&st->probe_data.buf);
2584 av_free(st->index_entries);
2585 #if FF_API_LAVF_AVCTX
2586 FF_DISABLE_DEPRECATION_WARNINGS
2587 av_free(st->codec->extradata);
2588 av_free(st->codec->subtitle_header);
2589 av_free(st->codec);
2590 FF_ENABLE_DEPRECATION_WARNINGS
2591 #endif
2592 av_free(st->priv_data);
2593 av_free(st->info);
2594
2595 av_freep(pst);
2596 }
2597
2598 void avformat_free_context(AVFormatContext *s)
2599 {
2600 int i;
2601
2602 if (!s)
2603 return;
2604
2605 av_opt_free(s);
2606 if (s->iformat && s->iformat->priv_class && s->priv_data)
2607 av_opt_free(s->priv_data);
2608
2609 for (i = 0; i < s->nb_streams; i++)
2610 free_stream(&s->streams[i]);
2611
2612 for (i = s->nb_programs - 1; i >= 0; i--) {
2613 av_dict_free(&s->programs[i]->metadata);
2614 av_freep(&s->programs[i]->stream_index);
2615 av_freep(&s->programs[i]);
2616 }
2617 av_freep(&s->programs);
2618 av_freep(&s->priv_data);
2619 while (s->nb_chapters--) {
2620 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2621 av_free(s->chapters[s->nb_chapters]);
2622 }
2623 av_freep(&s->chapters);
2624 av_dict_free(&s->metadata);
2625 av_freep(&s->streams);
2626 av_freep(&s->internal);
2627 av_free(s);
2628 }
2629
2630 void avformat_close_input(AVFormatContext **ps)
2631 {
2632 AVFormatContext *s = *ps;
2633 AVIOContext *pb = s->pb;
2634
2635 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2636 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2637 pb = NULL;
2638
2639 flush_packet_queue(s);
2640
2641 if (s->iformat)
2642 if (s->iformat->read_close)
2643 s->iformat->read_close(s);
2644
2645 avformat_free_context(s);
2646
2647 *ps = NULL;
2648
2649 avio_close(pb);
2650 }
2651
2652 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2653 {
2654 AVStream *st;
2655 int i;
2656
2657 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2658 sizeof(*s->streams)) < 0) {
2659 s->nb_streams = 0;
2660 return NULL;
2661 }
2662
2663 st = av_mallocz(sizeof(AVStream));
2664 if (!st)
2665 return NULL;
2666 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2667 av_free(st);
2668 return NULL;
2669 }
2670
2671 #if FF_API_LAVF_AVCTX
2672 FF_DISABLE_DEPRECATION_WARNINGS
2673 st->codec = avcodec_alloc_context3(c);
2674 if (!st->codec) {
2675 av_free(st->info);
2676 av_free(st);
2677 return NULL;
2678 }
2679 FF_ENABLE_DEPRECATION_WARNINGS
2680 #endif
2681
2682 st->internal = av_mallocz(sizeof(*st->internal));
2683 if (!st->internal)
2684 goto fail;
2685
2686 if (s->iformat) {
2687 #if FF_API_LAVF_AVCTX
2688 FF_DISABLE_DEPRECATION_WARNINGS
2689 /* no default bitrate if decoding */
2690 st->codec->bit_rate = 0;
2691 FF_ENABLE_DEPRECATION_WARNINGS
2692 #endif
2693
2694 /* default pts setting is MPEG-like */
2695 avpriv_set_pts_info(st, 33, 1, 90000);
2696 /* We set the current DTS to 0 so that formats without any timestamps
2697 * but with durations get some timestamps; formats with some unknown
2698 * timestamps have their first few packets buffered and the
2699 * timestamps corrected before they are returned to the user. */
2700 st->cur_dts = 0;
2701 } else {
2702 st->cur_dts = AV_NOPTS_VALUE;
2703 }
2704
2705 st->codecpar = avcodec_parameters_alloc();
2706 if (!st->codecpar)
2707 goto fail;
2708
2709 st->internal->avctx = avcodec_alloc_context3(NULL);
2710 if (!st->internal->avctx)
2711 goto fail;
2712
2713 st->index = s->nb_streams;
2714 st->start_time = AV_NOPTS_VALUE;
2715 st->duration = AV_NOPTS_VALUE;
2716 st->first_dts = AV_NOPTS_VALUE;
2717 st->probe_packets = MAX_PROBE_PACKETS;
2718
2719 st->last_IP_pts = AV_NOPTS_VALUE;
2720 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2721 st->pts_buffer[i] = AV_NOPTS_VALUE;
2722
2723 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2724
2725 st->info->fps_first_dts = AV_NOPTS_VALUE;
2726 st->info->fps_last_dts = AV_NOPTS_VALUE;
2727
2728 #if FF_API_LAVF_AVCTX
2729 st->internal->need_codec_update = 1;
2730 #endif
2731
2732 s->streams[s->nb_streams++] = st;
2733 return st;
2734 fail:
2735 free_stream(&st);
2736 return NULL;
2737 }
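/* Muxer-side sketch (illustrative only; "oc" is assumed to be an already
 * allocated output context): create a stream and fill in the parameters the
 * muxer needs.
 *
 *     AVStream *st = avformat_new_stream(oc, NULL);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->time_base            = (AVRational){ 1, 90000 };
 *     st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 *     st->codecpar->codec_id   = AV_CODEC_ID_H264;
 *     st->codecpar->width      = 1280;
 *     st->codecpar->height     = 720;
 *
 * The stream is owned by the AVFormatContext and is freed by
 * avformat_free_context(). */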
2738
2739 AVProgram *av_new_program(AVFormatContext *ac, int id)
2740 {
2741 AVProgram *program = NULL;
2742 int i;
2743
2744 av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
2745
2746 for (i = 0; i < ac->nb_programs; i++)
2747 if (ac->programs[i]->id == id)
2748 program = ac->programs[i];
2749
2750 if (!program) {
2751 program = av_mallocz(sizeof(AVProgram));
2752 if (!program)
2753 return NULL;
2754 dynarray_add(&ac->programs, &ac->nb_programs, program);
2755 program->discard = AVDISCARD_NONE;
2756 }
2757 program->id = id;
2758
2759 return program;
2760 }
2761
2762 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2763 int64_t start, int64_t end, const char *title)
2764 {
2765 AVChapter *chapter = NULL;
2766 int i;
2767
2768 for (i = 0; i < s->nb_chapters; i++)
2769 if (s->chapters[i]->id == id)
2770 chapter = s->chapters[i];
2771
2772 if (!chapter) {
2773 chapter = av_mallocz(sizeof(AVChapter));
2774 if (!chapter)
2775 return NULL;
2776 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2777 }
2778 av_dict_set(&chapter->metadata, "title", title, 0);
2779 chapter->id = id;
2780 chapter->time_base = time_base;
2781 chapter->start = start;
2782 chapter->end = end;
2783
2784 return chapter;
2785 }
2786
2787 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2788 {
2789 int i, j;
2790 AVProgram *program = NULL;
2791
2792 if (idx >= ac->nb_streams) {
2793 av_log(ac, AV_LOG_ERROR, "stream index %u is not valid\n", idx);
2794 return;
2795 }
2796
2797 for (i = 0; i < ac->nb_programs; i++) {
2798 if (ac->programs[i]->id != progid)
2799 continue;
2800 program = ac->programs[i];
2801 for (j = 0; j < program->nb_stream_indexes; j++)
2802 if (program->stream_index[j] == idx)
2803 return;
2804
2805 if (av_reallocp_array(&program->stream_index,
2806 program->nb_stream_indexes + 1,
2807 sizeof(*program->stream_index)) < 0) {
2808 program->nb_stream_indexes = 0;
2809 return;
2810 }
2811 program->stream_index[program->nb_stream_indexes++] = idx;
2812 return;
2813 }
2814 }
2815
2816 uint64_t ff_ntp_time(void)
2817 {
2818 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
2819 }
2820
2821 int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
2822 {
2823 const char *p;
2824 char *q, buf1[20], c;
2825 int nd, len, percentd_found;
2826
2827 q = buf;
2828 p = path;
2829 percentd_found = 0;
2830 for (;;) {
2831 c = *p++;
2832 if (c == '\0')
2833 break;
2834 if (c == '%') {
2835 do {
2836 nd = 0;
2837 while (av_isdigit(*p))
2838 nd = nd * 10 + *p++ - '0';
2839 c = *p++;
2840 } while (av_isdigit(c));
2841
2842 switch (c) {
2843 case '%':
2844 goto addchar;
2845 case 'd':
2846 if (percentd_found)
2847 goto fail;
2848 percentd_found = 1;
2849 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2850 len = strlen(buf1);
2851 if ((q - buf + len) > buf_size - 1)
2852 goto fail;
2853 memcpy(q, buf1, len);
2854 q += len;
2855 break;
2856 default:
2857 goto fail;
2858 }
2859 } else {
2860 addchar:
2861 if ((q - buf) < buf_size - 1)
2862 *q++ = c;
2863 }
2864 }
2865 if (!percentd_found)
2866 goto fail;
2867 *q = '\0';
2868 return 0;
2869 fail:
2870 *q = '\0';
2871 return -1;
2872 }
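/* Example (illustrative only): exactly one %d (optionally with a width such
 * as %03d) is substituted with the frame number; a second %d, or a missing
 * one, makes the call fail.
 *
 *     char name[64];
 *     av_get_frame_filename(name, sizeof(name), "img-%03d.png", 7);
 *     // name now contains "img-007.png"; the return value is 0 on success,
 *     // -1 if the pattern is invalid or the buffer is too small.
 */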
2873
2874 void av_url_split(char *proto, int proto_size,
2875 char *authorization, int authorization_size,
2876 char *hostname, int hostname_size,
2877 int *port_ptr, char *path, int path_size, const char *url)
2878 {
2879 const char *p, *ls, *at, *col, *brk;
2880
2881 if (port_ptr)
2882 *port_ptr = -1;
2883 if (proto_size > 0)
2884 proto[0] = 0;
2885 if (authorization_size > 0)
2886 authorization[0] = 0;
2887 if (hostname_size > 0)
2888 hostname[0] = 0;
2889 if (path_size > 0)
2890 path[0] = 0;
2891
2892 /* parse protocol */
2893 if ((p = strchr(url, ':'))) {
2894 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
2895 p++; /* skip ':' */
2896 if (*p == '/')
2897 p++;
2898 if (*p == '/')
2899 p++;
2900 } else {
2901 /* no protocol means plain filename */
2902 av_strlcpy(path, url, path_size);
2903 return;
2904 }
2905
2906 /* separate path from hostname */
2907 ls = strchr(p, '/');
2908 if (!ls)
2909 ls = strchr(p, '?');
2910 if (ls)
2911 av_strlcpy(path, ls, path_size);
2912 else
2913 ls = &p[strlen(p)]; // XXX
2914
2915 /* the rest is hostname, use that to parse auth/port */
2916 if (ls != p) {
2917 /* authorization (user[:pass]@hostname) */
2918 if ((at = strchr(p, '@')) && at < ls) {
2919 av_strlcpy(authorization, p,
2920 FFMIN(authorization_size, at + 1 - p));
2921 p = at + 1; /* skip '@' */
2922 }
2923
2924 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
2925 /* [host]:port */
2926 av_strlcpy(hostname, p + 1,
2927 FFMIN(hostname_size, brk - p));
2928 if (brk[1] == ':' && port_ptr)
2929 *port_ptr = atoi(brk + 2);
2930 } else if ((col = strchr(p, ':')) && col < ls) {
2931 av_strlcpy(hostname, p,
2932 FFMIN(col + 1 - p, hostname_size));
2933 if (port_ptr)
2934 *port_ptr = atoi(col + 1);
2935 } else
2936 av_strlcpy(hostname, p,
2937 FFMIN(ls + 1 - p, hostname_size));
2938 }
2939 }
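/* Example (illustrative only): splitting
 * "http://user:pass@example.com:8080/path?query" yields
 *
 *     proto         = "http"
 *     authorization = "user:pass"
 *     hostname      = "example.com"
 *     *port_ptr     = 8080
 *     path          = "/path?query"
 *
 * A missing component leaves the corresponding buffer empty (and the port set
 * to -1), and a string without a "proto:" prefix is treated as a plain
 * filename and copied into path. */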
2940
2941 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
2942 {
2943 int i;
2944 static const char hex_table_uc[16] = { '0', '1', '2', '3',
2945 '4', '5', '6', '7',
2946 '8', '9', 'A', 'B',
2947 'C', 'D', 'E', 'F' };
2948 static const char hex_table_lc[16] = { '0', '1', '2', '3',
2949 '4', '5', '6', '7',
2950 '8', '9', 'a', 'b',
2951 'c', 'd', 'e', 'f' };
2952 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
2953
2954 for (i = 0; i < s; i++) {
2955 buff[i * 2] = hex_table[src[i] >> 4];
2956 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
2957 }
2958
2959 return buff;
2960 }
2961
2962 int ff_hex_to_data(uint8_t *data, const char *p)
2963 {
2964 int c, len, v;
2965
2966 len = 0;
2967 v = 1;
2968 for (;;) {
2969 p += strspn(p, SPACE_CHARS);
2970 if (*p == '\0')
2971 break;
2972 c = av_toupper((unsigned char) *p++);
2973 if (c >= '0' && c <= '9')
2974 c = c - '0';
2975 else if (c >= 'A' && c <= 'F')
2976 c = c - 'A' + 10;
2977 else
2978 break;
2979 v = (v << 4) | c;
2980 if (v & 0x100) {
2981 if (data)
2982 data[len] = v;
2983 len++;
2984 v = 1;
2985 }
2986 }
2987 return len;
2988 }
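/* Example (illustrative only): ff_hex_to_data() is essentially the inverse of
 * ff_data_to_hex() above; whitespace is skipped and parsing stops at the
 * first non-hex character.
 *
 *     uint8_t buf[16];
 *     int n = ff_hex_to_data(buf, "48 65 6c6c6f");
 *     // n == 5, buf holds { 0x48, 0x65, 0x6c, 0x6c, 0x6f } ("Hello")
 *
 * Passing NULL as the data pointer only counts the bytes. */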
2989
2990 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
2991 unsigned int pts_num, unsigned int pts_den)
2992 {
2993 AVRational new_tb;
2994 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
2995 if (new_tb.num != pts_num)
2996 av_log(NULL, AV_LOG_DEBUG,
2997 "st:%d removing common factor %d from timebase\n",
2998 s->index, pts_num / new_tb.num);
2999 } else
3000 av_log(NULL, AV_LOG_WARNING,
3001 "st:%d has too large timebase, reducing\n", s->index);
3002
3003 if (new_tb.num <= 0 || new_tb.den <= 0) {
3004 av_log(NULL, AV_LOG_ERROR,
3005 "Ignoring attempt to set invalid timebase for st:%d\n",
3006 s->index);
3007 return;
3008 }
3009 s->time_base = new_tb;
3010 s->pts_wrap_bits = pts_wrap_bits;
3011 }
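/* Illustrative demuxer calls (not taken from this file): the MPEG-style
 * default set in avformat_new_stream() above corresponds to
 *
 *     avpriv_set_pts_info(st, 33, 1, 90000);   // 90 kHz clock, 33-bit wrap
 *
 * while a container with millisecond timestamps would use something like
 *
 *     avpriv_set_pts_info(st, 64, 1, 1000);    // 1 ms resolution
 *
 * The num/den pair is reduced before being stored in st->time_base, and an
 * invalid (non-positive) timebase is rejected with an error message. */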
3012
3013 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3014 void *context)
3015 {
3016 const char *ptr = str;
3017
3018 /* Parse key=value pairs. */
3019 for (;;) {
3020 const char *key;
3021 char *dest = NULL, *dest_end;
3022 int key_len, dest_len = 0;
3023
3024 /* Skip whitespace and potential commas. */
3025 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3026 ptr++;
3027 if (!*ptr)
3028 break;
3029
3030 key = ptr;
3031
3032 if (!(ptr = strchr(key, '=')))
3033 break;
3034 ptr++;
3035 key_len = ptr - key;
3036
3037 callback_get_buf(context, key, key_len, &dest, &dest_len);
3038 dest_end = dest + dest_len - 1;
3039
3040 if (*ptr == '\"') {
3041 ptr++;
3042 while (*ptr && *ptr != '\"') {
3043 if (*ptr == '\\') {
3044 if (!ptr[1])
3045 break;
3046 if (dest && dest < dest_end)
3047 *dest++ = ptr[1];
3048 ptr += 2;
3049 } else {
3050 if (dest && dest < dest_end)
3051 *dest++ = *ptr;
3052 ptr++;
3053 }
3054 }
3055 if (*ptr == '\"')
3056 ptr++;
3057 } else {
3058 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3059 if (dest && dest < dest_end)
3060 *dest++ = *ptr;
3061 }
3062 if (dest)
3063 *dest = 0;
3064 }
3065 }
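/* Caller-side sketch (illustrative only, assuming the ff_parse_key_val_cb
 * signature from internal.h): the callback receives each key and hands back
 * the buffer the value should be written into. The context struct, field
 * names and input string below are hypothetical.
 *
 *     static void get_buf(void *ctx, const char *key, int key_len,
 *                         char **dest, int *dest_len)
 *     {
 *         struct my_fields *f = ctx;
 *         if (!strncmp(key, "realm=", key_len)) {
 *             *dest     = f->realm;
 *             *dest_len = sizeof(f->realm);
 *         }
 *     }
 *     ...
 *     ff_parse_key_value("realm=\"test\", nonce=abc", get_buf, &fields);
 *
 * Values may be quoted (with backslash escapes) or run to the next space or
 * comma; unknown keys are skipped because dest stays NULL. */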
3066
3067 int ff_find_stream_index(AVFormatContext *s, int id)
3068 {
3069 int i;
3070 for (i = 0; i < s->nb_streams; i++)
3071 if (s->streams[i]->id == id)
3072 return i;
3073 return -1;
3074 }
3075
3076 int64_t ff_iso8601_to_unix_time(const char *datestr)
3077 {
3078 struct tm time1 = { 0 }, time2 = { 0 };
3079 const char *ret1, *ret2;
3080 ret1 = av_small_strptime(datestr, "%Y - %m - %d %T", &time1);
3081 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%T", &time2);
3082 if (ret2 && !ret1)
3083 return av_timegm(&time2);
3084 else
3085 return av_timegm(&time1);
3086 }
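/* Example (illustrative only): both "2007-04-27 11:12:13" and the ISO form
 * "2007-04-27T11:12:13" parse to the same UTC time, 1177672333 seconds after
 * the epoch. A string matching neither pattern is not reported as an error;
 * the function simply returns av_timegm() of a zeroed struct tm. */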
3087
3088 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
3089 int std_compliance)
3090 {
3091 if (ofmt) {
3092 if (ofmt->query_codec)
3093 return ofmt->query_codec(codec_id, std_compliance);
3094 else if (ofmt->codec_tag)
3095 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
3096 else if (codec_id == ofmt->video_codec ||
3097 codec_id == ofmt->audio_codec ||
3098 codec_id == ofmt->subtitle_codec)
3099 return 1;
3100 }
3101 return AVERROR_PATCHWELCOME;
3102 }
3103
3104 int avformat_network_init(void)
3105 {
3106 #if CONFIG_NETWORK
3107 int ret;
3108 ff_network_inited_globally = 1;
3109 if ((ret = ff_network_init()) < 0)
3110 return ret;
3111 ff_tls_init();
3112 #endif
3113 return 0;
3114 }
3115
3116 int avformat_network_deinit(void)
3117 {
3118 #if CONFIG_NETWORK
3119 ff_network_close();
3120 ff_tls_deinit();
3121 #endif
3122 return 0;
3123 }
3124
3125 int ff_add_param_change(AVPacket *pkt, int32_t channels,
3126 uint64_t channel_layout, int32_t sample_rate,
3127 int32_t width, int32_t height)
3128 {
3129 uint32_t flags = 0;
3130 int size = 4;
3131 uint8_t *data;
3132 if (!pkt)
3133 return AVERROR(EINVAL);
3134 if (channels) {
3135 size += 4;
3136 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
3137 }
3138 if (channel_layout) {
3139 size += 8;
3140 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
3141 }
3142 if (sample_rate) {
3143 size += 4;
3144 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
3145 }
3146 if (width || height) {
3147 size += 8;
3148 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
3149 }
3150 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
3151 if (!data)
3152 return AVERROR(ENOMEM);
3153 bytestream_put_le32(&data, flags);
3154 if (channels)
3155 bytestream_put_le32(&data, channels);
3156 if (channel_layout)
3157 bytestream_put_le64(&data, channel_layout);
3158 if (sample_rate)
3159 bytestream_put_le32(&data, sample_rate);
3160 if (width || height) {
3161 bytestream_put_le32(&data, width);
3162 bytestream_put_le32(&data, height);
3163 }
3164 return 0;
3165 }
3166
3167 int ff_generate_avci_extradata(AVStream *st)
3168 {
3169 static const uint8_t avci100_1080p_extradata[] = {
3170 // SPS
3171 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3172 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3173 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3174 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3175 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3176 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3177 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3178 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3179 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3180 // PPS
3181 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3182 0xd0
3183 };
3184 static const uint8_t avci100_1080i_extradata[] = {
3185 // SPS
3186 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3187 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3188 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3189 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3190 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3191 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3192 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3193 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3194 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3195 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3196 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
3197 // PPS
3198 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3199 0xd0
3200 };
3201 static const uint8_t avci50_1080i_extradata[] = {
3202 // SPS
3203 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3204 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3205 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3206 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3207 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3208 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3209 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3210 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3211 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3212 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3213 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
3214 // PPS
3215 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
3216 0x11
3217 };
3218 static const uint8_t avci100_720p_extradata[] = {
3219 // SPS
3220 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3221 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3222 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3223 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3224 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3225 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3226 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3227 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3228 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3229 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
3230 // PPS
3231 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
3232 0x11
3233 };
3234
3235 const uint8_t *data = NULL;
3236 int size = 0;
3237
3238 if (st->codecpar->width == 1920) {
3239 if (st->codecpar->field_order == AV_FIELD_PROGRESSIVE) {
3240 data = avci100_1080p_extradata;
3241 size = sizeof(avci100_1080p_extradata);
3242 } else {
3243 data = avci100_1080i_extradata;
3244 size = sizeof(avci100_1080i_extradata);
3245 }
3246 } else if (st->codecpar->width == 1440) {
3247 data = avci50_1080i_extradata;
3248 size = sizeof(avci50_1080i_extradata);
3249 } else if (st->codecpar->width == 1280) {
3250 data = avci100_720p_extradata;
3251 size = sizeof(avci100_720p_extradata);
3252 }
3253
3254 if (!size)
3255 return 0;
3256
3257 av_freep(&st->codecpar->extradata);
3258 st->codecpar->extradata_size = 0;
3259 st->codecpar->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
3260 if (!st->codecpar->extradata)
3261 return AVERROR(ENOMEM);
3262
3263 memcpy(st->codecpar->extradata, data, size);
3264 st->codecpar->extradata_size = size;
3265
3266 return 0;
3267 }
3268
3269 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3270 int *size)
3271 {
3272 int i;
3273
3274 for (i = 0; i < st->nb_side_data; i++) {
3275 if (st->side_data[i].type == type) {
3276 if (size)
3277 *size = st->side_data[i].size;
3278 return st->side_data[i].data;
3279 }
3280 }
3281 return NULL;
3282 }
3283
3284 uint8_t *av_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type,
3285 int size)
3286 {
3287 AVPacketSideData *sd, *tmp;
3288 int i;
3289 uint8_t *data = av_malloc(size);
3290
3291 if (!data)
3292 return NULL;
3293
3294 for (i = 0; i < st->nb_side_data; i++) {
3295 sd = &st->side_data[i];
3296
3297 if (sd->type == type) {
3298 av_freep(&sd->data);
3299 sd->data = data;
3300 sd->size = size;
3301 return sd->data;
3302 }
3303 }
3304
3305 tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
3306 if (!tmp) {
3307 av_freep(&data);
3308 return NULL;
3309 }
3310
3311 st->side_data = tmp;
3312 st->nb_side_data++;
3313
3314 sd = &st->side_data[st->nb_side_data - 1];
3315 sd->type = type;
3316 sd->data = data;
3317 sd->size = size;
3318 return data;
3319 }
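/* Illustrative use (not part of this file): attach a display matrix to a
 * stream, e.g. to signal a 90 degree rotation; av_display_rotation_set() is
 * the helper from libavutil/display.h.
 *
 *     uint8_t *sd = av_stream_new_side_data(st, AV_PKT_DATA_DISPLAYMATRIX,
 *                                           sizeof(int32_t) * 9);
 *     if (!sd)
 *         return AVERROR(ENOMEM);
 *     av_display_rotation_set((int32_t *)sd, 90);
 *
 * If side data of the same type already exists, its buffer is replaced rather
 * than duplicated. */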
3320
3321 void ff_format_io_close(AVFormatContext *s, AVIOContext **pb)
3322 {
3323 if (*pb)
3324 s->io_close(s, *pb);
3325 *pb = NULL;
3326 }