lavf: deprecate av_read_packet().
[libav.git] / libavformat / utils.c
1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /* #define DEBUG */
23
24 #include "avformat.h"
25 #include "avio_internal.h"
26 #include "internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/bytestream.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/dict.h"
31 #include "libavutil/pixdesc.h"
32 #include "metadata.h"
33 #include "id3v2.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/avstring.h"
36 #include "libavutil/mathematics.h"
37 #include "libavutil/parseutils.h"
38 #include "riff.h"
39 #include "audiointerleave.h"
40 #include "url.h"
41 #include <sys/time.h>
42 #include <time.h>
43 #include <stdarg.h>
44 #if CONFIG_NETWORK
45 #include "network.h"
46 #endif
47
48 #undef NDEBUG
49 #include <assert.h>
50
51 /**
52 * @file
53 * various utility functions for use within Libav
54 */
55
56 unsigned avformat_version(void)
57 {
58 return LIBAVFORMAT_VERSION_INT;
59 }
60
61 const char *avformat_configuration(void)
62 {
63 return LIBAV_CONFIGURATION;
64 }
65
66 const char *avformat_license(void)
67 {
68 #define LICENSE_PREFIX "libavformat license: "
69 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
70 }
71
72 /* fraction handling */
73
74 /**
75 * f = val + (num / den) + 0.5.
76 *
77 * 'num' is normalized so that 0 <= num < den.
78 *
79 * @param f fractional number
80 * @param val integer value
81 * @param num must be >= 0
82 * @param den must be >= 1
83 */
84 static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
85 {
86 num += (den >> 1);
87 if (num >= den) {
88 val += num / den;
89 num = num % den;
90 }
91 f->val = val;
92 f->num = num;
93 f->den = den;
94 }
95
96 /**
97 * Fractional addition to f: f = f + (incr / f->den).
98 *
99 * @param f fractional number
100 * @param incr increment, can be positive or negative
101 */
102 static void frac_add(AVFrac *f, int64_t incr)
103 {
104 int64_t num, den;
105
106 num = f->num + incr;
107 den = f->den;
108 if (num < 0) {
109 f->val += num / den;
110 num = num % den;
111 if (num < 0) {
112 num += den;
113 f->val--;
114 }
115 } else if (num >= den) {
116 f->val += num / den;
117 num = num % den;
118 }
119 f->num = num;
120 }
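/* Worked example (an illustrative sketch, not part of the public API): tracking a
 * 90 kHz mux timestamp for 30000/1001 fps video with the helpers above. Each frame
 * adds 90000*1001/30000 = 3003 ticks exactly, and the fractional remainder is kept
 * so no rounding error accumulates:
 *
 *     AVFrac f;
 *     frac_init(&f, 0, 0, 30000);    // start at 0 (plus the 0.5 rounding bias)
 *     frac_add(&f, 90000LL * 1001);  // one frame later:  f.val == 3003
 *     frac_add(&f, 90000LL * 1001);  // two frames later: f.val == 6006
 */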
121
122 /** head of registered input format linked list */
123 static AVInputFormat *first_iformat = NULL;
124 /** head of registered output format linked list */
125 static AVOutputFormat *first_oformat = NULL;
126
127 AVInputFormat *av_iformat_next(AVInputFormat *f)
128 {
129 if(f) return f->next;
130 else return first_iformat;
131 }
132
133 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
134 {
135 if(f) return f->next;
136 else return first_oformat;
137 }
138
139 void av_register_input_format(AVInputFormat *format)
140 {
141 AVInputFormat **p;
142 p = &first_iformat;
143 while (*p != NULL) p = &(*p)->next;
144 *p = format;
145 format->next = NULL;
146 }
147
148 void av_register_output_format(AVOutputFormat *format)
149 {
150 AVOutputFormat **p;
151 p = &first_oformat;
152 while (*p != NULL) p = &(*p)->next;
153 *p = format;
154 format->next = NULL;
155 }
156
157 int av_match_ext(const char *filename, const char *extensions)
158 {
159 const char *ext, *p;
160 char ext1[32], *q;
161
162 if(!filename)
163 return 0;
164
165 ext = strrchr(filename, '.');
166 if (ext) {
167 ext++;
168 p = extensions;
169 for(;;) {
170 q = ext1;
171 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
172 *q++ = *p++;
173 *q = '\0';
174 if (!av_strcasecmp(ext1, ext))
175 return 1;
176 if (*p == '\0')
177 break;
178 p++;
179 }
180 }
181 return 0;
182 }
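/* Usage sketch (hypothetical file names): the comparison is case-insensitive and
 * only the part after the last '.' is considered:
 *
 *     av_match_ext("movie.MKV", "mkv,webm");  // -> 1
 *     av_match_ext("movie.mkv.txt", "mkv");   // -> 0 (the extension is "txt")
 *     av_match_ext(NULL, "mkv");              // -> 0
 */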
183
184 static int match_format(const char *name, const char *names)
185 {
186 const char *p;
187 int len, namelen;
188
189 if (!name || !names)
190 return 0;
191
192 namelen = strlen(name);
193 while ((p = strchr(names, ','))) {
194 len = FFMAX(p - names, namelen);
195 if (!av_strncasecmp(name, names, len))
196 return 1;
197 names = p+1;
198 }
199 return !av_strcasecmp(name, names);
200 }
201
202 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
203 const char *mime_type)
204 {
205 AVOutputFormat *fmt = NULL, *fmt_found;
206 int score_max, score;
207
208 /* specific test for image sequences */
209 #if CONFIG_IMAGE2_MUXER
210 if (!short_name && filename &&
211 av_filename_number_test(filename) &&
212 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
213 return av_guess_format("image2", NULL, NULL);
214 }
215 #endif
216 /* Find the proper file type. */
217 fmt_found = NULL;
218 score_max = 0;
219 while ((fmt = av_oformat_next(fmt))) {
220 score = 0;
221 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
222 score += 100;
223 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
224 score += 10;
225 if (filename && fmt->extensions &&
226 av_match_ext(filename, fmt->extensions)) {
227 score += 5;
228 }
229 if (score > score_max) {
230 score_max = score;
231 fmt_found = fmt;
232 }
233 }
234 return fmt_found;
235 }
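/* Usage sketch (hypothetical output name): pick a muxer for writing when only the
 * file name is known; short_name and mime_type may be NULL:
 *
 *     AVOutputFormat *ofmt = av_guess_format(NULL, "out.mkv", NULL);
 *     if (!ofmt)
 *         return AVERROR(EINVAL);  // no matching muxer compiled in
 */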
236
237 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
238 const char *filename, const char *mime_type, enum AVMediaType type){
239 if(type == AVMEDIA_TYPE_VIDEO){
240 enum CodecID codec_id= CODEC_ID_NONE;
241
242 #if CONFIG_IMAGE2_MUXER
243 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
244 codec_id= ff_guess_image2_codec(filename);
245 }
246 #endif
247 if(codec_id == CODEC_ID_NONE)
248 codec_id= fmt->video_codec;
249 return codec_id;
250 }else if(type == AVMEDIA_TYPE_AUDIO)
251 return fmt->audio_codec;
252 else if (type == AVMEDIA_TYPE_SUBTITLE)
253 return fmt->subtitle_codec;
254 else
255 return CODEC_ID_NONE;
256 }
257
258 AVInputFormat *av_find_input_format(const char *short_name)
259 {
260 AVInputFormat *fmt = NULL;
261 while ((fmt = av_iformat_next(fmt))) {
262 if (match_format(short_name, fmt->name))
263 return fmt;
264 }
265 return NULL;
266 }
267
268
269 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
270 {
271 int ret= av_new_packet(pkt, size);
272
273 if(ret<0)
274 return ret;
275
276 pkt->pos= avio_tell(s);
277
278 ret= avio_read(s, pkt->data, size);
279 if(ret<=0)
280 av_free_packet(pkt);
281 else
282 av_shrink_packet(pkt, ret);
283
284 return ret;
285 }
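/* Usage sketch (hypothetical demuxer callback, error handling shortened): read a
 * fixed-size chunk from the I/O context into a freshly allocated packet; on
 * failure or EOF the packet has already been freed by av_get_packet():
 *
 *     static int raw_read_packet(AVFormatContext *s, AVPacket *pkt)
 *     {
 *         int ret = av_get_packet(s->pb, pkt, 1024);   // at most 1024 bytes
 *         if (ret <= 0)
 *             return ret ? ret : AVERROR_EOF;
 *         pkt->stream_index = 0;
 *         return 0;
 *     }
 */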
286
287 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
288 {
289 int ret;
290 int old_size;
291 if (!pkt->size)
292 return av_get_packet(s, pkt, size);
293 old_size = pkt->size;
294 ret = av_grow_packet(pkt, size);
295 if (ret < 0)
296 return ret;
297 ret = avio_read(s, pkt->data + old_size, size);
298 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
299 return ret;
300 }
301
302
303 int av_filename_number_test(const char *filename)
304 {
305 char buf[1024];
306 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
307 }
308
309 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
310 {
311 AVProbeData lpd = *pd;
312 AVInputFormat *fmt1 = NULL, *fmt;
313 int score, id3 = 0;
314
315 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
316 int id3len = ff_id3v2_tag_len(lpd.buf);
317 if (lpd.buf_size > id3len + 16) {
318 lpd.buf += id3len;
319 lpd.buf_size -= id3len;
320 }
321 id3 = 1;
322 }
323
324 fmt = NULL;
325 while ((fmt1 = av_iformat_next(fmt1))) {
326 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
327 continue;
328 score = 0;
329 if (fmt1->read_probe) {
330 score = fmt1->read_probe(&lpd);
331 } else if (fmt1->extensions) {
332 if (av_match_ext(lpd.filename, fmt1->extensions)) {
333 score = 50;
334 }
335 }
336 if (score > *score_max) {
337 *score_max = score;
338 fmt = fmt1;
339 }else if (score == *score_max)
340 fmt = NULL;
341 }
342
343 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */
344 if (!fmt && is_opened && *score_max < AVPROBE_SCORE_MAX/4) {
345 while ((fmt = av_iformat_next(fmt)))
346 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) {
347 *score_max = AVPROBE_SCORE_MAX/4;
348 break;
349 }
350 }
351
352 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4-1) {
353 while ((fmt = av_iformat_next(fmt)))
354 if (fmt->extensions && av_match_ext("mp3", fmt->extensions)) {
355 *score_max = AVPROBE_SCORE_MAX/4-1;
356 break;
357 }
358 }
359
360 return fmt;
361 }
362
363 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
364 int score=0;
365 return av_probe_input_format2(pd, is_opened, &score);
366 }
367
368 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
369 {
370 static const struct {
371 const char *name; enum CodecID id; enum AVMediaType type;
372 } fmt_id_type[] = {
373 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
374 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
375 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
376 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
377 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
378 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
379 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
380 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
381 { 0 }
382 };
383 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
384
385 if (fmt) {
386 int i;
387 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
388 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
389 for (i = 0; fmt_id_type[i].name; i++) {
390 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
391 st->codec->codec_id = fmt_id_type[i].id;
392 st->codec->codec_type = fmt_id_type[i].type;
393 break;
394 }
395 }
396 }
397 return !!fmt;
398 }
399
400 /************************************************************/
401 /* input media file */
402
403 /** size of probe buffer, for guessing file type from file contents */
404 #define PROBE_BUF_MIN 2048
405 #define PROBE_BUF_MAX (1<<20)
406
407 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
408 const char *filename, void *logctx,
409 unsigned int offset, unsigned int max_probe_size)
410 {
411 AVProbeData pd = { filename ? filename : "", NULL, -offset };
412 unsigned char *buf = NULL;
413 int ret = 0, probe_size;
414
415 if (!max_probe_size) {
416 max_probe_size = PROBE_BUF_MAX;
417 } else if (max_probe_size > PROBE_BUF_MAX) {
418 max_probe_size = PROBE_BUF_MAX;
419 } else if (max_probe_size < PROBE_BUF_MIN) {
420 return AVERROR(EINVAL);
421 }
422
423 if (offset >= max_probe_size) {
424 return AVERROR(EINVAL);
425 }
426
427 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
428 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
429 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
430 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
431
432 if (probe_size < offset) {
433 continue;
434 }
435
436 /* read probe data */
437 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
438 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
439 /* fail if the error was not end of file; otherwise, lower the score */
440 if (ret != AVERROR_EOF) {
441 av_free(buf);
442 return ret;
443 }
444 score = 0;
445 ret = 0; /* error was end of file, nothing read */
446 }
447 pd.buf_size += ret;
448 pd.buf = &buf[offset];
449
450 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
451
452 /* guess file format */
453 *fmt = av_probe_input_format2(&pd, 1, &score);
454 if(*fmt){
455 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
456 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
457 }else
458 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
459 }
460 }
461
462 if (!*fmt) {
463 av_free(buf);
464 return AVERROR_INVALIDDATA;
465 }
466
467 /* rewind. reuse probe buffer to avoid seeking */
468 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
469 av_free(buf);
470
471 return ret;
472 }
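/* Usage sketch (assuming an already opened AVIOContext *pb): detect the input
 * format of a custom I/O stream before opening the demuxer; passing 0 as
 * max_probe_size selects the PROBE_BUF_MAX default:
 *
 *     AVInputFormat *fmt = NULL;
 *     int err = av_probe_input_buffer(pb, &fmt, "", NULL, 0, 0);
 *     if (err < 0)
 *         return err;   // no format could be detected
 *     // on success, pb has been rewound with the probe data and fmt is set
 */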
473
474 /* open input file and probe the format if necessary */
475 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
476 {
477 int ret;
478 AVProbeData pd = {filename, NULL, 0};
479
480 if (s->pb) {
481 s->flags |= AVFMT_FLAG_CUSTOM_IO;
482 if (!s->iformat)
483 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
484 else if (s->iformat->flags & AVFMT_NOFILE)
485 return AVERROR(EINVAL);
486 return 0;
487 }
488
489 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
490 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
491 return 0;
492
493 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
494 &s->interrupt_callback, options)) < 0)
495 return ret;
496 if (s->iformat)
497 return 0;
498 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
499 }
500
501 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
502 AVPacketList **plast_pktl){
503 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
504 if (!pktl)
505 return NULL;
506
507 if (*packet_buffer)
508 (*plast_pktl)->next = pktl;
509 else
510 *packet_buffer = pktl;
511
512 /* add the packet to the buffered packet list */
513 *plast_pktl = pktl;
514 pktl->pkt= *pkt;
515 return &pktl->pkt;
516 }
517
518 static void queue_attached_pictures(AVFormatContext *s)
519 {
520 int i;
521 for (i = 0; i < s->nb_streams; i++)
522 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
523 s->streams[i]->discard < AVDISCARD_ALL) {
524 AVPacket copy = s->streams[i]->attached_pic;
525 copy.destruct = NULL;
526 add_to_pktbuf(&s->raw_packet_buffer, &copy, &s->raw_packet_buffer_end);
527 }
528 }
529
530 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
531 {
532 AVFormatContext *s = *ps;
533 int ret = 0;
534 AVDictionary *tmp = NULL;
535 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
536
537 if (!s && !(s = avformat_alloc_context()))
538 return AVERROR(ENOMEM);
539 if (fmt)
540 s->iformat = fmt;
541
542 if (options)
543 av_dict_copy(&tmp, *options, 0);
544
545 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
546 goto fail;
547
548 if ((ret = init_input(s, filename, &tmp)) < 0)
549 goto fail;
550
551 /* check filename in case an image number is expected */
552 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
553 if (!av_filename_number_test(filename)) {
554 ret = AVERROR(EINVAL);
555 goto fail;
556 }
557 }
558
559 s->duration = s->start_time = AV_NOPTS_VALUE;
560 av_strlcpy(s->filename, filename, sizeof(s->filename));
561
562 /* allocate private data */
563 if (s->iformat->priv_data_size > 0) {
564 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
565 ret = AVERROR(ENOMEM);
566 goto fail;
567 }
568 if (s->iformat->priv_class) {
569 *(const AVClass**)s->priv_data = s->iformat->priv_class;
570 av_opt_set_defaults(s->priv_data);
571 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
572 goto fail;
573 }
574 }
575
576 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
577 if (s->pb)
578 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
579
580 if (s->iformat->read_header)
581 if ((ret = s->iformat->read_header(s)) < 0)
582 goto fail;
583
584 if (id3v2_extra_meta &&
585 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
586 goto fail;
587 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
588
589 queue_attached_pictures(s);
590
591 if (s->pb && !s->data_offset)
592 s->data_offset = avio_tell(s->pb);
593
594 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
595
596 if (options) {
597 av_dict_free(options);
598 *options = tmp;
599 }
600 *ps = s;
601 return 0;
602
603 fail:
604 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
605 av_dict_free(&tmp);
606 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
607 avio_close(s->pb);
608 avformat_free_context(s);
609 *ps = NULL;
610 return ret;
611 }
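/* Usage sketch (hypothetical file name, error handling shortened): the typical
 * call sequence built on top of this function; on failure *ps is freed and set to
 * NULL, and any options that were not consumed remain in the dictionary:
 *
 *     AVFormatContext *ic = NULL;
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "probesize", "65536", 0);   // optional
 *     if (avformat_open_input(&ic, "input.mkv", NULL, &opts) < 0)
 *         return -1;
 *     av_dict_free(&opts);                           // unrecognized options
 *     avformat_find_stream_info(ic, NULL);
 *     // ... av_read_frame() loop ...
 *     avformat_close_input(&ic);
 */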
612
613 /*******************************************************/
614
615 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
616 {
617 int ret, i;
618 AVStream *st;
619
620 for(;;){
621 AVPacketList *pktl = s->raw_packet_buffer;
622
623 if (pktl) {
624 *pkt = pktl->pkt;
625 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
626 !s->streams[pkt->stream_index]->probe_packets ||
627 s->raw_packet_buffer_remaining_size < pkt->size){
628 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
629 av_freep(&pd->buf);
630 pd->buf_size = 0;
631 s->raw_packet_buffer = pktl->next;
632 s->raw_packet_buffer_remaining_size += pkt->size;
633 av_free(pktl);
634 return 0;
635 }
636 }
637
638 av_init_packet(pkt);
639 ret= s->iformat->read_packet(s, pkt);
640 if (ret < 0) {
641 if (!pktl || ret == AVERROR(EAGAIN))
642 return ret;
643 for (i = 0; i < s->nb_streams; i++)
644 s->streams[i]->probe_packets = 0;
645 continue;
646 }
647
648 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
649 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
650 av_log(s, AV_LOG_WARNING,
651 "Dropped corrupted packet (stream = %d)\n",
652 pkt->stream_index);
653 av_free_packet(pkt);
654 continue;
655 }
656
657 st= s->streams[pkt->stream_index];
658
659 switch(st->codec->codec_type){
660 case AVMEDIA_TYPE_VIDEO:
661 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
662 break;
663 case AVMEDIA_TYPE_AUDIO:
664 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
665 break;
666 case AVMEDIA_TYPE_SUBTITLE:
667 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
668 break;
669 }
670
671 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
672 !st->probe_packets))
673 return ret;
674
675 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
676 s->raw_packet_buffer_remaining_size -= pkt->size;
677
678 if(st->codec->codec_id == CODEC_ID_PROBE){
679 AVProbeData *pd = &st->probe_data;
680 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
681 --st->probe_packets;
682
683 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
684 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
685 pd->buf_size += pkt->size;
686 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
687
688 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
689 //FIXME we do not reduce score to 0 for the case of running out of buffer space in bytes
690 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
691 if(st->codec->codec_id != CODEC_ID_PROBE){
692 pd->buf_size=0;
693 av_freep(&pd->buf);
694 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
695 }
696 }
697 }
698 }
699 }
700
701 #if FF_API_READ_PACKET
702 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
703 {
704 return ff_read_packet(s, pkt);
705 }
706 #endif
707
708
709 /**********************************************************/
710
711 /**
712 * Get the number of samples of an audio frame. Return -1 on error.
713 */
714 static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
715 {
716 int frame_size;
717
718 /* give frame_size priority if demuxing */
719 if (!mux && enc->frame_size > 1)
720 return enc->frame_size;
721
722 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
723 return frame_size;
724
725 /* fallback to using frame_size if muxing */
726 if (enc->frame_size > 1)
727 return enc->frame_size;
728
729 return -1;
730 }
731
732
733 /**
734 * Compute the frame duration in seconds as the fraction *pnum / *pden. Set both to 0 if unavailable.
735 */
736 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
737 AVCodecParserContext *pc, AVPacket *pkt)
738 {
739 int frame_size;
740
741 *pnum = 0;
742 *pden = 0;
743 switch(st->codec->codec_type) {
744 case AVMEDIA_TYPE_VIDEO:
745 if (st->r_frame_rate.num) {
746 *pnum = st->r_frame_rate.den;
747 *pden = st->r_frame_rate.num;
748 } else if(st->time_base.num*1000LL > st->time_base.den) {
749 *pnum = st->time_base.num;
750 *pden = st->time_base.den;
751 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
752 *pnum = st->codec->time_base.num;
753 *pden = st->codec->time_base.den;
754 if (pc && pc->repeat_pict) {
755 *pnum = (*pnum) * (1 + pc->repeat_pict);
756 }
757 // If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet.
758 // Thus, if we have no parser in that case, leave the duration undefined.
759 if(st->codec->ticks_per_frame>1 && !pc){
760 *pnum = *pden = 0;
761 }
762 }
763 break;
764 case AVMEDIA_TYPE_AUDIO:
765 frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
766 if (frame_size <= 0 || st->codec->sample_rate <= 0)
767 break;
768 *pnum = frame_size;
769 *pden = st->codec->sample_rate;
770 break;
771 default:
772 break;
773 }
774 }
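/* Worked example (illustrative): for 25 fps video, r_frame_rate is 25/1, so
 * *pnum = 1 and *pden = 25, i.e. one frame lasts 1/25 s. With a 1/90000 stream
 * time base the caller converts this to 1 * 90000 / (25 * 1) = 3600 ticks of
 * duration per packet. For 48 kHz audio carrying 1024 samples per packet,
 * *pnum = 1024 and *pden = 48000. */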
775
776 static int is_intra_only(AVCodecContext *enc){
777 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
778 return 1;
779 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
780 switch(enc->codec_id){
781 case CODEC_ID_MJPEG:
782 case CODEC_ID_MJPEGB:
783 case CODEC_ID_LJPEG:
784 case CODEC_ID_PRORES:
785 case CODEC_ID_RAWVIDEO:
786 case CODEC_ID_DVVIDEO:
787 case CODEC_ID_HUFFYUV:
788 case CODEC_ID_FFVHUFF:
789 case CODEC_ID_ASV1:
790 case CODEC_ID_ASV2:
791 case CODEC_ID_VCR1:
792 case CODEC_ID_DNXHD:
793 case CODEC_ID_JPEG2000:
794 return 1;
795 default: break;
796 }
797 }
798 return 0;
799 }
800
801 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
802 int64_t dts, int64_t pts)
803 {
804 AVStream *st= s->streams[stream_index];
805 AVPacketList *pktl= s->packet_buffer;
806
807 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
808 return;
809
810 st->first_dts= dts - st->cur_dts;
811 st->cur_dts= dts;
812
813 for(; pktl; pktl= pktl->next){
814 if(pktl->pkt.stream_index != stream_index)
815 continue;
816 //FIXME think more about this check
817 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
818 pktl->pkt.pts += st->first_dts;
819
820 if(pktl->pkt.dts != AV_NOPTS_VALUE)
821 pktl->pkt.dts += st->first_dts;
822
823 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
824 st->start_time= pktl->pkt.pts;
825 }
826 if (st->start_time == AV_NOPTS_VALUE)
827 st->start_time = pts;
828 }
829
830 static void update_initial_durations(AVFormatContext *s, AVStream *st,
831 int stream_index, int duration)
832 {
833 AVPacketList *pktl= s->packet_buffer;
834 int64_t cur_dts= 0;
835
836 if(st->first_dts != AV_NOPTS_VALUE){
837 cur_dts= st->first_dts;
838 for(; pktl; pktl= pktl->next){
839 if(pktl->pkt.stream_index == stream_index){
840 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
841 break;
842 cur_dts -= duration;
843 }
844 }
845 pktl= s->packet_buffer;
846 st->first_dts = cur_dts;
847 }else if(st->cur_dts)
848 return;
849
850 for(; pktl; pktl= pktl->next){
851 if(pktl->pkt.stream_index != stream_index)
852 continue;
853 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
854 && !pktl->pkt.duration){
855 pktl->pkt.dts= cur_dts;
856 if(!st->codec->has_b_frames)
857 pktl->pkt.pts= cur_dts;
858 cur_dts += duration;
859 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
860 pktl->pkt.duration = duration;
861 }else
862 break;
863 }
864 if(st->first_dts == AV_NOPTS_VALUE)
865 st->cur_dts= cur_dts;
866 }
867
868 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
869 AVCodecParserContext *pc, AVPacket *pkt)
870 {
871 int num, den, presentation_delayed, delay, i;
872 int64_t offset;
873
874 if (s->flags & AVFMT_FLAG_NOFILLIN)
875 return;
876
877 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
878 pkt->dts= AV_NOPTS_VALUE;
879
880 /* do we have a video B-frame ? */
881 delay= st->codec->has_b_frames;
882 presentation_delayed = 0;
883
884 /* XXX: need has_b_frame, but cannot get it if the codec is
885 not initialized */
886 if (delay &&
887 pc && pc->pict_type != AV_PICTURE_TYPE_B)
888 presentation_delayed = 1;
889
890 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
891 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
892 pkt->dts -= 1LL<<st->pts_wrap_bits;
893 }
894
895 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
896 // we take the conservative approach and discard both
897 // Note: if this is misbehaving for an H.264 file then presentation_delayed is possibly not set correctly.
898 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
899 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
900 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
901 }
902
903 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
904 compute_frame_duration(&num, &den, st, pc, pkt);
905 if (den && num) {
906 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
907
908 if(pkt->duration != 0 && s->packet_buffer)
909 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
910 }
911 }
912
913 /* correct timestamps with byte offset if demuxers only have timestamps
914 on packet boundaries */
915 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
916 /* this will estimate bitrate based on this frame's duration and size */
917 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
918 if(pkt->pts != AV_NOPTS_VALUE)
919 pkt->pts += offset;
920 if(pkt->dts != AV_NOPTS_VALUE)
921 pkt->dts += offset;
922 }
923
924 if (pc && pc->dts_sync_point >= 0) {
925 // we have synchronization info from the parser
926 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
927 if (den > 0) {
928 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
929 if (pkt->dts != AV_NOPTS_VALUE) {
930 // got DTS from the stream, update reference timestamp
931 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
932 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
933 } else if (st->reference_dts != AV_NOPTS_VALUE) {
934 // compute DTS based on reference timestamp
935 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
936 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
937 }
938 if (pc->dts_sync_point > 0)
939 st->reference_dts = pkt->dts; // new reference
940 }
941 }
942
943 /* This may be redundant, but it should not hurt. */
944 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
945 presentation_delayed = 1;
946
947 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
948 /* interpolate PTS and DTS if they are not present */
949 //We skip H264 currently because delay and has_b_frames are not reliably set
950 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
951 if (presentation_delayed) {
952 /* DTS = decompression timestamp */
953 /* PTS = presentation timestamp */
954 if (pkt->dts == AV_NOPTS_VALUE)
955 pkt->dts = st->last_IP_pts;
956 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
957 if (pkt->dts == AV_NOPTS_VALUE)
958 pkt->dts = st->cur_dts;
959
960 /* this is tricky: the dts must be incremented by the duration
961 of the frame we are displaying, i.e. the last I- or P-frame */
962 if (st->last_IP_duration == 0)
963 st->last_IP_duration = pkt->duration;
964 if(pkt->dts != AV_NOPTS_VALUE)
965 st->cur_dts = pkt->dts + st->last_IP_duration;
966 st->last_IP_duration = pkt->duration;
967 st->last_IP_pts= pkt->pts;
968 /* cannot compute PTS if not present (we can compute it only
969 by knowing the future) */
970 } else if (pkt->pts != AV_NOPTS_VALUE ||
971 pkt->dts != AV_NOPTS_VALUE ||
972 pkt->duration ||
973 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
974 int duration = pkt->duration;
975 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
976 compute_frame_duration(&num, &den, st, pc, pkt);
977 if (den && num) {
978 duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den,
979 den * (int64_t)st->time_base.num,
980 AV_ROUND_DOWN);
981 if (duration != 0 && s->packet_buffer) {
982 update_initial_durations(s, st, pkt->stream_index,
983 duration);
984 }
985 }
986 }
987
988 if(pkt->pts != AV_NOPTS_VALUE && duration){
989 int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
990 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
991 if(old_diff < new_diff && old_diff < (duration>>3)){
992 pkt->pts += duration;
993 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
994 }
995 }
996
997 /* presentation is not delayed: PTS and DTS are the same */
998 if(pkt->pts == AV_NOPTS_VALUE)
999 pkt->pts = pkt->dts;
1000 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1001 if(pkt->pts == AV_NOPTS_VALUE)
1002 pkt->pts = st->cur_dts;
1003 pkt->dts = pkt->pts;
1004 if(pkt->pts != AV_NOPTS_VALUE)
1005 st->cur_dts = pkt->pts + duration;
1006 }
1007 }
1008
1009 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1010 st->pts_buffer[0]= pkt->pts;
1011 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1012 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1013 if(pkt->dts == AV_NOPTS_VALUE)
1014 pkt->dts= st->pts_buffer[0];
1015 if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
1016 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1017 }
1018 if(pkt->dts > st->cur_dts)
1019 st->cur_dts = pkt->dts;
1020 }
1021
1022 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1023
1024 /* update flags */
1025 if(is_intra_only(st->codec))
1026 pkt->flags |= AV_PKT_FLAG_KEY;
1027 if (pc)
1028 pkt->convergence_duration = pc->convergence_duration;
1029 }
1030
1031 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1032 {
1033 while (*pkt_buf) {
1034 AVPacketList *pktl = *pkt_buf;
1035 *pkt_buf = pktl->next;
1036 av_free_packet(&pktl->pkt);
1037 av_freep(&pktl);
1038 }
1039 *pkt_buf_end = NULL;
1040 }
1041
1042 /**
1043 * Parse a packet, add all split parts to parse_queue
1044 *
1045 * @param pkt packet to parse, NULL when flushing the parser at end of stream
1046 */
1047 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1048 {
1049 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1050 AVStream *st = s->streams[stream_index];
1051 uint8_t *data = pkt ? pkt->data : NULL;
1052 int size = pkt ? pkt->size : 0;
1053 int ret = 0, got_output = 0;
1054
1055 if (!pkt) {
1056 av_init_packet(&flush_pkt);
1057 pkt = &flush_pkt;
1058 got_output = 1;
1059 }
1060
1061 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1062 int len;
1063
1064 av_init_packet(&out_pkt);
1065 len = av_parser_parse2(st->parser, st->codec,
1066 &out_pkt.data, &out_pkt.size, data, size,
1067 pkt->pts, pkt->dts, pkt->pos);
1068
1069 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1070 /* increment read pointer */
1071 data += len;
1072 size -= len;
1073
1074 got_output = !!out_pkt.size;
1075
1076 if (!out_pkt.size)
1077 continue;
1078
1079 /* set the duration */
1080 out_pkt.duration = 0;
1081 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1082 if (st->codec->sample_rate > 0) {
1083 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1084 (AVRational){ 1, st->codec->sample_rate },
1085 st->time_base,
1086 AV_ROUND_DOWN);
1087 }
1088 } else if (st->codec->time_base.num != 0 &&
1089 st->codec->time_base.den != 0) {
1090 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1091 st->codec->time_base,
1092 st->time_base,
1093 AV_ROUND_DOWN);
1094 }
1095
1096 out_pkt.stream_index = st->index;
1097 out_pkt.pts = st->parser->pts;
1098 out_pkt.dts = st->parser->dts;
1099 out_pkt.pos = st->parser->pos;
1100
1101 if (st->parser->key_frame == 1 ||
1102 (st->parser->key_frame == -1 &&
1103 st->parser->pict_type == AV_PICTURE_TYPE_I))
1104 out_pkt.flags |= AV_PKT_FLAG_KEY;
1105
1106 compute_pkt_fields(s, st, st->parser, &out_pkt);
1107
1108 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1109 out_pkt.flags & AV_PKT_FLAG_KEY) {
1110 ff_reduce_index(s, st->index);
1111 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
1112 0, 0, AVINDEX_KEYFRAME);
1113 }
1114
1115 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1116 out_pkt.destruct = pkt->destruct;
1117 pkt->destruct = NULL;
1118 }
1119 if ((ret = av_dup_packet(&out_pkt)) < 0)
1120 goto fail;
1121
1122 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1123 av_free_packet(&out_pkt);
1124 ret = AVERROR(ENOMEM);
1125 goto fail;
1126 }
1127 }
1128
1129
1130 /* end of the stream => close and free the parser */
1131 if (pkt == &flush_pkt) {
1132 av_parser_close(st->parser);
1133 st->parser = NULL;
1134 }
1135
1136 fail:
1137 av_free_packet(pkt);
1138 return ret;
1139 }
1140
1141 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1142 AVPacketList **pkt_buffer_end,
1143 AVPacket *pkt)
1144 {
1145 AVPacketList *pktl;
1146 av_assert0(*pkt_buffer);
1147 pktl = *pkt_buffer;
1148 *pkt = pktl->pkt;
1149 *pkt_buffer = pktl->next;
1150 if (!pktl->next)
1151 *pkt_buffer_end = NULL;
1152 av_freep(&pktl);
1153 return 0;
1154 }
1155
1156 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1157 {
1158 int ret = 0, i, got_packet = 0;
1159
1160 av_init_packet(pkt);
1161
1162 while (!got_packet && !s->parse_queue) {
1163 AVStream *st;
1164 AVPacket cur_pkt;
1165
1166 /* read next packet */
1167 ret = ff_read_packet(s, &cur_pkt);
1168 if (ret < 0) {
1169 if (ret == AVERROR(EAGAIN))
1170 return ret;
1171 /* flush the parsers */
1172 for(i = 0; i < s->nb_streams; i++) {
1173 st = s->streams[i];
1174 if (st->parser && st->need_parsing)
1175 parse_packet(s, NULL, st->index);
1176 }
1177 /* all remaining packets are now in parse_queue =>
1178 * really terminate parsing */
1179 break;
1180 }
1181 ret = 0;
1182 st = s->streams[cur_pkt.stream_index];
1183
1184 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1185 cur_pkt.dts != AV_NOPTS_VALUE &&
1186 cur_pkt.pts < cur_pkt.dts) {
1187 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1188 cur_pkt.stream_index,
1189 cur_pkt.pts,
1190 cur_pkt.dts,
1191 cur_pkt.size);
1192 }
1193 if (s->debug & FF_FDEBUG_TS)
1194 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1195 cur_pkt.stream_index,
1196 cur_pkt.pts,
1197 cur_pkt.dts,
1198 cur_pkt.size,
1199 cur_pkt.duration,
1200 cur_pkt.flags);
1201
1202 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1203 st->parser = av_parser_init(st->codec->codec_id);
1204 if (!st->parser) {
1205 /* no parser available: just output the raw packets */
1206 st->need_parsing = AVSTREAM_PARSE_NONE;
1207 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1208 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1209 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1210 st->parser->flags |= PARSER_FLAG_ONCE;
1211 }
1212 }
1213
1214 if (!st->need_parsing || !st->parser) {
1215 /* no parsing needed: we just output the packet as is */
1216 *pkt = cur_pkt;
1217 compute_pkt_fields(s, st, NULL, pkt);
1218 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1219 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1220 ff_reduce_index(s, st->index);
1221 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1222 }
1223 got_packet = 1;
1224 } else if (st->discard < AVDISCARD_ALL) {
1225 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1226 return ret;
1227 } else {
1228 /* free packet */
1229 av_free_packet(&cur_pkt);
1230 }
1231 }
1232
1233 if (!got_packet && s->parse_queue)
1234 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1235
1236 if(s->debug & FF_FDEBUG_TS)
1237 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1238 pkt->stream_index,
1239 pkt->pts,
1240 pkt->dts,
1241 pkt->size,
1242 pkt->duration,
1243 pkt->flags);
1244
1245 return ret;
1246 }
1247
1248 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1249 {
1250 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1251 int eof = 0;
1252
1253 if (!genpts)
1254 return s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1255 &s->packet_buffer_end,
1256 pkt) :
1257 read_frame_internal(s, pkt);
1258
1259 for (;;) {
1260 int ret;
1261 AVPacketList *pktl = s->packet_buffer;
1262
1263 if (pktl) {
1264 AVPacket *next_pkt = &pktl->pkt;
1265
1266 if (next_pkt->dts != AV_NOPTS_VALUE) {
1267 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1268 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1269 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1270 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
1271 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1272 next_pkt->pts = pktl->pkt.dts;
1273 }
1274 pktl = pktl->next;
1275 }
1276 pktl = s->packet_buffer;
1277 }
1278
1279 /* read packet from packet buffer, if there is data */
1280 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1281 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1282 return read_from_packet_buffer(&s->packet_buffer,
1283 &s->packet_buffer_end, pkt);
1284 }
1285
1286 ret = read_frame_internal(s, pkt);
1287 if (ret < 0) {
1288 if (pktl && ret != AVERROR(EAGAIN)) {
1289 eof = 1;
1290 continue;
1291 } else
1292 return ret;
1293 }
1294
1295 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1296 &s->packet_buffer_end)) < 0)
1297 return AVERROR(ENOMEM);
1298 }
1299 }
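/* Usage sketch (error handling shortened): the standard demuxing loop built on
 * top of this function; timestamps in the returned packet are expressed in
 * ic->streams[pkt.stream_index]->time_base units:
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // decode or remux pkt here
 *         av_free_packet(&pkt);
 *     }
 */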
1300
1301 /* XXX: suppress the packet queue */
1302 static void flush_packet_queue(AVFormatContext *s)
1303 {
1304 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1305 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1306 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1307
1308 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1309 }
1310
1311 /*******************************************************/
1312 /* seek support */
1313
1314 int av_find_default_stream_index(AVFormatContext *s)
1315 {
1316 int first_audio_index = -1;
1317 int i;
1318 AVStream *st;
1319
1320 if (s->nb_streams <= 0)
1321 return -1;
1322 for(i = 0; i < s->nb_streams; i++) {
1323 st = s->streams[i];
1324 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1325 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1326 return i;
1327 }
1328 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1329 first_audio_index = i;
1330 }
1331 return first_audio_index >= 0 ? first_audio_index : 0;
1332 }
1333
1334 /**
1335 * Flush the frame reader.
1336 */
1337 void ff_read_frame_flush(AVFormatContext *s)
1338 {
1339 AVStream *st;
1340 int i, j;
1341
1342 flush_packet_queue(s);
1343
1344 /* for each stream, reset read state */
1345 for(i = 0; i < s->nb_streams; i++) {
1346 st = s->streams[i];
1347
1348 if (st->parser) {
1349 av_parser_close(st->parser);
1350 st->parser = NULL;
1351 }
1352 st->last_IP_pts = AV_NOPTS_VALUE;
1353 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1354 st->reference_dts = AV_NOPTS_VALUE;
1355
1356 st->probe_packets = MAX_PROBE_PACKETS;
1357
1358 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1359 st->pts_buffer[j]= AV_NOPTS_VALUE;
1360 }
1361 }
1362
1363 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1364 {
1365 int i;
1366
1367 for(i = 0; i < s->nb_streams; i++) {
1368 AVStream *st = s->streams[i];
1369
1370 st->cur_dts = av_rescale(timestamp,
1371 st->time_base.den * (int64_t)ref_st->time_base.num,
1372 st->time_base.num * (int64_t)ref_st->time_base.den);
1373 }
1374 }
1375
1376 void ff_reduce_index(AVFormatContext *s, int stream_index)
1377 {
1378 AVStream *st= s->streams[stream_index];
1379 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1380
1381 if((unsigned)st->nb_index_entries >= max_entries){
1382 int i;
1383 for(i=0; 2*i<st->nb_index_entries; i++)
1384 st->index_entries[i]= st->index_entries[2*i];
1385 st->nb_index_entries= i;
1386 }
1387 }
1388
1389 int ff_add_index_entry(AVIndexEntry **index_entries,
1390 int *nb_index_entries,
1391 unsigned int *index_entries_allocated_size,
1392 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1393 {
1394 AVIndexEntry *entries, *ie;
1395 int index;
1396
1397 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1398 return -1;
1399
1400 entries = av_fast_realloc(*index_entries,
1401 index_entries_allocated_size,
1402 (*nb_index_entries + 1) *
1403 sizeof(AVIndexEntry));
1404 if(!entries)
1405 return -1;
1406
1407 *index_entries= entries;
1408
1409 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1410
1411 if(index<0){
1412 index= (*nb_index_entries)++;
1413 ie= &entries[index];
1414 assert(index==0 || ie[-1].timestamp < timestamp);
1415 }else{
1416 ie= &entries[index];
1417 if(ie->timestamp != timestamp){
1418 if(ie->timestamp <= timestamp)
1419 return -1;
1420 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1421 (*nb_index_entries)++;
1422 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1423 distance= ie->min_distance;
1424 }
1425
1426 ie->pos = pos;
1427 ie->timestamp = timestamp;
1428 ie->min_distance= distance;
1429 ie->size= size;
1430 ie->flags = flags;
1431
1432 return index;
1433 }
1434
1435 int av_add_index_entry(AVStream *st,
1436 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1437 {
1438 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1439 &st->index_entries_allocated_size, pos,
1440 timestamp, size, distance, flags);
1441 }
1442
1443 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1444 int64_t wanted_timestamp, int flags)
1445 {
1446 int a, b, m;
1447 int64_t timestamp;
1448
1449 a = - 1;
1450 b = nb_entries;
1451
1452 //optimize appending index entries at the end
1453 if(b && entries[b-1].timestamp < wanted_timestamp)
1454 a= b-1;
1455
1456 while (b - a > 1) {
1457 m = (a + b) >> 1;
1458 timestamp = entries[m].timestamp;
1459 if(timestamp >= wanted_timestamp)
1460 b = m;
1461 if(timestamp <= wanted_timestamp)
1462 a = m;
1463 }
1464 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1465
1466 if(!(flags & AVSEEK_FLAG_ANY)){
1467 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1468 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1469 }
1470 }
1471
1472 if(m == nb_entries)
1473 return -1;
1474 return m;
1475 }
1476
1477 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1478 int flags)
1479 {
1480 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1481 wanted_timestamp, flags);
1482 }
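/* Example of the flag semantics (illustrative): with keyframe index entries at
 * timestamps 0, 1000 and 2000,
 *
 *     av_index_search_timestamp(st, 1500, AVSEEK_FLAG_BACKWARD)  // entry at 1000
 *     av_index_search_timestamp(st, 1500, 0)                     // entry at 2000
 *     av_index_search_timestamp(st, 2500, 0)                     // -1, past the end
 */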
1483
1484 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1485 {
1486 AVInputFormat *avif= s->iformat;
1487 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1488 int64_t ts_min, ts_max, ts;
1489 int index;
1490 int64_t ret;
1491 AVStream *st;
1492
1493 if (stream_index < 0)
1494 return -1;
1495
1496 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1497
1498 ts_max=
1499 ts_min= AV_NOPTS_VALUE;
1500 pos_limit= -1; //gcc falsely says it may be uninitialized
1501
1502 st= s->streams[stream_index];
1503 if(st->index_entries){
1504 AVIndexEntry *e;
1505
1506 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1507 index= FFMAX(index, 0);
1508 e= &st->index_entries[index];
1509
1510 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1511 pos_min= e->pos;
1512 ts_min= e->timestamp;
1513 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1514 pos_min,ts_min);
1515 }else{
1516 assert(index==0);
1517 }
1518
1519 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1520 assert(index < st->nb_index_entries);
1521 if(index >= 0){
1522 e= &st->index_entries[index];
1523 assert(e->timestamp >= target_ts);
1524 pos_max= e->pos;
1525 ts_max= e->timestamp;
1526 pos_limit= pos_max - e->min_distance;
1527 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1528 pos_max,pos_limit, ts_max);
1529 }
1530 }
1531
1532 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1533 if(pos<0)
1534 return -1;
1535
1536 /* do the seek */
1537 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1538 return ret;
1539
1540 ff_update_cur_dts(s, st, ts);
1541
1542 return 0;
1543 }
1544
1545 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1546 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1547 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1548 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1549 {
1550 int64_t pos, ts;
1551 int64_t start_pos, filesize;
1552 int no_change;
1553
1554 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1555
1556 if(ts_min == AV_NOPTS_VALUE){
1557 pos_min = s->data_offset;
1558 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1559 if (ts_min == AV_NOPTS_VALUE)
1560 return -1;
1561 }
1562
1563 if(ts_max == AV_NOPTS_VALUE){
1564 int step= 1024;
1565 filesize = avio_size(s->pb);
1566 pos_max = filesize - 1;
1567 do{
1568 pos_max -= step;
1569 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1570 step += step;
1571 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1572 if (ts_max == AV_NOPTS_VALUE)
1573 return -1;
1574
1575 for(;;){
1576 int64_t tmp_pos= pos_max + 1;
1577 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1578 if(tmp_ts == AV_NOPTS_VALUE)
1579 break;
1580 ts_max= tmp_ts;
1581 pos_max= tmp_pos;
1582 if(tmp_pos >= filesize)
1583 break;
1584 }
1585 pos_limit= pos_max;
1586 }
1587
1588 if(ts_min > ts_max){
1589 return -1;
1590 }else if(ts_min == ts_max){
1591 pos_limit= pos_min;
1592 }
1593
1594 no_change=0;
1595 while (pos_min < pos_limit) {
1596 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1597 pos_min, pos_max, ts_min, ts_max);
1598 assert(pos_limit <= pos_max);
1599
1600 if(no_change==0){
1601 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1602 // interpolate position (better than dichotomy)
1603 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1604 + pos_min - approximate_keyframe_distance;
1605 }else if(no_change==1){
1606 // bisection, if interpolation failed to change min or max pos last time
1607 pos = (pos_min + pos_limit)>>1;
1608 }else{
1609 /* linear search if bisection failed, can only happen if there
1610 are very few or no keyframes between min/max */
1611 pos=pos_min;
1612 }
1613 if(pos <= pos_min)
1614 pos= pos_min + 1;
1615 else if(pos > pos_limit)
1616 pos= pos_limit;
1617 start_pos= pos;
1618
1619 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1620 if(pos == pos_max)
1621 no_change++;
1622 else
1623 no_change=0;
1624 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1625 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1626 pos_limit, start_pos, no_change);
1627 if(ts == AV_NOPTS_VALUE){
1628 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1629 return -1;
1630 }
1631 assert(ts != AV_NOPTS_VALUE);
1632 if (target_ts <= ts) {
1633 pos_limit = start_pos - 1;
1634 pos_max = pos;
1635 ts_max = ts;
1636 }
1637 if (target_ts >= ts) {
1638 pos_min = pos;
1639 ts_min = ts;
1640 }
1641 }
1642
1643 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1644 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1645 pos_min = pos;
1646 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1647 pos_min++;
1648 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1649 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1650 pos, ts_min, target_ts, ts_max);
1651 *ts_ret= ts;
1652 return pos;
1653 }
1654
1655 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1656 int64_t pos_min, pos_max;
1657
1658 pos_min = s->data_offset;
1659 pos_max = avio_size(s->pb) - 1;
1660
1661 if (pos < pos_min) pos= pos_min;
1662 else if(pos > pos_max) pos= pos_max;
1663
1664 avio_seek(s->pb, pos, SEEK_SET);
1665
1666 return 0;
1667 }
1668
1669 static int seek_frame_generic(AVFormatContext *s,
1670 int stream_index, int64_t timestamp, int flags)
1671 {
1672 int index;
1673 int64_t ret;
1674 AVStream *st;
1675 AVIndexEntry *ie;
1676
1677 st = s->streams[stream_index];
1678
1679 index = av_index_search_timestamp(st, timestamp, flags);
1680
1681 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1682 return -1;
1683
1684 if(index < 0 || index==st->nb_index_entries-1){
1685 AVPacket pkt;
1686
1687 if(st->nb_index_entries){
1688 assert(st->index_entries);
1689 ie= &st->index_entries[st->nb_index_entries-1];
1690 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1691 return ret;
1692 ff_update_cur_dts(s, st, ie->timestamp);
1693 }else{
1694 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1695 return ret;
1696 }
1697 for (;;) {
1698 int read_status;
1699 do{
1700 read_status = av_read_frame(s, &pkt);
1701 } while (read_status == AVERROR(EAGAIN));
1702 if (read_status < 0)
1703 break;
1704 av_free_packet(&pkt);
1705 if(stream_index == pkt.stream_index){
1706 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1707 break;
1708 }
1709 }
1710 index = av_index_search_timestamp(st, timestamp, flags);
1711 }
1712 if (index < 0)
1713 return -1;
1714
1715 ff_read_frame_flush(s);
1716 if (s->iformat->read_seek){
1717 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1718 return 0;
1719 }
1720 ie = &st->index_entries[index];
1721 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1722 return ret;
1723 ff_update_cur_dts(s, st, ie->timestamp);
1724
1725 return 0;
1726 }
1727
1728 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1729 int64_t timestamp, int flags)
1730 {
1731 int ret;
1732 AVStream *st;
1733
1734 if (flags & AVSEEK_FLAG_BYTE) {
1735 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1736 return -1;
1737 ff_read_frame_flush(s);
1738 return seek_frame_byte(s, stream_index, timestamp, flags);
1739 }
1740
1741 if(stream_index < 0){
1742 stream_index= av_find_default_stream_index(s);
1743 if(stream_index < 0)
1744 return -1;
1745
1746 st= s->streams[stream_index];
1747 /* timestamp for default must be expressed in AV_TIME_BASE units */
1748 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1749 }
1750
1751 /* first, we try the format specific seek */
1752 if (s->iformat->read_seek) {
1753 ff_read_frame_flush(s);
1754 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1755 } else
1756 ret = -1;
1757 if (ret >= 0) {
1758 return 0;
1759 }
1760
1761 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1762 ff_read_frame_flush(s);
1763 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1764 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1765 ff_read_frame_flush(s);
1766 return seek_frame_generic(s, stream_index, timestamp, flags);
1767 }
1768 else
1769 return -1;
1770 }
1771
1772 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1773 {
1774 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1775
1776 if (ret >= 0)
1777 queue_attached_pictures(s);
1778
1779 return ret;
1780 }
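/* Usage sketch: seek to the 10 second mark on the default stream. With a negative
 * stream_index the timestamp is interpreted in AV_TIME_BASE units;
 * AVSEEK_FLAG_BACKWARD prefers a position at or before the requested timestamp:
 *
 *     if (av_seek_frame(ic, -1, 10 * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_BACKWARD) < 0)
 *         av_log(ic, AV_LOG_ERROR, "seek to 10s failed\n");
 */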
1781
1782 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1783 {
1784 if(min_ts > ts || max_ts < ts)
1785 return -1;
1786
1787 if (s->iformat->read_seek2) {
1788 int ret;
1789 ff_read_frame_flush(s);
1790 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1791
1792 if (ret >= 0)
1793 queue_attached_pictures(s);
1794 return ret;
1795 }
1796
1797 if(s->iformat->read_timestamp){
1798 //try to seek via read_timestamp()
1799 }
1800
1801 // Fall back to the old API if the new one is not implemented but the old one is.
1802 // Note that the old API has somewhat different semantics.
1803 if(s->iformat->read_seek || 1)
1804 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1805
1806 // try some generic seek like seek_frame_generic() but with new ts semantics
1807 }
1808
1809 /*******************************************************/
1810
1811 /**
1812 * Check whether the stream has an accurate duration in any of its components.
1813 *
1814 * @return TRUE if at least one component has an accurate duration.
1815 */
1816 static int has_duration(AVFormatContext *ic)
1817 {
1818 int i;
1819 AVStream *st;
1820
1821 for(i = 0;i < ic->nb_streams; i++) {
1822 st = ic->streams[i];
1823 if (st->duration != AV_NOPTS_VALUE)
1824 return 1;
1825 }
1826 return 0;
1827 }
1828
1829 /**
1830 * Estimate the stream timings from those of each component.
1831 *
1832 * Also computes the global bitrate if possible.
1833 */
1834 static void update_stream_timings(AVFormatContext *ic)
1835 {
1836 int64_t start_time, start_time1, end_time, end_time1;
1837 int64_t duration, duration1, filesize;
1838 int i;
1839 AVStream *st;
1840
1841 start_time = INT64_MAX;
1842 end_time = INT64_MIN;
1843 duration = INT64_MIN;
1844 for(i = 0;i < ic->nb_streams; i++) {
1845 st = ic->streams[i];
1846 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1847 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1848 start_time = FFMIN(start_time, start_time1);
1849 if (st->duration != AV_NOPTS_VALUE) {
1850 end_time1 = start_time1
1851 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1852 end_time = FFMAX(end_time, end_time1);
1853 }
1854 }
1855 if (st->duration != AV_NOPTS_VALUE) {
1856 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1857 duration = FFMAX(duration, duration1);
1858 }
1859 }
1860 if (start_time != INT64_MAX) {
1861 ic->start_time = start_time;
1862 if (end_time != INT64_MIN)
1863 duration = FFMAX(duration, end_time - start_time);
1864 }
1865 if (duration != INT64_MIN) {
1866 ic->duration = duration;
1867 if (ic->pb && (filesize = avio_size(ic->pb)) > 0) {
1868 /* compute the bitrate */
1869 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
1870 (double)ic->duration;
1871 }
1872 }
1873 }
1874
1875 static void fill_all_stream_timings(AVFormatContext *ic)
1876 {
1877 int i;
1878 AVStream *st;
1879
1880 update_stream_timings(ic);
1881 for(i = 0;i < ic->nb_streams; i++) {
1882 st = ic->streams[i];
1883 if (st->start_time == AV_NOPTS_VALUE) {
1884 if(ic->start_time != AV_NOPTS_VALUE)
1885 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1886 if(ic->duration != AV_NOPTS_VALUE)
1887 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1888 }
1889 }
1890 }
1891
1892 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1893 {
1894 int64_t filesize, duration;
1895 int bit_rate, i;
1896 AVStream *st;
1897
1898 /* if bit_rate is already set, we believe it */
1899 if (ic->bit_rate <= 0) {
1900 bit_rate = 0;
1901 for(i=0;i<ic->nb_streams;i++) {
1902 st = ic->streams[i];
1903 if (st->codec->bit_rate > 0)
1904 bit_rate += st->codec->bit_rate;
1905 }
1906 ic->bit_rate = bit_rate;
1907 }
1908
1909 /* if duration is already set, we believe it */
1910 if (ic->duration == AV_NOPTS_VALUE &&
1911 ic->bit_rate != 0) {
1912 filesize = ic->pb ? avio_size(ic->pb) : 0;
1913 if (filesize > 0) {
1914 for(i = 0; i < ic->nb_streams; i++) {
1915 st = ic->streams[i];
1916 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1917 if (st->duration == AV_NOPTS_VALUE)
1918 st->duration = duration;
1919 }
1920 }
1921 }
1922 }
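/* Rough worked example of the estimate above (illustrative numbers only, not
 * taken from any particular file): with filesize = 1000000 bytes,
 * ic->bit_rate = 128000 bit/s and a stream time base of 1/90000,
 *
 *     duration = av_rescale(8 * 1000000, 90000, 128000 * 1)
 *              = 5625000 ticks = 62.5 seconds,
 *
 * i.e. 8 * filesize / bit_rate seconds, expressed in the stream's own
 * time base. */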
1923
1924 #define DURATION_MAX_READ_SIZE 250000
1925 #define DURATION_MAX_RETRY 3
1926
1927 /* only usable for MPEG-PS and MPEG-TS streams */
1928 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1929 {
1930 AVPacket pkt1, *pkt = &pkt1;
1931 AVStream *st;
1932 int read_size, i, ret;
1933 int64_t end_time;
1934 int64_t filesize, offset, duration;
1935 int retry=0;
1936
1937 /* flush packet queue */
1938 flush_packet_queue(ic);
1939
1940 for (i=0; i<ic->nb_streams; i++) {
1941 st = ic->streams[i];
1942 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1943 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
1944
1945 if (st->parser) {
1946 av_parser_close(st->parser);
1947 st->parser= NULL;
1948 }
1949 }
1950
1951 /* estimate the end time (duration) */
1952 /* XXX: may need to support wrapping */
1953 filesize = ic->pb ? avio_size(ic->pb) : 0;
1954 end_time = AV_NOPTS_VALUE;
1955 do{
1956 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1957 if (offset < 0)
1958 offset = 0;
1959
1960 avio_seek(ic->pb, offset, SEEK_SET);
1961 read_size = 0;
1962 for(;;) {
1963 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1964 break;
1965
1966 do {
1967 ret = ff_read_packet(ic, pkt);
1968 } while(ret == AVERROR(EAGAIN));
1969 if (ret != 0)
1970 break;
1971 read_size += pkt->size;
1972 st = ic->streams[pkt->stream_index];
1973 if (pkt->pts != AV_NOPTS_VALUE &&
1974 (st->start_time != AV_NOPTS_VALUE ||
1975 st->first_dts != AV_NOPTS_VALUE)) {
1976 duration = end_time = pkt->pts;
1977 if (st->start_time != AV_NOPTS_VALUE)
1978 duration -= st->start_time;
1979 else
1980 duration -= st->first_dts;
1981 if (duration < 0)
1982 duration += 1LL<<st->pts_wrap_bits;
1983 if (duration > 0) {
1984 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1985 st->duration = duration;
1986 }
1987 }
1988 av_free_packet(pkt);
1989 }
1990 }while( end_time==AV_NOPTS_VALUE
1991 && filesize > (DURATION_MAX_READ_SIZE<<retry)
1992 && ++retry <= DURATION_MAX_RETRY);
1993
1994 fill_all_stream_timings(ic);
1995
1996 avio_seek(ic->pb, old_offset, SEEK_SET);
1997 for (i=0; i<ic->nb_streams; i++) {
1998 st= ic->streams[i];
1999 st->cur_dts= st->first_dts;
2000 st->last_IP_pts = AV_NOPTS_VALUE;
2001 st->reference_dts = AV_NOPTS_VALUE;
2002 }
2003 }
2004
2005 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2006 {
2007 int64_t file_size;
2008
2009 /* get the file size, if possible */
2010 if (ic->iformat->flags & AVFMT_NOFILE) {
2011 file_size = 0;
2012 } else {
2013 file_size = avio_size(ic->pb);
2014 file_size = FFMAX(0, file_size);
2015 }
2016
2017 if ((!strcmp(ic->iformat->name, "mpeg") ||
2018 !strcmp(ic->iformat->name, "mpegts")) &&
2019 file_size && ic->pb->seekable) {
2020 /* get accurate estimate from the PTSes */
2021 estimate_timings_from_pts(ic, old_offset);
2022 } else if (has_duration(ic)) {
2023 /* at least one component has timings - we use them for all
2024 the components */
2025 fill_all_stream_timings(ic);
2026 } else {
2027 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2028 /* less precise: use bitrate info */
2029 estimate_timings_from_bit_rate(ic);
2030 }
2031 update_stream_timings(ic);
2032
2033 {
2034 int i;
2035 AVStream av_unused *st;
2036 for(i = 0;i < ic->nb_streams; i++) {
2037 st = ic->streams[i];
2038 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2039 (double) st->start_time / AV_TIME_BASE,
2040 (double) st->duration / AV_TIME_BASE);
2041 }
2042 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2043 (double) ic->start_time / AV_TIME_BASE,
2044 (double) ic->duration / AV_TIME_BASE,
2045 ic->bit_rate / 1000);
2046 }
2047 }
2048
2049 static int has_codec_parameters(AVStream *st)
2050 {
2051 AVCodecContext *avctx = st->codec;
2052 int val;
2053 switch (avctx->codec_type) {
2054 case AVMEDIA_TYPE_AUDIO:
2055 val = avctx->sample_rate && avctx->channels;
2056 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2057 return 0;
2058 break;
2059 case AVMEDIA_TYPE_VIDEO:
2060 val = avctx->width;
2061 if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
2062 return 0;
2063 break;
2064 default:
2065 val = 1;
2066 break;
2067 }
2068 return avctx->codec_id != CODEC_ID_NONE && val != 0;
2069 }
2070
2071 static int has_decode_delay_been_guessed(AVStream *st)
2072 {
2073 return st->codec->codec_id != CODEC_ID_H264 ||
2074 st->info->nb_decoded_frames >= 6;
2075 }
2076
2077 /* Returns 1 if decoded data was returned, 0 if not, or a negative error code. */
2078 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
2079 {
2080 AVCodec *codec;
2081 int got_picture = 1, ret = 0;
2082 AVFrame picture;
2083 AVPacket pkt = *avpkt;
2084
2085 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2086 AVDictionary *thread_opt = NULL;
2087
2088 codec = st->codec->codec ? st->codec->codec :
2089 avcodec_find_decoder(st->codec->codec_id);
2090
2091 if (!codec) {
2092 st->info->found_decoder = -1;
2093 return -1;
2094 }
2095
2096 /* force thread count to 1 since the h264 decoder will not extract SPS
2097 * and PPS to extradata during multi-threaded decoding */
2098 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2099 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2100 if (!options)
2101 av_dict_free(&thread_opt);
2102 if (ret < 0) {
2103 st->info->found_decoder = -1;
2104 return ret;
2105 }
2106 st->info->found_decoder = 1;
2107 } else if (!st->info->found_decoder)
2108 st->info->found_decoder = 1;
2109
2110 if (st->info->found_decoder < 0)
2111 return -1;
2112
2113 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2114 ret >= 0 &&
2115 (!has_codec_parameters(st) ||
2116 !has_decode_delay_been_guessed(st) ||
2117 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2118 got_picture = 0;
2119 avcodec_get_frame_defaults(&picture);
2120 switch(st->codec->codec_type) {
2121 case AVMEDIA_TYPE_VIDEO:
2122 ret = avcodec_decode_video2(st->codec, &picture,
2123 &got_picture, &pkt);
2124 break;
2125 case AVMEDIA_TYPE_AUDIO:
2126 ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
2127 break;
2128 default:
2129 break;
2130 }
2131 if (ret >= 0) {
2132 if (got_picture)
2133 st->info->nb_decoded_frames++;
2134 pkt.data += ret;
2135 pkt.size -= ret;
2136 ret = got_picture;
2137 }
2138 }
2139 return ret;
2140 }
2141
2142 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2143 {
2144 while (tags->id != CODEC_ID_NONE) {
2145 if (tags->id == id)
2146 return tags->tag;
2147 tags++;
2148 }
2149 return 0;
2150 }
2151
2152 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2153 {
2154 int i;
2155 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2156 if(tag == tags[i].tag)
2157 return tags[i].id;
2158 }
2159 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2160 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2161 return tags[i].id;
2162 }
2163 return CODEC_ID_NONE;
2164 }
2165
2166 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2167 {
2168 int i;
2169 for(i=0; tags && tags[i]; i++){
2170 int tag= ff_codec_get_tag(tags[i], id);
2171 if(tag) return tag;
2172 }
2173 return 0;
2174 }
2175
2176 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2177 {
2178 int i;
2179 for(i=0; tags && tags[i]; i++){
2180 enum CodecID id= ff_codec_get_id(tags[i], tag);
2181 if(id!=CODEC_ID_NONE) return id;
2182 }
2183 return CODEC_ID_NONE;
2184 }
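/* A minimal usage sketch for the tag <-> CodecID helpers above, e.g. on the
 * muxing side (assumes s->oformat->codec_tag is non-NULL for the chosen
 * muxer; the MKTAG value is only an illustration):
 *
 *     unsigned int tag = av_codec_get_tag(s->oformat->codec_tag,
 *                                         st->codec->codec_id);
 *     enum CodecID id  = av_codec_get_id(s->oformat->codec_tag,
 *                                        MKTAG('H', '2', '6', '4'));
 *
 * Both return 0 / CODEC_ID_NONE when no mapping exists in any of the
 * muxer's tag tables. */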
2185
2186 static void compute_chapters_end(AVFormatContext *s)
2187 {
2188 unsigned int i, j;
2189 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2190
2191 for (i = 0; i < s->nb_chapters; i++)
2192 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2193 AVChapter *ch = s->chapters[i];
2194 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2195 : INT64_MAX;
2196
2197 for (j = 0; j < s->nb_chapters; j++) {
2198 AVChapter *ch1 = s->chapters[j];
2199 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2200 if (j != i && next_start > ch->start && next_start < end)
2201 end = next_start;
2202 }
2203 ch->end = (end == INT64_MAX) ? ch->start : end;
2204 }
2205 }
2206
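/* Candidate frame rates for the duration_error heuristic used below. The
 * returned value is the frame rate scaled by 12*1001: indices below 60*12
 * cover multiples of 1/12 fps up to 60 fps, the remaining indices cover the
 * NTSC-style rates 24000/1001, 30000/1001, 60000/1001, 12000/1001 and
 * 15000/1001. */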
2207 static int get_std_framerate(int i){
2208 if(i<60*12) return i*1001;
2209 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2210 }
2211
2212 /*
2213 * Is the time base unreliable?
2214 * This is a heuristic that balances quick acceptance of the values in the
2215 * headers against some extra checks.
2216 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2217 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2218 * And there are "variable" fps files this needs to detect as well.
2219 */
2220 static int tb_unreliable(AVCodecContext *c){
2221 if( c->time_base.den >= 101L*c->time_base.num
2222 || c->time_base.den < 5L*c->time_base.num
2223 /* || c->codec_tag == AV_RL32("DIVX")
2224 || c->codec_tag == AV_RL32("XVID")*/
2225 || c->codec_id == CODEC_ID_MPEG2VIDEO
2226 || c->codec_id == CODEC_ID_H264
2227 )
2228 return 1;
2229 return 0;
2230 }
2231
2232 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2233 {
2234 int i, count, ret, read_size, j;
2235 AVStream *st;
2236 AVPacket pkt1, *pkt;
2237 int64_t old_offset = avio_tell(ic->pb);
2238 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2239
2240 for(i=0;i<ic->nb_streams;i++) {
2241 AVCodec *codec;
2242 AVDictionary *thread_opt = NULL;
2243 st = ic->streams[i];
2244
2245 // the parser is initialized here only for the extradata split code below
2246 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2247 st->parser = av_parser_init(st->codec->codec_id);
2248 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2249 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2250 }
2251 }
2252 codec = st->codec->codec ? st->codec->codec :
2253 avcodec_find_decoder(st->codec->codec_id);
2254
2255 /* force thread count to 1 since the h264 decoder will not extract SPS
2256 * and PPS to extradata during multi-threaded decoding */
2257 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2258
2259 /* Ensure that subtitle_header is properly set. */
2260 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2261 && codec && !st->codec->codec)
2262 avcodec_open2(st->codec, codec, options ? &options[i]
2263 : &thread_opt);
2264
2265 //try to just open decoders, in case this is enough to get parameters
2266 if (!has_codec_parameters(st)) {
2267 if (codec && !st->codec->codec)
2268 avcodec_open2(st->codec, codec, options ? &options[i]
2269 : &thread_opt);
2270 }
2271 if (!options)
2272 av_dict_free(&thread_opt);
2273 }
2274
2275 for (i=0; i<ic->nb_streams; i++) {
2276 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2277 }
2278
2279 count = 0;
2280 read_size = 0;
2281 for(;;) {
2282 if (ff_check_interrupt(&ic->interrupt_callback)){
2283 ret= AVERROR_EXIT;
2284 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2285 break;
2286 }
2287
2288 /* check if one codec still needs to be handled */
2289 for(i=0;i<ic->nb_streams;i++) {
2290 int fps_analyze_framecount = 20;
2291
2292 st = ic->streams[i];
2293 if (!has_codec_parameters(st))
2294 break;
2295 /* if the timebase is coarse (like the usual millisecond precision
2296 of mkv), we need to analyze more frames to reliably arrive at
2297 the correct fps */
2298 if (av_q2d(st->time_base) > 0.0005)
2299 fps_analyze_framecount *= 2;
2300 if (ic->fps_probe_size >= 0)
2301 fps_analyze_framecount = ic->fps_probe_size;
2302 /* variable fps and no guess at the real fps */
2303 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2304 && st->info->duration_count < fps_analyze_framecount
2305 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2306 break;
2307 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2308 break;
2309 if(st->first_dts == AV_NOPTS_VALUE)
2310 break;
2311 }
2312 if (i == ic->nb_streams) {
2313 /* NOTE: if the format has no header, then we need to read
2314 some packets to get most of the streams, so we cannot
2315 stop here */
2316 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2317 /* if we found the info for all the codecs, we can stop */
2318 ret = count;
2319 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2320 break;
2321 }
2322 }
2323 /* we did not get all the codec info, but we read too much data */
2324 if (read_size >= ic->probesize) {
2325 ret = count;
2326 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2327 break;
2328 }
2329
2330 /* NOTE: a new stream can be added there if the file has no header
2331 (AVFMTCTX_NOHEADER) */
2332 ret = read_frame_internal(ic, &pkt1);
2333 if (ret == AVERROR(EAGAIN))
2334 continue;
2335
2336 if (ret < 0) {
2337 /* EOF or error*/
2338 AVPacket empty_pkt = { 0 };
2339 int err = 0;
2340 av_init_packet(&empty_pkt);
2341
2342 ret = -1; /* we could not have all the codec parameters before EOF */
2343 for(i=0;i<ic->nb_streams;i++) {
2344 st = ic->streams[i];
2345
2346 /* flush the decoders */
2347 if (st->info->found_decoder == 1) {
2348 do {
2349 err = try_decode_frame(st, &empty_pkt,
2350 (options && i < orig_nb_streams) ?
2351 &options[i] : NULL);
2352 } while (err > 0 && !has_codec_parameters(st));
2353 }
2354
2355 if (err < 0) {
2356 av_log(ic, AV_LOG_WARNING,
2357 "decoding for stream %d failed\n", st->index);
2358 } else if (!has_codec_parameters(st)) {
2359 char buf[256];
2360 avcodec_string(buf, sizeof(buf), st->codec, 0);
2361 av_log(ic, AV_LOG_WARNING,
2362 "Could not find codec parameters (%s)\n", buf);
2363 } else {
2364 ret = 0;
2365 }
2366 }
2367 break;
2368 }
2369
2370 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2371 if ((ret = av_dup_packet(pkt)) < 0)
2372 goto find_stream_info_err;
2373
2374 read_size += pkt->size;
2375
2376 st = ic->streams[pkt->stream_index];
2377 if (st->codec_info_nb_frames>1) {
2378 if (av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2379 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2380 break;
2381 }
2382 st->info->codec_info_duration += pkt->duration;
2383 }
2384 {
2385 int64_t last = st->info->last_dts;
2386
2387 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
2388 int64_t duration= pkt->dts - last;
2389 double dur= duration * av_q2d(st->time_base);
2390
2391 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2392 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2393 if (st->info->duration_count < 2)
2394 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
2395 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2396 int framerate= get_std_framerate(i);
2397 int ticks= lrintf(dur*framerate/(1001*12));
2398 double error = dur - (double)ticks*1001*12 / framerate;
2399 st->info->duration_error[i] += error*error;
2400 }
2401 st->info->duration_count++;
2402 // ignore the first 4 values; they might have some random jitter
2403 if (st->info->duration_count > 3)
2404 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2405 }
2406 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2407 st->info->last_dts = pkt->dts;
2408 }
2409 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2410 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2411 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2412 st->codec->extradata_size= i;
2413 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2414 if (!st->codec->extradata)
2415 return AVERROR(ENOMEM);
2416 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2417 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2418 }
2419 }
2420
2421 /* if still no information, we try to open the codec and to
2422 decompress the frame. We try to avoid that in most cases as
2423 it takes longer and uses more memory. For MPEG-4, we need to
2424 decompress for QuickTime.
2425
2426 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2427 least one frame of codec data; this makes sure the codec initializes
2428 the channel configuration and does not just trust the values from the container.
2429 */
2430 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2431
2432 st->codec_info_nb_frames++;
2433 count++;
2434 }
2435
2436 // close codecs which were opened in try_decode_frame()
2437 for(i=0;i<ic->nb_streams;i++) {
2438 st = ic->streams[i];
2439 avcodec_close(st->codec);
2440 }
2441 for(i=0;i<ic->nb_streams;i++) {
2442 st = ic->streams[i];
2443 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2444 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2445 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2446 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2447 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2448 // the check for tb_unreliable() is not completely correct, since this is not about handling
2449 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2450 // ipmovie.c produces.
2451 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2452 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2453 if (st->info->duration_count && !st->r_frame_rate.num
2454 && tb_unreliable(st->codec) /*&&
2455 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2456 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2457 int num = 0;
2458 double best_error= 2*av_q2d(st->time_base);
2459 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2460
2461 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2462 double error = st->info->duration_error[j] * get_std_framerate(j);
2463 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2464 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2465 if(error < best_error){
2466 best_error= error;
2467 num = get_std_framerate(j);
2468 }
2469 }
2470 // do not increase frame rate by more than 1 % in order to match a standard rate.
2471 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2472 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2473 }
2474 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2475 if(!st->codec->bits_per_coded_sample)
2476 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2477 // set stream disposition based on audio service type
2478 switch (st->codec->audio_service_type) {
2479 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2480 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2481 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2482 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2483 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2484 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2485 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2486 st->disposition = AV_DISPOSITION_COMMENT; break;
2487 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2488 st->disposition = AV_DISPOSITION_KARAOKE; break;
2489 }
2490 }
2491 }
2492
2493 estimate_timings(ic, old_offset);
2494
2495 compute_chapters_end(ic);
2496
2497 find_stream_info_err:
2498 for (i=0; i < ic->nb_streams; i++) {
2499 if (ic->streams[i]->codec)
2500 ic->streams[i]->codec->thread_count = 0;
2501 av_freep(&ic->streams[i]->info);
2502 }
2503 return ret;
2504 }
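/* A minimal demuxing sketch around avformat_find_stream_info() ("input.mkv"
 * is only a placeholder path; error handling is abbreviated):
 *
 *     AVFormatContext *fmt = NULL;
 *     av_register_all();
 *     if (avformat_open_input(&fmt, "input.mkv", NULL, NULL) < 0)
 *         return -1;
 *     if (avformat_find_stream_info(fmt, NULL) < 0) {
 *         avformat_close_input(&fmt);
 *         return -1;
 *     }
 *     av_dump_format(fmt, 0, "input.mkv", 0);
 *     // ... read packets with av_read_frame() ...
 *     avformat_close_input(&fmt);
 */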
2505
2506 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2507 {
2508 int i, j;
2509
2510 for (i = 0; i < ic->nb_programs; i++)
2511 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2512 if (ic->programs[i]->stream_index[j] == s)
2513 return ic->programs[i];
2514 return NULL;
2515 }
2516
2517 int av_find_best_stream(AVFormatContext *ic,
2518 enum AVMediaType type,
2519 int wanted_stream_nb,
2520 int related_stream,
2521 AVCodec **decoder_ret,
2522 int flags)
2523 {
2524 int i, nb_streams = ic->nb_streams;
2525 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2526 unsigned *program = NULL;
2527 AVCodec *decoder = NULL, *best_decoder = NULL;
2528
2529 if (related_stream >= 0 && wanted_stream_nb < 0) {
2530 AVProgram *p = find_program_from_stream(ic, related_stream);
2531 if (p) {
2532 program = p->stream_index;
2533 nb_streams = p->nb_stream_indexes;
2534 }
2535 }
2536 for (i = 0; i < nb_streams; i++) {
2537 int real_stream_index = program ? program[i] : i;
2538 AVStream *st = ic->streams[real_stream_index];
2539 AVCodecContext *avctx = st->codec;
2540 if (avctx->codec_type != type)
2541 continue;
2542 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2543 continue;
2544 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2545 continue;
2546 if (decoder_ret) {
2547 decoder = avcodec_find_decoder(st->codec->codec_id);
2548 if (!decoder) {
2549 if (ret < 0)
2550 ret = AVERROR_DECODER_NOT_FOUND;
2551 continue;
2552 }
2553 }
2554 if (best_count >= st->codec_info_nb_frames)
2555 continue;
2556 best_count = st->codec_info_nb_frames;
2557 ret = real_stream_index;
2558 best_decoder = decoder;
2559 if (program && i == nb_streams - 1 && ret < 0) {
2560 program = NULL;
2561 nb_streams = ic->nb_streams;
2562 i = 0; /* no related stream found, try again with everything */
2563 }
2564 }
2565 if (decoder_ret)
2566 *decoder_ret = best_decoder;
2567 return ret;
2568 }
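/* Typical use of av_find_best_stream(), as a sketch (assumes fmt was opened
 * and probed as in the example above; error paths abbreviated):
 *
 *     AVCodec *dec = NULL;
 *     int idx = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (idx < 0)
 *         return idx;  // AVERROR_STREAM_NOT_FOUND or AVERROR_DECODER_NOT_FOUND
 *     if (avcodec_open2(fmt->streams[idx]->codec, dec, NULL) < 0)
 *         return -1;
 */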
2569
2570 /*******************************************************/
2571
2572 int av_read_play(AVFormatContext *s)
2573 {
2574 if (s->iformat->read_play)
2575 return s->iformat->read_play(s);
2576 if (s->pb)
2577 return avio_pause(s->pb, 0);
2578 return AVERROR(ENOSYS);
2579 }
2580
2581 int av_read_pause(AVFormatContext *s)
2582 {
2583 if (s->iformat->read_pause)
2584 return s->iformat->read_pause(s);
2585 if (s->pb)
2586 return avio_pause(s->pb, 1);
2587 return AVERROR(ENOSYS);
2588 }
2589
2590 void avformat_free_context(AVFormatContext *s)
2591 {
2592 int i;
2593 AVStream *st;
2594
2595 av_opt_free(s);
2596 if (s->iformat && s->iformat->priv_class && s->priv_data)
2597 av_opt_free(s->priv_data);
2598
2599 for(i=0;i<s->nb_streams;i++) {
2600 /* free all data in a stream component */
2601 st = s->streams[i];
2602 if (st->parser) {
2603 av_parser_close(st->parser);
2604 }
2605 if (st->attached_pic.data)
2606 av_free_packet(&st->attached_pic);
2607 av_dict_free(&st->metadata);
2608 av_free(st->index_entries);
2609 av_free(st->codec->extradata);
2610 av_free(st->codec->subtitle_header);
2611 av_free(st->codec);
2612 av_free(st->priv_data);
2613 av_free(st->info);
2614 av_free(st);
2615 }
2616 for(i=s->nb_programs-1; i>=0; i--) {
2617 av_dict_free(&s->programs[i]->metadata);
2618 av_freep(&s->programs[i]->stream_index);
2619 av_freep(&s->programs[i]);
2620 }
2621 av_freep(&s->programs);
2622 av_freep(&s->priv_data);
2623 while(s->nb_chapters--) {
2624 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2625 av_free(s->chapters[s->nb_chapters]);
2626 }
2627 av_freep(&s->chapters);
2628 av_dict_free(&s->metadata);
2629 av_freep(&s->streams);
2630 av_free(s);
2631 }
2632
2633 #if FF_API_CLOSE_INPUT_FILE
2634 void av_close_input_file(AVFormatContext *s)
2635 {
2636 avformat_close_input(&s);
2637 }
2638 #endif
2639
2640 void avformat_close_input(AVFormatContext **ps)
2641 {
2642 AVFormatContext *s = *ps;
2643 AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2644 NULL : s->pb;
2645 flush_packet_queue(s);
2646 if (s->iformat->read_close)
2647 s->iformat->read_close(s);
2648 avformat_free_context(s);
2649 *ps = NULL;
2650 if (pb)
2651 avio_close(pb);
2652 }
2653
2654 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
2655 {
2656 AVStream *st;
2657 int i;
2658 AVStream **streams;
2659
2660 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2661 return NULL;
2662 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2663 if (!streams)
2664 return NULL;
2665 s->streams = streams;
2666
2667 st = av_mallocz(sizeof(AVStream));
2668 if (!st)
2669 return NULL;
2670 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2671 av_free(st);
2672 return NULL;
2673 }
2674
2675 st->codec = avcodec_alloc_context3(c);
2676 if (s->iformat) {
2677 /* no default bitrate if decoding */
2678 st->codec->bit_rate = 0;
2679 }
2680 st->index = s->nb_streams;
2681 st->start_time = AV_NOPTS_VALUE;
2682 st->duration = AV_NOPTS_VALUE;
2683 /* we set the current DTS to 0 so that formats without any timestamps
2684 but with durations get some timestamps; formats with some unknown
2685 timestamps have their first few packets buffered and the
2686 timestamps corrected before they are returned to the user */
2687 st->cur_dts = 0;
2688 st->first_dts = AV_NOPTS_VALUE;
2689 st->probe_packets = MAX_PROBE_PACKETS;
2690
2691 /* default pts setting is MPEG-like */
2692 avpriv_set_pts_info(st, 33, 1, 90000);
2693 st->last_IP_pts = AV_NOPTS_VALUE;
2694 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2695 st->pts_buffer[i]= AV_NOPTS_VALUE;
2696 st->reference_dts = AV_NOPTS_VALUE;
2697
2698 st->sample_aspect_ratio = (AVRational){0,1};
2699
2700 s->streams[s->nb_streams++] = st;
2701 return st;
2702 }
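/* Sketch of the muxing-side setup that uses avformat_new_stream() (the
 * output file name, codec and parameters are only illustrative; checks are
 * abbreviated):
 *
 *     AVFormatContext *oc = avformat_alloc_context();
 *     if (!oc)
 *         return AVERROR(ENOMEM);
 *     oc->oformat = av_guess_format(NULL, "out.mp4", NULL);
 *     AVStream *st = avformat_new_stream(oc, NULL);
 *     if (!oc->oformat || !st)
 *         return -1;
 *     st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
 *     st->codec->codec_id   = CODEC_ID_MPEG4;
 *     st->codec->width      = 640;
 *     st->codec->height     = 480;
 *     st->codec->time_base  = (AVRational){ 1, 25 };
 */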
2703
2704 AVProgram *av_new_program(AVFormatContext *ac, int id)
2705 {
2706 AVProgram *program=NULL;
2707 int i;
2708
2709 av_dlog(ac, "new_program: id=0x%04x\n", id);
2710
2711 for(i=0; i<ac->nb_programs; i++)
2712 if(ac->programs[i]->id == id)
2713 program = ac->programs[i];
2714
2715 if(!program){
2716 program = av_mallocz(sizeof(AVProgram));
2717 if (!program)
2718 return NULL;
2719 dynarray_add(&ac->programs, &ac->nb_programs, program);
2720 program->discard = AVDISCARD_NONE;
2721 }
2722 program->id = id;
2723
2724 return program;
2725 }
2726
2727 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2728 {
2729 AVChapter *chapter = NULL;
2730 int i;
2731
2732 for(i=0; i<s->nb_chapters; i++)
2733 if(s->chapters[i]->id == id)
2734 chapter = s->chapters[i];
2735
2736 if(!chapter){
2737 chapter= av_mallocz(sizeof(AVChapter));
2738 if(!chapter)
2739 return NULL;
2740 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2741 }
2742 av_dict_set(&chapter->metadata, "title", title, 0);
2743 chapter->id = id;
2744 chapter->time_base= time_base;
2745 chapter->start = start;
2746 chapter->end = end;
2747
2748 return chapter;
2749 }
2750
2751 /************************************************************/
2752 /* output media file */
2753
2754 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2755 {
2756 const AVCodecTag *avctag;
2757 int n;
2758 enum CodecID id = CODEC_ID_NONE;
2759 unsigned int tag = 0;
2760
2761 /**
2762 * Check that tag + id is in the table
2763 * If neither is in the table -> OK
2764 * If tag is in the table with another id -> FAIL
2765 * If id is in the table with another tag -> FAIL unless strict < normal
2766 */
2767 for (n = 0; s->oformat->codec_tag[n]; n++) {
2768 avctag = s->oformat->codec_tag[n];
2769 while (avctag->id != CODEC_ID_NONE) {
2770 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
2771 id = avctag->id;
2772 if (id == st->codec->codec_id)
2773 return 1;
2774 }
2775 if (avctag->id == st->codec->codec_id)
2776 tag = avctag->tag;
2777 avctag++;
2778 }
2779 }
2780 if (id != CODEC_ID_NONE)
2781 return 0;
2782 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
2783 return 0;
2784 return 1;
2785 }
2786
2787 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
2788 {
2789 int ret = 0, i;
2790 AVStream *st;
2791 AVDictionary *tmp = NULL;
2792
2793 if (options)
2794 av_dict_copy(&tmp, *options, 0);
2795 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
2796 goto fail;
2797
2798 // some sanity checks
2799 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2800 av_log(s, AV_LOG_ERROR, "no streams\n");
2801 ret = AVERROR(EINVAL);
2802 goto fail;
2803 }
2804
2805 for(i=0;i<s->nb_streams;i++) {
2806 st = s->streams[i];
2807
2808 switch (st->codec->codec_type) {
2809 case AVMEDIA_TYPE_AUDIO:
2810 if(st->codec->sample_rate<=0){
2811 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2812 ret = AVERROR(EINVAL);
2813 goto fail;
2814 }
2815 if(!st->codec->block_align)
2816 st->codec->block_align = st->codec->channels *
2817 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2818 break;
2819 case AVMEDIA_TYPE_VIDEO:
2820 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2821 av_log(s, AV_LOG_ERROR, "time base not set\n");
2822 ret = AVERROR(EINVAL);
2823 goto fail;
2824 }
2825 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2826 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2827 ret = AVERROR(EINVAL);
2828 goto fail;
2829 }
2830 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2831 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
2832 "(%d/%d) and encoder layer (%d/%d)\n",
2833 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2834 st->codec->sample_aspect_ratio.num,
2835 st->codec->sample_aspect_ratio.den);
2836 ret = AVERROR(EINVAL);
2837 goto fail;
2838 }
2839 break;
2840 }
2841
2842 if(s->oformat->codec_tag){
2843 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2844 // the current rawvideo encoding system ends up setting the wrong codec_tag for AVI; we override it here
2845 st->codec->codec_tag= 0;
2846 }
2847 if(st->codec->codec_tag){
2848 if (!validate_codec_tag(s, st)) {
2849 char tagbuf[32];
2850 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2851 av_log(s, AV_LOG_ERROR,
2852 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2853 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2854 ret = AVERROR_INVALIDDATA;
2855 goto fail;
2856 }
2857 }else
2858 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2859 }
2860
2861 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2862 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2863 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2864 }
2865
2866 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2867 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2868 if (!s->priv_data) {
2869 ret = AVERROR(ENOMEM);
2870 goto fail;
2871 }
2872 if (s->oformat->priv_class) {
2873 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2874 av_opt_set_defaults(s->priv_data);
2875 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
2876 goto fail;
2877 }
2878 }
2879
2880 /* set muxer identification string */
2881 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2882 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2883 }
2884
2885 if(s->oformat->write_header){
2886 ret = s->oformat->write_header(s);
2887 if (ret < 0)
2888 goto fail;
2889 }
2890
2891 /* init PTS generation */
2892 for(i=0;i<s->nb_streams;i++) {
2893 int64_t den = AV_NOPTS_VALUE;
2894 st = s->streams[i];
2895
2896 switch (st->codec->codec_type) {
2897 case AVMEDIA_TYPE_AUDIO:
2898 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2899 break;
2900 case AVMEDIA_TYPE_VIDEO:
2901 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2902 break;
2903 default:
2904 break;
2905 }
2906 if (den != AV_NOPTS_VALUE) {
2907 if (den <= 0) {
2908 ret = AVERROR_INVALIDDATA;
2909 goto fail;
2910 }
2911 frac_init(&st->pts, 0, 0, den);
2912 }
2913 }
2914
2915 if (options) {
2916 av_dict_free(options);
2917 *options = tmp;
2918 }
2919 return 0;
2920 fail:
2921 av_dict_free(&tmp);
2922 return ret;
2923 }
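/* Sketch of how avformat_write_header() is typically called with an options
 * dictionary; entries that were not consumed are returned in *options (the
 * option name used here is purely hypothetical):
 *
 *     int ret;
 *     AVDictionary *opts = NULL;
 *     if ((ret = avio_open(&oc->pb, "out.mp4", AVIO_FLAG_WRITE)) < 0)
 *         return ret;
 *     av_dict_set(&opts, "some_private_option", "value", 0);
 *     ret = avformat_write_header(oc, &opts);
 *     if (av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))
 *         av_log(oc, AV_LOG_WARNING, "some options were not consumed\n");
 *     av_dict_free(&opts);
 */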
2924
2925 //FIXME merge with compute_pkt_fields
2926 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
2927 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2928 int num, den, frame_size, i;
2929
2930 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
2931 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2932
2933 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2934 return AVERROR(EINVAL);*/
2935
2936 /* duration field */
2937 if (pkt->duration == 0) {
2938 compute_frame_duration(&num, &den, st, NULL, pkt);
2939 if (den && num) {
2940 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2941 }
2942 }
2943
2944 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2945 pkt->pts= pkt->dts;
2946
2947 //XXX/FIXME this is a temporary hack until all encoders output pts
2948 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2949 pkt->dts=
2950 // pkt->pts= st->cur_dts;
2951 pkt->pts= st->pts.val;
2952 }
2953
2954 //calculate dts from pts
2955 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2956 st->pts_buffer[0]= pkt->pts;
2957 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2958 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
2959 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2960 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2961
2962 pkt->dts= st->pts_buffer[0];
2963 }
2964
2965 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2966 av_log(s, AV_LOG_ERROR,
2967 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
2968 st->index, st->cur_dts, pkt->dts);
2969 return AVERROR(EINVAL);
2970 }
2971 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2972 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
2973 return AVERROR(EINVAL);
2974 }
2975
2976 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2977 st->cur_dts= pkt->dts;
2978 st->pts.val= pkt->dts;
2979
2980 /* update pts */
2981 switch (st->codec->codec_type) {
2982 case AVMEDIA_TYPE_AUDIO:
2983 frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
2984
2985 /* HACK/FIXME: we skip the initial 0-size packets as they are most
2986 likely equal to the encoder delay, but it would be better if we
2987 had the real timestamps from the encoder */
2988 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2989 frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2990 }
2991 break;
2992 case AVMEDIA_TYPE_VIDEO:
2993 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2994 break;
2995 default:
2996 break;
2997 }
2998 return 0;
2999 }
3000
3001 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3002 {
3003 int ret;
3004
3005 if (!pkt) {
3006 if (s->oformat->flags & AVFMT_ALLOW_FLUSH)
3007 return s->oformat->write_packet(s, pkt);
3008 return 1;
3009 }
3010
3011 ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3012
3013 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3014 return ret;
3015
3016 ret= s->oformat->write_packet(s, pkt);
3017
3018 if (ret >= 0)
3019 s->streams[pkt->stream_index]->nb_frames++;
3020 return ret;
3021 }
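/* Sketch of the write loop around av_write_frame() (get_next_packet() is a
 * hypothetical helper and error handling is abbreviated); passing NULL only
 * flushes muxers that set AVFMT_ALLOW_FLUSH:
 *
 *     while (get_next_packet(&pkt) >= 0)
 *         av_write_frame(oc, &pkt);
 *     if (oc->oformat->flags & AVFMT_ALLOW_FLUSH)
 *         av_write_frame(oc, NULL);
 *     av_write_trailer(oc);
 */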
3022
3023 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3024 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3025 {
3026 AVPacketList **next_point, *this_pktl;
3027
3028 this_pktl = av_mallocz(sizeof(AVPacketList));
3029 this_pktl->pkt= *pkt;
3030 pkt->destruct= NULL; // do not free original but only the copy
3031 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
3032
3033 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3034 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
3035 }else
3036 next_point = &s->packet_buffer;
3037
3038 if(*next_point){
3039 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3040 while(!compare(s, &(*next_point)->pkt, pkt)){
3041 next_point= &(*next_point)->next;
3042 }
3043 goto next_non_null;
3044 }else{
3045 next_point = &(s->packet_buffer_end->next);
3046 }
3047 }
3048 assert(!*next_point);
3049
3050 s->packet_buffer_end= this_pktl;
3051 next_non_null:
3052
3053 this_pktl->next= *next_point;
3054
3055 s->streams[pkt->stream_index]->last_in_packet_buffer=
3056 *next_point= this_pktl;
3057 }
3058
3059 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3060 {
3061 AVStream *st = s->streams[ pkt ->stream_index];
3062 AVStream *st2= s->streams[ next->stream_index];
3063 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3064 st->time_base);
3065
3066 if (comp == 0)
3067 return pkt->stream_index < next->stream_index;
3068 return comp > 0;
3069 }
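/* Worked example for the av_compare_ts() call above (illustrative values):
 * with next->dts = 3600 in a 1/90000 time base (0.040 s) and pkt->dts = 34
 * in a 1/1000 time base (0.034 s), av_compare_ts() returns 1, so this
 * function returns 1 and "pkt" is inserted before "next" in the buffer. */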
3070
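/* Interleave packets per dts: buffers the incoming packet (if any) and, once
 * every stream has at least one packet queued or flush is set, returns the
 * earliest buffered packet in *out. Returns 1 when *out was filled, 0 when
 * more input is needed. */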
3071 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
3072 AVPacketList *pktl;
3073 int stream_count=0;
3074 int i;
3075
3076 if(pkt){
3077 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3078 }
3079
3080 for(i=0; i < s->nb_streams; i++)
3081 stream_count+= !!s->streams[i]->last_in_packet_buffer;
3082
3083 if(stream_count && (s->nb_streams == stream_count || flush)){
3084 pktl= s->packet_buffer;
3085 *out= pktl->pkt;
3086
3087 s->packet_buffer= pktl->next;
3088 if(!s->packet_buffer)
3089 s->packet_buffer_end= NULL;
3090
3091 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3092 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3093 av_freep(&pktl);
3094 return 1;
3095 }else{
3096 av_init_packet(out);
3097 return 0;
3098 }
3099 }
3100
3101 /**
3102 * Interleave an AVPacket correctly so it can be muxed.
3103 * @param out the interleaved packet will be output here
3104 * @param in the input packet
3105 * @param flush 1 if no further packets are available as input and all