lavf: add avformat_open_input() as a replacement for av_open_input_*
[libav.git] / libavformat / utils.c
1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /* #define DEBUG */
23
24 #include "avformat.h"
25 #include "avio_internal.h"
26 #include "internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/dict.h"
30 #include "metadata.h"
31 #include "id3v2.h"
32 #include "libavutil/avstring.h"
33 #include "riff.h"
34 #include "audiointerleave.h"
35 #include "url.h"
36 #include <sys/time.h>
37 #include <time.h>
38 #include <strings.h>
39 #include <stdarg.h>
40 #if CONFIG_NETWORK
41 #include "network.h"
42 #endif
43
44 #undef NDEBUG
45 #include <assert.h>
46
47 /**
48 * @file
49 * various utility functions for use within Libav
50 */
51
52 unsigned avformat_version(void)
53 {
54 return LIBAVFORMAT_VERSION_INT;
55 }
56
57 const char *avformat_configuration(void)
58 {
59 return LIBAV_CONFIGURATION;
60 }
61
62 const char *avformat_license(void)
63 {
64 #define LICENSE_PREFIX "libavformat license: "
65 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
66 }
67
68 /* fraction handling */
69
70 /**
71 * f = val + (num / den) + 0.5.
72 *
73  * 'num' is normalized so that 0 <= num < den.
74 *
75 * @param f fractional number
76 * @param val integer value
77 * @param num must be >= 0
78 * @param den must be >= 1
79 */
80 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
81 {
82 num += (den >> 1);
83 if (num >= den) {
84 val += num / den;
85 num = num % den;
86 }
87 f->val = val;
88 f->num = num;
89 f->den = den;
90 }
91
92 /**
93 * Fractional addition to f: f = f + (incr / f->den).
94 *
95 * @param f fractional number
96 * @param incr increment, can be positive or negative
97 */
98 static void av_frac_add(AVFrac *f, int64_t incr)
99 {
100 int64_t num, den;
101
102 num = f->num + incr;
103 den = f->den;
104 if (num < 0) {
105 f->val += num / den;
106 num = num % den;
107 if (num < 0) {
108 num += den;
109 f->val--;
110 }
111 } else if (num >= den) {
112 f->val += num / den;
113 num = num % den;
114 }
115 f->num = num;
116 }
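/*
 * Worked example (illustrative values only, not used anywhere in this file):
 * tracking a timestamp in a 90 kHz time base with 3003-tick frame durations.
 * av_frac_init(&f, 0, 0, 90000) stores val = 0 and num = 45000 (the +0.5
 * rounding term described above); each av_frac_add(&f, 3003) then advances
 * num by 3003 and carries into val whenever num reaches den, so f->val always
 * holds the correctly rounded integer timestamp with no accumulated
 * floating-point error.
 */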
117
118 /** head of registered input format linked list */
119 static AVInputFormat *first_iformat = NULL;
120 /** head of registered output format linked list */
121 static AVOutputFormat *first_oformat = NULL;
122
123 AVInputFormat *av_iformat_next(AVInputFormat *f)
124 {
125 if(f) return f->next;
126 else return first_iformat;
127 }
128
129 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
130 {
131 if(f) return f->next;
132 else return first_oformat;
133 }
134
135 void av_register_input_format(AVInputFormat *format)
136 {
137 AVInputFormat **p;
138 p = &first_iformat;
139 while (*p != NULL) p = &(*p)->next;
140 *p = format;
141 format->next = NULL;
142 }
143
144 void av_register_output_format(AVOutputFormat *format)
145 {
146 AVOutputFormat **p;
147 p = &first_oformat;
148 while (*p != NULL) p = &(*p)->next;
149 *p = format;
150 format->next = NULL;
151 }
152
153 int av_match_ext(const char *filename, const char *extensions)
154 {
155 const char *ext, *p;
156 char ext1[32], *q;
157
158 if(!filename)
159 return 0;
160
161 ext = strrchr(filename, '.');
162 if (ext) {
163 ext++;
164 p = extensions;
165 for(;;) {
166 q = ext1;
167 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
168 *q++ = *p++;
169 *q = '\0';
170 if (!strcasecmp(ext1, ext))
171 return 1;
172 if (*p == '\0')
173 break;
174 p++;
175 }
176 }
177 return 0;
178 }
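/*
 * For example, av_match_ext("movie.MKV", "mkv,mka,webm") returns 1: the loop
 * above compares case-insensitively, checking each comma-separated entry in
 * 'extensions' against the part of the filename after the last '.'.
 * (The filename and extension list here are purely illustrative.)
 */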
179
180 static int match_format(const char *name, const char *names)
181 {
182 const char *p;
183 int len, namelen;
184
185 if (!name || !names)
186 return 0;
187
188 namelen = strlen(name);
189 while ((p = strchr(names, ','))) {
190 len = FFMAX(p - names, namelen);
191 if (!strncasecmp(name, names, len))
192 return 1;
193 names = p+1;
194 }
195 return !strcasecmp(name, names);
196 }
197
198 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
199 const char *mime_type)
200 {
201 AVOutputFormat *fmt = NULL, *fmt_found;
202 int score_max, score;
203
204 /* specific test for image sequences */
205 #if CONFIG_IMAGE2_MUXER
206 if (!short_name && filename &&
207 av_filename_number_test(filename) &&
208 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
209 return av_guess_format("image2", NULL, NULL);
210 }
211 #endif
212 /* Find the proper file type. */
213 fmt_found = NULL;
214 score_max = 0;
215 while ((fmt = av_oformat_next(fmt))) {
216 score = 0;
217 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
218 score += 100;
219 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
220 score += 10;
221 if (filename && fmt->extensions &&
222 av_match_ext(filename, fmt->extensions)) {
223 score += 5;
224 }
225 if (score > score_max) {
226 score_max = score;
227 fmt_found = fmt;
228 }
229 }
230 return fmt_found;
231 }
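/*
 * A typical caller-side sketch (values are only examples): asking for a muxer
 * by output filename,
 *
 *     AVOutputFormat *ofmt = av_guess_format(NULL, "out.mp4", NULL);
 *
 * walks the registered muxers and keeps the best-scoring one: +100 for a
 * matching short name, +10 for a matching MIME type and +5 for a matching
 * filename extension, so an explicit short_name always beats a mere
 * extension match.
 */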
232
233 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
234 const char *filename, const char *mime_type, enum AVMediaType type){
235 if(type == AVMEDIA_TYPE_VIDEO){
236 enum CodecID codec_id= CODEC_ID_NONE;
237
238 #if CONFIG_IMAGE2_MUXER
239 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
240 codec_id= ff_guess_image2_codec(filename);
241 }
242 #endif
243 if(codec_id == CODEC_ID_NONE)
244 codec_id= fmt->video_codec;
245 return codec_id;
246 }else if(type == AVMEDIA_TYPE_AUDIO)
247 return fmt->audio_codec;
248 else if (type == AVMEDIA_TYPE_SUBTITLE)
249 return fmt->subtitle_codec;
250 else
251 return CODEC_ID_NONE;
252 }
253
254 AVInputFormat *av_find_input_format(const char *short_name)
255 {
256 AVInputFormat *fmt = NULL;
257 while ((fmt = av_iformat_next(fmt))) {
258 if (match_format(short_name, fmt->name))
259 return fmt;
260 }
261 return NULL;
262 }
263
264
265 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
266 {
267 int ret= av_new_packet(pkt, size);
268
269 if(ret<0)
270 return ret;
271
272 pkt->pos= avio_tell(s);
273
274 ret= avio_read(s, pkt->data, size);
275 if(ret<=0)
276 av_free_packet(pkt);
277 else
278 av_shrink_packet(pkt, ret);
279
280 return ret;
281 }
282
283 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
284 {
285 int ret;
286 int old_size;
287 if (!pkt->size)
288 return av_get_packet(s, pkt, size);
289 old_size = pkt->size;
290 ret = av_grow_packet(pkt, size);
291 if (ret < 0)
292 return ret;
293 ret = avio_read(s, pkt->data + old_size, size);
294 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
295 return ret;
296 }
297
298
299 int av_filename_number_test(const char *filename)
300 {
301 char buf[1024];
302 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
303 }
304
305 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
306 {
307 AVProbeData lpd = *pd;
308 AVInputFormat *fmt1 = NULL, *fmt;
309 int score, id3 = 0;
310
311 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
312 int id3len = ff_id3v2_tag_len(lpd.buf);
313 if (lpd.buf_size > id3len + 16) {
314 lpd.buf += id3len;
315 lpd.buf_size -= id3len;
316 }
317 id3 = 1;
318 }
319
320 fmt = NULL;
321 while ((fmt1 = av_iformat_next(fmt1))) {
322 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
323 continue;
324 score = 0;
325 if (fmt1->read_probe) {
326 score = fmt1->read_probe(&lpd);
327 } else if (fmt1->extensions) {
328 if (av_match_ext(lpd.filename, fmt1->extensions)) {
329 score = 50;
330 }
331 }
332 if (score > *score_max) {
333 *score_max = score;
334 fmt = fmt1;
335 }else if (score == *score_max)
336 fmt = NULL;
337 }
338
339 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */
340 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4) {
341 while ((fmt = av_iformat_next(fmt)))
342 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) {
343 *score_max = AVPROBE_SCORE_MAX/4;
344 break;
345 }
346 }
347
348 return fmt;
349 }
350
351 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
352 int score=0;
353 return av_probe_input_format2(pd, is_opened, &score);
354 }
355
356 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
357 {
358 static const struct {
359 const char *name; enum CodecID id; enum AVMediaType type;
360 } fmt_id_type[] = {
361 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
362 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
363 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
364 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
365 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
366 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
367 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
368 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
369 { 0 }
370 };
371 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
372
373 if (fmt) {
374 int i;
375 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
376 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
377 for (i = 0; fmt_id_type[i].name; i++) {
378 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
379 st->codec->codec_id = fmt_id_type[i].id;
380 st->codec->codec_type = fmt_id_type[i].type;
381 break;
382 }
383 }
384 }
385 return !!fmt;
386 }
387
388 /************************************************************/
389 /* input media file */
390
391 /**
392 * Open a media file from an IO stream. 'fmt' must be specified.
393 */
394 int av_open_input_stream(AVFormatContext **ic_ptr,
395 AVIOContext *pb, const char *filename,
396 AVInputFormat *fmt, AVFormatParameters *ap)
397 {
398 int err;
399 AVFormatContext *ic;
400 AVFormatParameters default_ap;
401
402 if(!ap){
403 ap=&default_ap;
404 memset(ap, 0, sizeof(default_ap));
405 }
406
407 if(!ap->prealloced_context)
408 ic = avformat_alloc_context();
409 else
410 ic = *ic_ptr;
411 if (!ic) {
412 err = AVERROR(ENOMEM);
413 goto fail;
414 }
415 ic->iformat = fmt;
416 ic->pb = pb;
417 ic->duration = AV_NOPTS_VALUE;
418 ic->start_time = AV_NOPTS_VALUE;
419 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
420
421 /* allocate private data */
422 if (fmt->priv_data_size > 0) {
423 ic->priv_data = av_mallocz(fmt->priv_data_size);
424 if (!ic->priv_data) {
425 err = AVERROR(ENOMEM);
426 goto fail;
427 }
428 if (fmt->priv_class) {
429 *(const AVClass**)ic->priv_data = fmt->priv_class;
430 av_opt_set_defaults(ic->priv_data);
431 }
432 } else {
433 ic->priv_data = NULL;
434 }
435
436     // e.g. AVFMT_NOFILE formats will not have an AVIOContext
437 if (ic->pb)
438 ff_id3v2_read(ic, ID3v2_DEFAULT_MAGIC);
439
440 if (ic->iformat->read_header) {
441 err = ic->iformat->read_header(ic, ap);
442 if (err < 0)
443 goto fail;
444 }
445
446 if (pb && !ic->data_offset)
447 ic->data_offset = avio_tell(ic->pb);
448
449 ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
450
451 *ic_ptr = ic;
452 return 0;
453 fail:
454 if (ic) {
455 int i;
456 av_freep(&ic->priv_data);
457 for(i=0;i<ic->nb_streams;i++) {
458 AVStream *st = ic->streams[i];
459 if (st) {
460 av_free(st->priv_data);
461 av_free(st->codec->extradata);
462 av_free(st->codec);
463 av_free(st->info);
464 }
465 av_free(st);
466 }
467 }
468 av_free(ic);
469 *ic_ptr = NULL;
470 return err;
471 }
472
473 /** size of probe buffer, for guessing file type from file contents */
474 #define PROBE_BUF_MIN 2048
475 #define PROBE_BUF_MAX (1<<20)
476
477 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
478 const char *filename, void *logctx,
479 unsigned int offset, unsigned int max_probe_size)
480 {
481 AVProbeData pd = { filename ? filename : "", NULL, -offset };
482 unsigned char *buf = NULL;
483 int ret = 0, probe_size;
484
485 if (!max_probe_size) {
486 max_probe_size = PROBE_BUF_MAX;
487 } else if (max_probe_size > PROBE_BUF_MAX) {
488 max_probe_size = PROBE_BUF_MAX;
489 } else if (max_probe_size < PROBE_BUF_MIN) {
490 return AVERROR(EINVAL);
491 }
492
493 if (offset >= max_probe_size) {
494 return AVERROR(EINVAL);
495 }
496
497 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0;
498 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
499 int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
500 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
501
502 if (probe_size < offset) {
503 continue;
504 }
505
506 /* read probe data */
507 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
508 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
509 /* fail if error was not end of file, otherwise, lower score */
510 if (ret != AVERROR_EOF) {
511 av_free(buf);
512 return ret;
513 }
514 score = 0;
515 ret = 0; /* error was end of file, nothing read */
516 }
517 pd.buf_size += ret;
518 pd.buf = &buf[offset];
519
520 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
521
522 /* guess file format */
523 *fmt = av_probe_input_format2(&pd, 1, &score);
524 if(*fmt){
525 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
526 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
527 }else
528 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
529 }
530 }
531
532 if (!*fmt) {
533 av_free(buf);
534 return AVERROR_INVALIDDATA;
535 }
536
537 /* rewind. reuse probe buffer to avoid seeking */
538 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
539 av_free(buf);
540
541 return ret;
542 }
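/*
 * The probe sizes above grow geometrically: 2048, 4096, 8192, ... up to
 * max_probe_size (1 MiB by default), and each pass only reads the newly
 * added half of the buffer (buf_offset).  A format is accepted early only if
 * it scores above AVPROBE_SCORE_MAX/4; on the final, full-size pass any
 * positive score is accepted, with a warning logged for low-confidence
 * matches.
 */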
543
544 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
545 AVInputFormat *fmt,
546 int buf_size,
547 AVFormatParameters *ap)
548 {
549 int err;
550 AVProbeData probe_data, *pd = &probe_data;
551 AVIOContext *pb = NULL;
552 void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
553
554 pd->filename = "";
555 if (filename)
556 pd->filename = filename;
557 pd->buf = NULL;
558 pd->buf_size = 0;
559
560 if (!fmt) {
561 /* guess format if no file can be opened */
562 fmt = av_probe_input_format(pd, 0);
563 }
564
565 /* Do not open file if the format does not need it. XXX: specific
566 hack needed to handle RTSP/TCP */
567 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
568 /* if no file needed do not try to open one */
569 if ((err=avio_open(&pb, filename, AVIO_FLAG_READ)) < 0) {
570 goto fail;
571 }
572 if (buf_size > 0) {
573 ffio_set_buf_size(pb, buf_size);
574 }
575 if (!fmt && (err = av_probe_input_buffer(pb, &fmt, filename, logctx, 0, logctx ? (*ic_ptr)->probesize : 0)) < 0) {
576 goto fail;
577 }
578 }
579
580 /* if still no format found, error */
581 if (!fmt) {
582 err = AVERROR_INVALIDDATA;
583 goto fail;
584 }
585
586 /* check filename in case an image number is expected */
587 if (fmt->flags & AVFMT_NEEDNUMBER) {
588 if (!av_filename_number_test(filename)) {
589 err = AVERROR(EINVAL);
590 goto fail;
591 }
592 }
593 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
594 if (err)
595 goto fail;
596 return 0;
597 fail:
598 av_freep(&pd->buf);
599 if (pb)
600 avio_close(pb);
601 if (ap && ap->prealloced_context)
602 av_free(*ic_ptr);
603 *ic_ptr = NULL;
604 return err;
605
606 }
607
608 /* open input file and probe the format if necessary */
609 static int init_input(AVFormatContext *s, const char *filename)
610 {
611 int ret;
612 AVProbeData pd = {filename, NULL, 0};
613
614 if (s->pb) {
615 s->flags |= AVFMT_FLAG_CUSTOM_IO;
616 if (!s->iformat)
617 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
618 else if (s->iformat->flags & AVFMT_NOFILE)
619 return AVERROR(EINVAL);
620 return 0;
621 }
622
623 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
624 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
625 return 0;
626
627 if ((ret = avio_open(&s->pb, filename, AVIO_FLAG_READ)) < 0)
628 return ret;
629 if (s->iformat)
630 return 0;
631 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
632 }
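/*
 * init_input() above distinguishes three cases: a caller-supplied AVIOContext
 * (s->pb already set, flagged as custom I/O and probed if no format was
 * given), an AVFMT_NOFILE demuxer or a format already guessable from the
 * filename alone (nothing is opened), and the common case where the file is
 * opened with avio_open() and then probed with av_probe_input_buffer().
 */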
633
634 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
635 {
636 AVFormatContext *s = *ps;
637 int ret = 0;
638 AVFormatParameters ap = { 0 };
639 AVDictionary *tmp = NULL;
640
641 if (!s && !(s = avformat_alloc_context()))
642 return AVERROR(ENOMEM);
643 if (fmt)
644 s->iformat = fmt;
645
646 if (options)
647 av_dict_copy(&tmp, *options, 0);
648
649 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
650 goto fail;
651
652 if ((ret = init_input(s, filename)) < 0)
653 goto fail;
654
655 /* check filename in case an image number is expected */
656 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
657 if (!av_filename_number_test(filename)) {
658 ret = AVERROR(EINVAL);
659 goto fail;
660 }
661 }
662
663 s->duration = s->start_time = AV_NOPTS_VALUE;
664 av_strlcpy(s->filename, filename, sizeof(s->filename));
665
666 /* allocate private data */
667 if (s->iformat->priv_data_size > 0) {
668 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
669 ret = AVERROR(ENOMEM);
670 goto fail;
671 }
672 if (s->iformat->priv_class) {
673 *(const AVClass**)s->priv_data = s->iformat->priv_class;
674 av_opt_set_defaults(s->priv_data);
675 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
676 goto fail;
677 }
678 }
679
680     /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
681 if (s->pb)
682 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC);
683
684 if (s->iformat->read_header)
685 if ((ret = s->iformat->read_header(s, &ap)) < 0)
686 goto fail;
687
688 if (s->pb && !s->data_offset)
689 s->data_offset = avio_tell(s->pb);
690
691 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
692
693 if (options) {
694 av_dict_free(options);
695 *options = tmp;
696 }
697 *ps = s;
698 return 0;
699
700 fail:
701 av_dict_free(&tmp);
702 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
703 avio_close(s->pb);
704 avformat_free_context(s);
705 *ps = NULL;
706 return ret;
707 }
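/*
 * A minimal caller-side sketch of the new API (illustrative only; the option
 * name and filename are just examples, and error handling is reduced to the
 * bare minimum):
 *
 *     AVFormatContext *ctx = NULL;   // or a context preallocated by the caller
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "probesize", "65536", 0);
 *     if (avformat_open_input(&ctx, "input.mkv", NULL, &opts) < 0) {
 *         // on failure the context has been freed and *ps reset to NULL
 *     }
 *     av_dict_free(&opts);           // entries left over were not recognized
 *
 * Unlike av_open_input_file(), demuxing options travel in an AVDictionary
 * instead of AVFormatParameters, and unconsumed entries are handed back to
 * the caller through the same dictionary.
 */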
708
709 /*******************************************************/
710
711 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
712 AVPacketList **plast_pktl){
713 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
714 if (!pktl)
715 return NULL;
716
717 if (*packet_buffer)
718 (*plast_pktl)->next = pktl;
719 else
720 *packet_buffer = pktl;
721
722 /* add the packet in the buffered packet list */
723 *plast_pktl = pktl;
724 pktl->pkt= *pkt;
725 return &pktl->pkt;
726 }
727
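/*
 * Read the next raw packet from the demuxer.  Packets belonging to streams
 * whose codec is still CODEC_ID_PROBE are held in s->raw_packet_buffer and
 * their payload accumulated into the stream's AVProbeData until
 * set_codec_from_probe_data() can identify the codec; once a stream is
 * identified, or its probe budget or buffer space is exhausted, the buffered
 * packets are returned before any new ones are read.
 */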
728 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
729 {
730 int ret, i;
731 AVStream *st;
732
733 for(;;){
734 AVPacketList *pktl = s->raw_packet_buffer;
735
736 if (pktl) {
737 *pkt = pktl->pkt;
738 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
739 !s->streams[pkt->stream_index]->probe_packets ||
740 s->raw_packet_buffer_remaining_size < pkt->size){
741 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
742 av_freep(&pd->buf);
743 pd->buf_size = 0;
744 s->raw_packet_buffer = pktl->next;
745 s->raw_packet_buffer_remaining_size += pkt->size;
746 av_free(pktl);
747 return 0;
748 }
749 }
750
751 av_init_packet(pkt);
752 ret= s->iformat->read_packet(s, pkt);
753 if (ret < 0) {
754 if (!pktl || ret == AVERROR(EAGAIN))
755 return ret;
756 for (i = 0; i < s->nb_streams; i++)
757 s->streams[i]->probe_packets = 0;
758 continue;
759 }
760 st= s->streams[pkt->stream_index];
761
762 switch(st->codec->codec_type){
763 case AVMEDIA_TYPE_VIDEO:
764 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
765 break;
766 case AVMEDIA_TYPE_AUDIO:
767 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
768 break;
769 case AVMEDIA_TYPE_SUBTITLE:
770 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
771 break;
772 }
773
774 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
775 !st->probe_packets))
776 return ret;
777
778 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
779 s->raw_packet_buffer_remaining_size -= pkt->size;
780
781 if(st->codec->codec_id == CODEC_ID_PROBE){
782 AVProbeData *pd = &st->probe_data;
783 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
784 --st->probe_packets;
785
786 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
787 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
788 pd->buf_size += pkt->size;
789 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
790
791 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
792                 //FIXME we don't reduce the score to 0 for the case of running out of buffer space in bytes
793 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
794 if(st->codec->codec_id != CODEC_ID_PROBE){
795 pd->buf_size=0;
796 av_freep(&pd->buf);
797 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
798 }
799 }
800 }
801 }
802 }
803
804 /**********************************************************/
805
806 /**
807 * Get the number of samples of an audio frame. Return -1 on error.
808 */
809 static int get_audio_frame_size(AVCodecContext *enc, int size)
810 {
811 int frame_size;
812
813 if(enc->codec_id == CODEC_ID_VORBIS)
814 return -1;
815
816 if (enc->frame_size <= 1) {
817 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
818
819 if (bits_per_sample) {
820 if (enc->channels == 0)
821 return -1;
822 frame_size = (size << 3) / (bits_per_sample * enc->channels);
823 } else {
824 /* used for example by ADPCM codecs */
825 if (enc->bit_rate == 0)
826 return -1;
827 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
828 }
829 } else {
830 frame_size = enc->frame_size;
831 }
832 return frame_size;
833 }
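/*
 * For example (numbers purely illustrative): a 4096-byte packet of 16-bit
 * stereo PCM gives (4096 * 8) / (16 * 2) = 1024 samples per frame, while for
 * a CBR codec without a fixed frame_size the fallback branch above estimates
 * it as size * 8 * sample_rate / bit_rate.
 */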
834
835
836 /**
837  * Compute the frame duration as the fraction *pnum / *pden, in seconds. Both are set to 0 if it is not available.
838 */
839 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
840 AVCodecParserContext *pc, AVPacket *pkt)
841 {
842 int frame_size;
843
844 *pnum = 0;
845 *pden = 0;
846 switch(st->codec->codec_type) {
847 case AVMEDIA_TYPE_VIDEO:
848 if(st->time_base.num*1000LL > st->time_base.den){
849 *pnum = st->time_base.num;
850 *pden = st->time_base.den;
851 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
852 *pnum = st->codec->time_base.num;
853 *pden = st->codec->time_base.den;
854 if (pc && pc->repeat_pict) {
855 *pnum = (*pnum) * (1 + pc->repeat_pict);
856 }
857             //If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet.
858             //Thus, if we have no parser in such a case, leave the duration undefined.
859 if(st->codec->ticks_per_frame>1 && !pc){
860 *pnum = *pden = 0;
861 }
862 }
863 break;
864 case AVMEDIA_TYPE_AUDIO:
865 frame_size = get_audio_frame_size(st->codec, pkt->size);
866 if (frame_size <= 0 || st->codec->sample_rate <= 0)
867 break;
868 *pnum = frame_size;
869 *pden = st->codec->sample_rate;
870 break;
871 default:
872 break;
873 }
874 }
875
876 static int is_intra_only(AVCodecContext *enc){
877 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
878 return 1;
879 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
880 switch(enc->codec_id){
881 case CODEC_ID_MJPEG:
882 case CODEC_ID_MJPEGB:
883 case CODEC_ID_LJPEG:
884 case CODEC_ID_RAWVIDEO:
885 case CODEC_ID_DVVIDEO:
886 case CODEC_ID_HUFFYUV:
887 case CODEC_ID_FFVHUFF:
888 case CODEC_ID_ASV1:
889 case CODEC_ID_ASV2:
890 case CODEC_ID_VCR1:
891 case CODEC_ID_DNXHD:
892 case CODEC_ID_JPEG2000:
893 return 1;
894 default: break;
895 }
896 }
897 return 0;
898 }
899
900 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
901 int64_t dts, int64_t pts)
902 {
903 AVStream *st= s->streams[stream_index];
904 AVPacketList *pktl= s->packet_buffer;
905
906 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
907 return;
908
909 st->first_dts= dts - st->cur_dts;
910 st->cur_dts= dts;
911
912 for(; pktl; pktl= pktl->next){
913 if(pktl->pkt.stream_index != stream_index)
914 continue;
915 //FIXME think more about this check
916 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
917 pktl->pkt.pts += st->first_dts;
918
919 if(pktl->pkt.dts != AV_NOPTS_VALUE)
920 pktl->pkt.dts += st->first_dts;
921
922 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
923 st->start_time= pktl->pkt.pts;
924 }
925 if (st->start_time == AV_NOPTS_VALUE)
926 st->start_time = pts;
927 }
928
929 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
930 {
931 AVPacketList *pktl= s->packet_buffer;
932 int64_t cur_dts= 0;
933
934 if(st->first_dts != AV_NOPTS_VALUE){
935 cur_dts= st->first_dts;
936 for(; pktl; pktl= pktl->next){
937 if(pktl->pkt.stream_index == pkt->stream_index){
938 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
939 break;
940 cur_dts -= pkt->duration;
941 }
942 }
943 pktl= s->packet_buffer;
944 st->first_dts = cur_dts;
945 }else if(st->cur_dts)
946 return;
947
948 for(; pktl; pktl= pktl->next){
949 if(pktl->pkt.stream_index != pkt->stream_index)
950 continue;
951 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
952 && !pktl->pkt.duration){
953 pktl->pkt.dts= cur_dts;
954 if(!st->codec->has_b_frames)
955 pktl->pkt.pts= cur_dts;
956 cur_dts += pkt->duration;
957 pktl->pkt.duration= pkt->duration;
958 }else
959 break;
960 }
961 if(st->first_dts == AV_NOPTS_VALUE)
962 st->cur_dts= cur_dts;
963 }
964
965 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
966 AVCodecParserContext *pc, AVPacket *pkt)
967 {
968 int num, den, presentation_delayed, delay, i;
969 int64_t offset;
970
971 if (s->flags & AVFMT_FLAG_NOFILLIN)
972 return;
973
974 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
975 pkt->dts= AV_NOPTS_VALUE;
976
977 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
978 //FIXME Set low_delay = 0 when has_b_frames = 1
979 st->codec->has_b_frames = 1;
980
981     /* do we have a video B-frame? */
982 delay= st->codec->has_b_frames;
983 presentation_delayed = 0;
984
985 // ignore delay caused by frame threading so that the mpeg2-without-dts
986 // warning will not trigger
987 if (delay && st->codec->active_thread_type&FF_THREAD_FRAME)
988 delay -= st->codec->thread_count-1;
989
990     /* XXX: need has_b_frames, but cannot get it if the codec is
991 not initialized */
992 if (delay &&
993 pc && pc->pict_type != AV_PICTURE_TYPE_B)
994 presentation_delayed = 1;
995
996 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
997 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
998 pkt->dts -= 1LL<<st->pts_wrap_bits;
999 }
1000
1001 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1002 // we take the conservative approach and discard both
1003     // Note, if this is misbehaving for an H.264 file then possibly presentation_delayed is not set correctly.
1004 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1005 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
1006 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
1007 }
1008
1009 if (pkt->duration == 0) {
1010 compute_frame_duration(&num, &den, st, pc, pkt);
1011 if (den && num) {
1012 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1013
1014 if(pkt->duration != 0 && s->packet_buffer)
1015 update_initial_durations(s, st, pkt);
1016 }
1017 }
1018
1019 /* correct timestamps with byte offset if demuxers only have timestamps
1020 on packet boundaries */
1021 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1022 /* this will estimate bitrate based on this frame's duration and size */
1023 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1024 if(pkt->pts != AV_NOPTS_VALUE)
1025 pkt->pts += offset;
1026 if(pkt->dts != AV_NOPTS_VALUE)
1027 pkt->dts += offset;
1028 }
1029
1030 if (pc && pc->dts_sync_point >= 0) {
1031 // we have synchronization info from the parser
1032 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1033 if (den > 0) {
1034 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1035 if (pkt->dts != AV_NOPTS_VALUE) {
1036 // got DTS from the stream, update reference timestamp
1037 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1038 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1039 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1040 // compute DTS based on reference timestamp
1041 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1042 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1043 }
1044 if (pc->dts_sync_point > 0)
1045 st->reference_dts = pkt->dts; // new reference
1046 }
1047 }
1048
1049 /* This may be redundant, but it should not hurt. */
1050 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1051 presentation_delayed = 1;
1052
1053 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
1054 /* interpolate PTS and DTS if they are not present */
1055 //We skip H264 currently because delay and has_b_frames are not reliably set
1056 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1057 if (presentation_delayed) {
1058 /* DTS = decompression timestamp */
1059 /* PTS = presentation timestamp */
1060 if (pkt->dts == AV_NOPTS_VALUE)
1061 pkt->dts = st->last_IP_pts;
1062 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1063 if (pkt->dts == AV_NOPTS_VALUE)
1064 pkt->dts = st->cur_dts;
1065
1066 /* this is tricky: the dts must be incremented by the duration
1067 of the frame we are displaying, i.e. the last I- or P-frame */
1068 if (st->last_IP_duration == 0)
1069 st->last_IP_duration = pkt->duration;
1070 if(pkt->dts != AV_NOPTS_VALUE)
1071 st->cur_dts = pkt->dts + st->last_IP_duration;
1072 st->last_IP_duration = pkt->duration;
1073 st->last_IP_pts= pkt->pts;
1074 /* cannot compute PTS if not present (we can compute it only
1075                by knowing the future) */
1076 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
1077 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
1078 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
1079 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1080 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
1081 pkt->pts += pkt->duration;
1082 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
1083 }
1084 }
1085
1086         /* presentation is not delayed: PTS and DTS are the same */
1087 if(pkt->pts == AV_NOPTS_VALUE)
1088 pkt->pts = pkt->dts;
1089 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1090 if(pkt->pts == AV_NOPTS_VALUE)
1091 pkt->pts = st->cur_dts;
1092 pkt->dts = pkt->pts;
1093 if(pkt->pts != AV_NOPTS_VALUE)
1094 st->cur_dts = pkt->pts + pkt->duration;
1095 }
1096 }
1097
1098 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1099 st->pts_buffer[0]= pkt->pts;
1100 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1101 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1102 if(pkt->dts == AV_NOPTS_VALUE)
1103 pkt->dts= st->pts_buffer[0];
1104         if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
1105 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1106 }
1107 if(pkt->dts > st->cur_dts)
1108 st->cur_dts = pkt->dts;
1109 }
1110
1111 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1112
1113 /* update flags */
1114 if(is_intra_only(st->codec))
1115 pkt->flags |= AV_PKT_FLAG_KEY;
1116 else if (pc) {
1117 pkt->flags = 0;
1118 /* keyframe computation */
1119 if (pc->key_frame == 1)
1120 pkt->flags |= AV_PKT_FLAG_KEY;
1121 else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
1122 pkt->flags |= AV_PKT_FLAG_KEY;
1123 }
1124 if (pc)
1125 pkt->convergence_duration = pc->convergence_duration;
1126 }
1127
1128
1129 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1130 {
1131 AVStream *st;
1132 int len, ret, i;
1133
1134 av_init_packet(pkt);
1135
1136 for(;;) {
1137 /* select current input stream component */
1138 st = s->cur_st;
1139 if (st) {
1140 if (!st->need_parsing || !st->parser) {
1141 /* no parsing needed: we just output the packet as is */
1142 /* raw data support */
1143 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
1144 compute_pkt_fields(s, st, NULL, pkt);
1145 s->cur_st = NULL;
1146 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1147 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1148 ff_reduce_index(s, st->index);
1149 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1150 }
1151 break;
1152 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1153 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1154 st->cur_ptr, st->cur_len,
1155 st->cur_pkt.pts, st->cur_pkt.dts,
1156 st->cur_pkt.pos);
1157 st->cur_pkt.pts = AV_NOPTS_VALUE;
1158 st->cur_pkt.dts = AV_NOPTS_VALUE;
1159 /* increment read pointer */
1160 st->cur_ptr += len;
1161 st->cur_len -= len;
1162
1163 /* return packet if any */
1164 if (pkt->size) {
1165 got_packet:
1166 pkt->duration = 0;
1167 pkt->stream_index = st->index;
1168 pkt->pts = st->parser->pts;
1169 pkt->dts = st->parser->dts;
1170 pkt->pos = st->parser->pos;
1171 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
1172 s->cur_st = NULL;
1173 pkt->destruct= st->cur_pkt.destruct;
1174 st->cur_pkt.destruct= NULL;
1175 st->cur_pkt.data = NULL;
1176 assert(st->cur_len == 0);
1177 }else{
1178 pkt->destruct = NULL;
1179 }
1180 compute_pkt_fields(s, st, st->parser, pkt);
1181
1182 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1183 ff_reduce_index(s, st->index);
1184 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1185 0, 0, AVINDEX_KEYFRAME);
1186 }
1187
1188 break;
1189 }
1190 } else {
1191 /* free packet */
1192 av_free_packet(&st->cur_pkt);
1193 s->cur_st = NULL;
1194 }
1195 } else {
1196 AVPacket cur_pkt;
1197 /* read next packet */
1198 ret = av_read_packet(s, &cur_pkt);
1199 if (ret < 0) {
1200 if (ret == AVERROR(EAGAIN))
1201 return ret;
1202 /* return the last frames, if any */
1203 for(i = 0; i < s->nb_streams; i++) {
1204 st = s->streams[i];
1205 if (st->parser && st->need_parsing) {
1206 av_parser_parse2(st->parser, st->codec,
1207 &pkt->data, &pkt->size,
1208 NULL, 0,
1209 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1210 AV_NOPTS_VALUE);
1211 if (pkt->size)
1212 goto got_packet;
1213 }
1214 }
1215 /* no more packets: really terminate parsing */
1216 return ret;
1217 }
1218 st = s->streams[cur_pkt.stream_index];
1219 st->cur_pkt= cur_pkt;
1220
1221 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1222 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1223 st->cur_pkt.pts < st->cur_pkt.dts){
1224 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1225 st->cur_pkt.stream_index,
1226 st->cur_pkt.pts,
1227 st->cur_pkt.dts,
1228 st->cur_pkt.size);
1229 // av_free_packet(&st->cur_pkt);
1230 // return -1;
1231 }
1232
1233 if(s->debug & FF_FDEBUG_TS)
1234 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1235 st->cur_pkt.stream_index,
1236 st->cur_pkt.pts,
1237 st->cur_pkt.dts,
1238 st->cur_pkt.size,
1239 st->cur_pkt.duration,
1240 st->cur_pkt.flags);
1241
1242 s->cur_st = st;
1243 st->cur_ptr = st->cur_pkt.data;
1244 st->cur_len = st->cur_pkt.size;
1245 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1246 st->parser = av_parser_init(st->codec->codec_id);
1247 if (!st->parser) {
1248 /* no parser available: just output the raw packets */
1249 st->need_parsing = AVSTREAM_PARSE_NONE;
1250 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1251 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1252 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1253 st->parser->flags |= PARSER_FLAG_ONCE;
1254 }
1255 }
1256 }
1257 }
1258 if(s->debug & FF_FDEBUG_TS)
1259 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1260 pkt->stream_index,
1261 pkt->pts,
1262 pkt->dts,
1263 pkt->size,
1264 pkt->duration,
1265 pkt->flags);
1266
1267 return 0;
1268 }
1269
1270 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1271 {
1272 AVPacketList *pktl;
1273 int eof=0;
1274 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1275
1276 for(;;){
1277 pktl = s->packet_buffer;
1278 if (pktl) {
1279 AVPacket *next_pkt= &pktl->pkt;
1280
1281 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1282 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1283 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1284 if( pktl->pkt.stream_index == next_pkt->stream_index
1285 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1286 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1287 next_pkt->pts= pktl->pkt.dts;
1288 }
1289 pktl= pktl->next;
1290 }
1291 pktl = s->packet_buffer;
1292 }
1293
1294 if( next_pkt->pts != AV_NOPTS_VALUE
1295 || next_pkt->dts == AV_NOPTS_VALUE
1296 || !genpts || eof){
1297 /* read packet from packet buffer, if there is data */
1298 *pkt = *next_pkt;
1299 s->packet_buffer = pktl->next;
1300 av_free(pktl);
1301 return 0;
1302 }
1303 }
1304 if(genpts){
1305 int ret= av_read_frame_internal(s, pkt);
1306 if(ret<0){
1307 if(pktl && ret != AVERROR(EAGAIN)){
1308 eof=1;
1309 continue;
1310 }else
1311 return ret;
1312 }
1313
1314 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1315 &s->packet_buffer_end)) < 0)
1316 return AVERROR(ENOMEM);
1317 }else{
1318 assert(!s->packet_buffer);
1319 return av_read_frame_internal(s, pkt);
1320 }
1321 }
1322 }
1323
1324 /* XXX: suppress the packet queue */
1325 static void flush_packet_queue(AVFormatContext *s)
1326 {
1327 AVPacketList *pktl;
1328
1329 for(;;) {
1330 pktl = s->packet_buffer;
1331 if (!pktl)
1332 break;
1333 s->packet_buffer = pktl->next;
1334 av_free_packet(&pktl->pkt);
1335 av_free(pktl);
1336 }
1337 while(s->raw_packet_buffer){
1338 pktl = s->raw_packet_buffer;
1339 s->raw_packet_buffer = pktl->next;
1340 av_free_packet(&pktl->pkt);
1341 av_free(pktl);
1342 }
1343 s->packet_buffer_end=
1344 s->raw_packet_buffer_end= NULL;
1345 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1346 }
1347
1348 /*******************************************************/
1349 /* seek support */
1350
1351 int av_find_default_stream_index(AVFormatContext *s)
1352 {
1353 int first_audio_index = -1;
1354 int i;
1355 AVStream *st;
1356
1357 if (s->nb_streams <= 0)
1358 return -1;
1359 for(i = 0; i < s->nb_streams; i++) {
1360 st = s->streams[i];
1361 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1362 return i;
1363 }
1364 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1365 first_audio_index = i;
1366 }
1367 return first_audio_index >= 0 ? first_audio_index : 0;
1368 }
1369
1370 /**
1371 * Flush the frame reader.
1372 */
1373 void ff_read_frame_flush(AVFormatContext *s)
1374 {
1375 AVStream *st;
1376 int i, j;
1377
1378 flush_packet_queue(s);
1379
1380 s->cur_st = NULL;
1381
1382 /* for each stream, reset read state */
1383 for(i = 0; i < s->nb_streams; i++) {
1384 st = s->streams[i];
1385
1386 if (st->parser) {
1387 av_parser_close(st->parser);
1388 st->parser = NULL;
1389 av_free_packet(&st->cur_pkt);
1390 }
1391 st->last_IP_pts = AV_NOPTS_VALUE;
1392 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1393 st->reference_dts = AV_NOPTS_VALUE;
1394 /* fail safe */
1395 st->cur_ptr = NULL;
1396 st->cur_len = 0;
1397
1398 st->probe_packets = MAX_PROBE_PACKETS;
1399
1400 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1401 st->pts_buffer[j]= AV_NOPTS_VALUE;
1402 }
1403 }
1404
1405 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1406 int i;
1407
1408 for(i = 0; i < s->nb_streams; i++) {
1409 AVStream *st = s->streams[i];
1410
1411 st->cur_dts = av_rescale(timestamp,
1412 st->time_base.den * (int64_t)ref_st->time_base.num,
1413 st->time_base.num * (int64_t)ref_st->time_base.den);
1414 }
1415 }
1416
1417 void ff_reduce_index(AVFormatContext *s, int stream_index)
1418 {
1419 AVStream *st= s->streams[stream_index];
1420 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1421
1422 if((unsigned)st->nb_index_entries >= max_entries){
1423 int i;
1424 for(i=0; 2*i<st->nb_index_entries; i++)
1425 st->index_entries[i]= st->index_entries[2*i];
1426 st->nb_index_entries= i;
1427 }
1428 }
1429
1430 int ff_add_index_entry(AVIndexEntry **index_entries,
1431 int *nb_index_entries,
1432 unsigned int *index_entries_allocated_size,
1433 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1434 {
1435 AVIndexEntry *entries, *ie;
1436 int index;
1437
1438 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1439 return -1;
1440
1441 entries = av_fast_realloc(*index_entries,
1442 index_entries_allocated_size,
1443 (*nb_index_entries + 1) *
1444 sizeof(AVIndexEntry));
1445 if(!entries)
1446 return -1;
1447
1448 *index_entries= entries;
1449
1450 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1451
1452 if(index<0){
1453 index= (*nb_index_entries)++;
1454 ie= &entries[index];
1455 assert(index==0 || ie[-1].timestamp < timestamp);
1456 }else{
1457 ie= &entries[index];
1458 if(ie->timestamp != timestamp){
1459 if(ie->timestamp <= timestamp)
1460 return -1;
1461 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1462 (*nb_index_entries)++;
1463 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1464 distance= ie->min_distance;
1465 }
1466
1467 ie->pos = pos;
1468 ie->timestamp = timestamp;
1469 ie->min_distance= distance;
1470 ie->size= size;
1471 ie->flags = flags;
1472
1473 return index;
1474 }
1475
1476 int av_add_index_entry(AVStream *st,
1477 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1478 {
1479 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1480 &st->index_entries_allocated_size, pos,
1481 timestamp, size, distance, flags);
1482 }
1483
1484 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1485 int64_t wanted_timestamp, int flags)
1486 {
1487 int a, b, m;
1488 int64_t timestamp;
1489
1490 a = - 1;
1491 b = nb_entries;
1492
1493 //optimize appending index entries at the end
1494 if(b && entries[b-1].timestamp < wanted_timestamp)
1495 a= b-1;
1496
1497 while (b - a > 1) {
1498 m = (a + b) >> 1;
1499 timestamp = entries[m].timestamp;
1500 if(timestamp >= wanted_timestamp)
1501 b = m;
1502 if(timestamp <= wanted_timestamp)
1503 a = m;
1504 }
1505 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1506
1507 if(!(flags & AVSEEK_FLAG_ANY)){
1508 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1509 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1510 }
1511 }
1512
1513 if(m == nb_entries)
1514 return -1;
1515 return m;
1516 }
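/*
 * Illustrative behaviour (entries invented for the example): with keyframe
 * timestamps {0, 1000, 2000} and wanted_timestamp = 1500, the binary search
 * above returns the entry at 1000 when AVSEEK_FLAG_BACKWARD is set and the
 * entry at 2000 otherwise; without AVSEEK_FLAG_ANY the result is additionally
 * walked to the nearest AVINDEX_KEYFRAME entry in the chosen direction.
 */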
1517
1518 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1519 int flags)
1520 {
1521 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1522 wanted_timestamp, flags);
1523 }
1524
1525 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1526 AVInputFormat *avif= s->iformat;
1527 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1528 int64_t ts_min, ts_max, ts;
1529 int index;
1530 int64_t ret;
1531 AVStream *st;
1532
1533 if (stream_index < 0)
1534 return -1;
1535
1536 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1537
1538 ts_max=
1539 ts_min= AV_NOPTS_VALUE;
1540 pos_limit= -1; //gcc falsely says it may be uninitialized
1541
1542 st= s->streams[stream_index];
1543 if(st->index_entries){
1544 AVIndexEntry *e;
1545
1546 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1547 index= FFMAX(index, 0);
1548 e= &st->index_entries[index];
1549
1550 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1551 pos_min= e->pos;
1552 ts_min= e->timestamp;
1553 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1554 pos_min,ts_min);
1555 }else{
1556 assert(index==0);
1557 }
1558
1559 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1560 assert(index < st->nb_index_entries);
1561 if(index >= 0){
1562 e= &st->index_entries[index];
1563 assert(e->timestamp >= target_ts);
1564 pos_max= e->pos;
1565 ts_max= e->timestamp;
1566 pos_limit= pos_max - e->min_distance;
1567 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1568 pos_max,pos_limit, ts_max);
1569 }
1570 }
1571
1572 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1573 if(pos<0)
1574 return -1;
1575
1576 /* do the seek */
1577 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1578 return ret;
1579
1580 av_update_cur_dts(s, st, ts);
1581
1582 return 0;
1583 }
1584
1585 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1586 int64_t pos, ts;
1587 int64_t start_pos, filesize;
1588 int no_change;
1589
1590 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1591
1592 if(ts_min == AV_NOPTS_VALUE){
1593 pos_min = s->data_offset;
1594 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1595 if (ts_min == AV_NOPTS_VALUE)
1596 return -1;
1597 }
1598
1599 if(ts_max == AV_NOPTS_VALUE){
1600 int step= 1024;
1601 filesize = avio_size(s->pb);
1602 pos_max = filesize - 1;
1603 do{
1604 pos_max -= step;
1605 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1606 step += step;
1607 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1608 if (ts_max == AV_NOPTS_VALUE)
1609 return -1;
1610
1611 for(;;){
1612 int64_t tmp_pos= pos_max + 1;
1613 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1614 if(tmp_ts == AV_NOPTS_VALUE)
1615 break;
1616 ts_max= tmp_ts;
1617 pos_max= tmp_pos;
1618 if(tmp_pos >= filesize)
1619 break;
1620 }
1621 pos_limit= pos_max;
1622 }
1623
1624 if(ts_min > ts_max){
1625 return -1;
1626 }else if(ts_min == ts_max){
1627 pos_limit= pos_min;
1628 }
1629
1630 no_change=0;
1631 while (pos_min < pos_limit) {
1632 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1633 pos_min, pos_max, ts_min, ts_max);
1634 assert(pos_limit <= pos_max);
1635
1636 if(no_change==0){
1637 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1638 // interpolate position (better than dichotomy)
1639 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1640 + pos_min - approximate_keyframe_distance;
1641 }else if(no_change==1){
1642 // bisection, if interpolation failed to change min or max pos last time
1643 pos = (pos_min + pos_limit)>>1;
1644 }else{
1645 /* linear search if bisection failed, can only happen if there
1646 are very few or no keyframes between min/max */
1647 pos=pos_min;
1648 }
1649 if(pos <= pos_min)
1650 pos= pos_min + 1;
1651 else if(pos > pos_limit)
1652 pos= pos_limit;
1653 start_pos= pos;
1654
1655 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1656 if(pos == pos_max)
1657 no_change++;
1658 else
1659 no_change=0;
1660 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1661 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1662 pos_limit, start_pos, no_change);
1663 if(ts == AV_NOPTS_VALUE){
1664 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1665 return -1;
1666 }
1667 assert(ts != AV_NOPTS_VALUE);
1668 if (target_ts <= ts) {
1669 pos_limit = start_pos - 1;
1670 pos_max = pos;
1671 ts_max = ts;
1672 }
1673 if (target_ts >= ts) {
1674 pos_min = pos;
1675 ts_min = ts;
1676 }
1677 }
1678
1679 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1680 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1681 pos_min = pos;
1682 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1683 pos_min++;
1684 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1685 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1686 pos, ts_min, target_ts, ts_max);
1687 *ts_ret= ts;
1688 return pos;
1689 }
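/*
 * The search loop above narrows [pos_min, pos_limit] in three regimes: first
 * by linear interpolation between the known (pos, ts) endpoints, then by
 * plain bisection if an interpolation step failed to move the bounds, and
 * finally by linear scanning from pos_min when even bisection makes no
 * progress, which can only happen when there are very few keyframes in the
 * interval.
 */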
1690
1691 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1692 int64_t pos_min, pos_max;
1693 #if 0
1694 AVStream *st;
1695
1696 if (stream_index < 0)
1697 return -1;
1698
1699 st= s->streams[stream_index];
1700 #endif
1701
1702 pos_min = s->data_offset;
1703 pos_max = avio_size(s->pb) - 1;
1704
1705 if (pos < pos_min) pos= pos_min;
1706 else if(pos > pos_max) pos= pos_max;
1707
1708 avio_seek(s->pb, pos, SEEK_SET);
1709
1710 #if 0
1711 av_update_cur_dts(s, st, ts);
1712 #endif
1713 return 0;
1714 }
1715
1716 static int av_seek_frame_generic(AVFormatContext *s,
1717 int stream_index, int64_t timestamp, int flags)
1718 {
1719 int index;
1720 int64_t ret;
1721 AVStream *st;
1722 AVIndexEntry *ie;
1723
1724 st = s->streams[stream_index];
1725
1726 index = av_index_search_timestamp(st, timestamp, flags);
1727
1728 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1729 return -1;
1730
1731 if(index < 0 || index==st->nb_index_entries-1){
1732 int i;
1733 AVPacket pkt;
1734
1735 if(st->nb_index_entries){
1736 assert(st->index_entries);
1737 ie= &st->index_entries[st->nb_index_entries-1];
1738 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1739 return ret;
1740 av_update_cur_dts(s, st, ie->timestamp);
1741 }else{
1742 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1743 return ret;
1744 }
1745 for(i=0;; i++) {
1746 int ret;
1747 do{
1748 ret = av_read_frame(s, &pkt);
1749 }while(ret == AVERROR(EAGAIN));
1750 if(ret<0)
1751 break;
1752 av_free_packet(&pkt);
1753 if(stream_index == pkt.stream_index){
1754 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1755 break;
1756 }
1757 }
1758 index = av_index_search_timestamp(st, timestamp, flags);
1759 }
1760 if (index < 0)
1761 return -1;
1762
1763 ff_read_frame_flush(s);
1764 if (s->iformat->read_seek){
1765 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1766 return 0;
1767 }
1768 ie = &st->index_entries[index];
1769 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1770 return ret;
1771 av_update_cur_dts(s, st, ie->timestamp);
1772
1773 return 0;
1774 }
1775
1776 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1777 {
1778 int ret;
1779 AVStream *st;
1780
1781 ff_read_frame_flush(s);
1782
1783 if(flags & AVSEEK_FLAG_BYTE)
1784 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1785
1786 if(stream_index < 0){
1787 stream_index= av_find_default_stream_index(s);
1788 if(stream_index < 0)
1789 return -1;
1790
1791 st= s->streams[stream_index];
1792 /* timestamp for default must be expressed in AV_TIME_BASE units */
1793 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1794 }
1795
1796 /* first, we try the format specific seek */
1797 if (s->iformat->read_seek)
1798 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1799 else
1800 ret = -1;
1801 if (ret >= 0) {
1802 return 0;
1803 }
1804
1805 if(s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH))
1806 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1807 else if (!(s->iformat->flags & AVFMT_NOGENSEARCH))
1808 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1809 else
1810 return -1;
1811 }
1812
1813 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1814 {
1815 if(min_ts > ts || max_ts < ts)
1816 return -1;
1817
1818 ff_read_frame_flush(s);
1819
1820 if (s->iformat->read_seek2)
1821 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1822
1823 if(s->iformat->read_timestamp){
1824 //try to seek via read_timestamp()
1825 }
1826
1827     //Fall back to the old API if the new one is not implemented but the old one is.
1828     //Note the old API has somewhat different semantics.
1829 if(s->iformat->read_seek || 1)
1830 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1831
1832 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1833 }
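/*
 * Caller-side sketch (timestamps are only examples): to seek to ts while
 * accepting any earlier position, pass an open-ended lower bound:
 *
 *     avformat_seek_file(s, stream_index, INT64_MIN, ts, ts, 0);
 *
 * When the demuxer provides no read_seek2(), the call falls through to
 * av_seek_frame() and the min/max window is collapsed into a single
 * AVSEEK_FLAG_BACKWARD decision, as noted in the comments above.
 */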
1834
1835 /*******************************************************/
1836
1837 /**
1838  * Return TRUE if the file has an accurate duration for at least one stream.
1839  *
1840  * @return TRUE if at least one stream has an accurate duration.
1841 */
1842 static int av_has_duration(AVFormatContext *ic)
1843 {
1844 int i;
1845 AVStream *st;
1846
1847 for(i = 0;i < ic->nb_streams; i++) {
1848 st = ic->streams[i];
1849 if (st->duration != AV_NOPTS_VALUE)
1850 return 1;
1851 }
1852 return 0;
1853 }
1854
1855 /**
1856  * Estimate the overall stream timings from those of the individual streams.
1857 *
1858 * Also computes the global bitrate if possible.
1859 */
1860 static void av_update_stream_timings(AVFormatContext *ic)
1861 {
1862 int64_t start_time, start_time1, end_time, end_time1;
1863 int64_t duration, duration1;
1864 int i;
1865 AVStream *st;
1866
1867 start_time = INT64_MAX;
1868 end_time = INT64_MIN;
1869 duration = INT64_MIN;
1870 for(i = 0;i < ic->nb_streams; i++) {
1871 st = ic->streams[i];
1872 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1873 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1874 if (start_time1 < start_time)
1875 start_time = start_time1;
1876 if (st->duration != AV_NOPTS_VALUE) {
1877 end_time1 = start_time1
1878 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1879 if (end_time1 > end_time)
1880 end_time = end_time1;
1881 }
1882 }
1883 if (st->duration != AV_NOPTS_VALUE) {
1884 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1885 if (duration1 > duration)
1886 duration = duration1;
1887 }
1888 }
1889 if (start_time != INT64_MAX) {
1890 ic->start_time = start_time;
1891 if (end_time != INT64_MIN) {
1892 if (end_time - start_time > duration)
1893 duration = end_time - start_time;
1894 }
1895 }
1896 if (duration != INT64_MIN) {
1897 ic->duration = duration;
1898 if (ic->file_size > 0) {
1899 /* compute the bitrate */
1900 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1901 (double)ic->duration;
1902 }
1903 }
1904 }
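/*
 * Worked example for the bitrate estimate above (figures invented): a file of
 * 10 000 000 bytes with a computed duration of 60 seconds (60 * AV_TIME_BASE
 * internal units) yields 10 000 000 * 8 * AV_TIME_BASE / (60 * AV_TIME_BASE)
 * = 1 333 333 bit/s, i.e. roughly 1.3 Mb/s.
 */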
1905
1906 static void fill_all_stream_timings(AVFormatContext *ic)
1907 {
1908 int i;
1909 AVStream *st;
1910
1911 av_update_stream_timings(ic);
1912 for(i = 0;i < ic->nb_streams; i++) {
1913 st = ic->streams[i];
1914 if (st->start_time == AV_NOPTS_VALUE) {
1915 if(ic->start_time != AV_NOPTS_VALUE)
1916 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1917 if(ic->duration != AV_NOPTS_VALUE)
1918 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1919 }
1920 }
1921 }
1922
1923 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1924 {
1925 int64_t filesize, duration;
1926 int bit_rate, i;
1927 AVStream *st;
1928
1929 /* if bit_rate is already set, we believe it */
1930 if (ic->bit_rate <= 0) {
1931 bit_rate = 0;
1932 for(i=0;i<ic->nb_streams;i++) {
1933 st = ic->streams[i];
1934 if (st->codec->bit_rate > 0)
1935 bit_rate += st->codec->bit_rate;
1936 }
1937 ic->bit_rate = bit_rate;
1938 }
1939
1940 /* if duration is already set, we believe it */
1941 if (ic->duration == AV_NOPTS_VALUE &&
1942 ic->bit_rate != 0 &&
1943 ic->file_size != 0) {
1944 filesize = ic->file_size;
1945 if (filesize > 0) {
1946 for(i = 0; i < ic->nb_streams; i++) {
1947 st = ic->streams[i];
1948 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1949 if (st->duration == AV_NOPTS_VALUE)
1950 st->duration = duration;
1951 }
1952 }
1953 }
1954 }
1955
1956 #define DURATION_MAX_READ_SIZE 250000
1957 #define DURATION_MAX_RETRY 3
1958
1959 /* only usable for MPEG-PS streams */
1960 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1961 {
1962 AVPacket pkt1, *pkt = &pkt1;
1963 AVStream *st;
1964 int read_size, i, ret;
1965 int64_t end_time;
1966 int64_t filesize, offset, duration;
1967 int retry=0;
1968
1969 ic->cur_st = NULL;
1970
1971 /* flush packet queue */
1972 flush_packet_queue(ic);
1973
1974 for (i=0; i<ic->nb_streams; i++) {
1975 st = ic->streams[i];
1976 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1977 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");
1978
1979 if (st->parser) {
1980 av_parser_close(st->parser);
1981 st->parser= NULL;
1982 av_free_packet(&st->cur_pkt);
1983 }
1984 }
1985
1986 /* estimate the end time (duration) */
1987 /* XXX: may need to support wrapping */
1988 filesize = ic->file_size;
1989 end_time = AV_NOPTS_VALUE;
1990 do{
1991 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1992 if (offset < 0)
1993 offset = 0;
1994
1995 avio_seek(ic->pb, offset, SEEK_SET);
1996 read_size = 0;
1997 for(;;) {
1998 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1999 break;
2000
2001 do{
2002 ret = av_read_packet(ic, pkt);
2003 }while(ret == AVERROR(EAGAIN));
2004 if (ret != 0)
2005 break;
2006 read_size += pkt->size;
2007 st = ic->streams[pkt->stream_index];
2008 if (pkt->pts != AV_NOPTS_VALUE &&
2009 (st->start_time != AV_NOPTS_VALUE ||
2010 st->first_dts != AV_NOPTS_VALUE)) {
2011 duration = end_time = pkt->pts;
2012 if (st->start_time != AV_NOPTS_VALUE) duration -= st->start_time;
2013 else duration -= st->first_dts;
2014 if (duration < 0)
2015 duration += 1LL<<st->pts_wrap_bits;
2016 if (duration > 0) {
2017 if (st->duration == AV_NOPTS_VALUE ||
2018 st->duration < duration)
2019 st->duration = duration;
2020 }
2021 }
2022 av_free_packet(pkt);
2023 }
2024 }while( end_time==AV_NOPTS_VALUE
2025 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2026 && ++retry <= DURATION_MAX_RETRY);
2027
2028 fill_all_stream_timings(ic);
2029
2030 avio_seek(ic->pb, old_offset, SEEK_SET);
2031 for (i=0; i<ic->nb_streams; i++) {
2032 st= ic->streams[i];
2033 st->cur_dts= st->first_dts;
2034 st->last_IP_pts = AV_NOPTS_VALUE;
2035 st->reference_dts = AV_NOPTS_VALUE;
2036 }
2037 }
2038
2039 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
2040 {
2041 int64_t file_size;
2042
2043 /* get the file size, if possible */
2044 if (ic->iformat->flags & AVFMT_NOFILE) {
2045 file_size = 0;
2046 } else {
2047 file_size = avio_size(ic->pb);
2048 if (file_size < 0)
2049 file_size = 0;
2050 }
2051 ic->file_size = file_size;
2052
2053 if ((!strcmp(ic->iformat->name, "mpeg") ||
2054 !strcmp(ic->iformat->name, "mpegts")) &&
2055 file_size && ic->pb->seekable) {
2056 /* get accurate estimate from the PTSes */
2057 av_estimate_timings_from_pts(ic, old_offset);
2058 } else if (av_has_duration(ic)) {
2059 /* at least one component has timings - we use them for all
2060 the components */
2061 fill_all_stream_timings(ic);
2062 } else {
2063 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2064 /* less precise: use bitrate info */
2065 av_estimate_timings_from_bit_rate(ic);
2066 }
2067 av_update_stream_timings(ic);
2068
2069 {
2070 int i;
2071 AVStream av_unused *st;
2072 for(i = 0;i < ic->nb_streams; i++) {
2073 st = ic->streams[i];
2074 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2075 (double) st->start_time / AV_TIME_BASE,
2076 (double) st->duration / AV_TIME_BASE);
2077 }
2078 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2079 (double) ic->start_time / AV_TIME_BASE,
2080 (double) ic->duration / AV_TIME_BASE,
2081 ic->bit_rate / 1000);
2082 }
2083 }
2084
2085 static int has_codec_parameters(AVCodecContext *enc)
2086 {
2087 int val;
2088 switch(enc->codec_type) {
2089 case AVMEDIA_TYPE_AUDIO:
2090 val = enc->sample_rate && enc->channels && enc->sample_fmt != AV_SAMPLE_FMT_NONE;
2091 if(!enc->frame_size &&
2092 (enc->codec_id == CODEC_ID_VORBIS ||
2093 enc->codec_id == CODEC_ID_AAC ||
2094 enc->codec_id == CODEC_ID_MP1 ||
2095 enc->codec_id == CODEC_ID_MP2 ||
2096 enc->codec_id == CODEC_ID_MP3 ||
2097 enc->codec_id == CODEC_ID_SPEEX))
2098 return 0;
2099 break;
2100 case AVMEDIA_TYPE_VIDEO:
2101 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
2102 break;
2103 default:
2104 val = 1;
2105 break;
2106 }
2107 return enc->codec_id != CODEC_ID_NONE && val != 0;
2108 }
2109
2110 static int has_decode_delay_been_guessed(AVStream *st)
2111 {
2112 return st->codec->codec_id != CODEC_ID_H264 ||
2113 st->codec_info_nb_frames >= 6 + st->codec->has_b_frames;
2114 }
2115
2116 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
2117 {
2118 int16_t *samples;
2119 AVCodec *codec;
2120 int got_picture, data_size, ret=0;
2121 AVFrame picture;
2122
2123 if(!st->codec->codec){
2124 codec = avcodec_find_decoder(st->codec->codec_id);
2125 if (!codec)
2126 return -1;
2127 ret = avcodec_open(st->codec, codec);
2128 if (ret < 0)
2129 return ret;
2130 }
2131
2132 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)){
2133 switch(st->codec->codec_type) {
2134 case AVMEDIA_TYPE_VIDEO:
2135 avcodec_get_frame_defaults(&picture);
2136 ret = avcodec_decode_video2(st->codec, &picture,
2137 &got_picture, avpkt);
2138 break;
2139 case AVMEDIA_TYPE_AUDIO:
2140 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2141 samples = av_malloc(data_size);
2142 if (!samples)
2143 goto fail;
2144 ret = avcodec_decode_audio3(st->codec, samples,
2145 &data_size, avpkt);
2146 av_free(samples);
2147 break;
2148 default:
2149 break;
2150 }
2151 }
2152 fail:
2153 return ret;
2154 }
2155
2156 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2157 {
2158 while (tags->id != CODEC_ID_NONE) {
2159 if (tags->id == id)
2160 return tags->tag;
2161 tags++;
2162 }
2163 return 0;
2164 }
2165
2166 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2167 {
2168 int i;
2169 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2170 if(tag == tags[i].tag)
2171 return tags[i].id;
2172 }
2173 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2174 if (ff_toupper4(tag) == ff_toupper4(tags[i].tag))
2175 return tags[i].id;
2176 }
2177 return CODEC_ID_NONE;
2178 }
2179
2180 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2181 {
2182 int i;
2183 for(i=0; tags && tags[i]; i++){
2184 int tag= ff_codec_get_tag(tags[i], id);
2185 if(tag) return tag;
2186 }
2187 return 0;
2188 }
2189
2190 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2191 {
2192 int i;
2193 for(i=0; tags && tags[i]; i++){
2194 enum CodecID id= ff_codec_get_id(tags[i], tag);
2195 if(id!=CODEC_ID_NONE) return id;
2196 }
2197 return CODEC_ID_NONE;
2198 }
2199
2200 static void compute_chapters_end(AVFormatContext *s)
2201 {
2202 unsigned int i, j;
2203 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2204
2205 for (i = 0; i < s->nb_chapters; i++)
2206 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2207 AVChapter *ch = s->chapters[i];
2208 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2209 : INT64_MAX;
2210
2211 for (j = 0; j < s->nb_chapters; j++) {
2212 AVChapter *ch1 = s->chapters[j];
2213 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2214 if (j != i && next_start > ch->start && next_start < end)
2215 end = next_start;
2216 }
2217 ch->end = (end == INT64_MAX) ? ch->start : end;
2218 }
2219 }
2220
2221 static int get_std_framerate(int i){
2222 if(i<60*12) return i*1001;
2223 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2224 }
2225
2226 /*
2227 * Is the time base unreliable.
2228 * This is a heuristic to balance between quick acceptance of the values in
2229 * the headers vs. some extra checks.
2230 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2231 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2232 * And there are "variable" fps files this needs to detect as well.
2233 */
2234 static int tb_unreliable(AVCodecContext *c){
2235 if( c->time_base.den >= 101L*c->time_base.num
2236 || c->time_base.den < 5L*c->time_base.num
2237 /* || c->codec_tag == AV_RL32("DIVX")
2238 || c->codec_tag == AV_RL32("XVID")*/
2239 || c->codec_id == CODEC_ID_MPEG2VIDEO
2240 || c->codec_id == CODEC_ID_H264
2241 )
2242 return 1;
2243 return 0;
2244 }
2245
2246 int av_find_stream_info(AVFormatContext *ic)
2247 {
2248 int i, count, ret, read_size, j;
2249 AVStream *st;
2250 AVPacket pkt1, *pkt;
2251 int64_t old_offset = avio_tell(ic->pb);
2252
2253 for(i=0;i<ic->nb_streams;i++) {
2254 AVCodec *codec;
2255 st = ic->streams[i];
2256 if (st->codec->codec_id == CODEC_ID_AAC) {
2257 st->codec->sample_rate = 0;
2258 st->codec->frame_size = 0;
2259 st->codec->channels = 0;
2260 }
2261 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2262 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2263 /* if(!st->time_base.num)
2264 st->time_base= */
2265 if(!st->codec->time_base.num)
2266 st->codec->time_base= st->time_base;
2267 }
2268 //only for the split stuff
2269 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2270 st->parser = av_parser_init(st->codec->codec_id);
2271 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2272 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2273 }
2274 }
2275 assert(!st->codec->codec);
2276 codec = avcodec_find_decoder(st->codec->codec_id);
2277
2278 /* Force decoding of at least one frame of codec data
2279 * this makes sure the codec initializes the channel configuration
2280 * and does not trust the values from the container.
2281 */
2282 if (codec && codec->capabilities & CODEC_CAP_CHANNEL_CONF)
2283 st->codec->channels = 0;
2284
2285 /* Ensure that subtitle_header is properly set. */
2286 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2287 && codec && !st->codec->codec)
2288 avcodec_open(st->codec, codec);
2289
2290 //try to just open decoders, in case this is enough to get parameters
2291 if(!has_codec_parameters(st->codec)){
2292 if (codec && !st->codec->codec)
2293 avcodec_open(st->codec, codec);
2294 }
2295 }
2296
2297 for (i=0; i<ic->nb_streams; i++) {
2298 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2299 }
2300
2301 count = 0;
2302 read_size = 0;
2303 for(;;) {
2304 if(url_interrupt_cb()){
2305 ret= AVERROR_EXIT;
2306 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2307 break;
2308 }
2309
2310 /* check if one codec still needs to be handled */
2311 for(i=0;i<ic->nb_streams;i++) {
2312 int fps_analyze_framecount = 20;
2313
2314 st = ic->streams[i];
2315 if (!has_codec_parameters(st->codec))
2316 break;
2317 /* if the timebase is coarse (like the usual millisecond precision
2318 of mkv), we need to analyze more frames to reliably arrive at
2319 the correct fps */
2320 if (av_q2d(st->time_base) > 0.0005)
2321 fps_analyze_framecount *= 2;
2322 if (ic->fps_probe_size >= 0)
2323 fps_analyze_framecount = ic->fps_probe_size;
2324 /* variable fps and no guess at the real fps */
2325 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2326 && st->info->duration_count < fps_analyze_framecount
2327 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2328 break;
2329 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2330 break;
2331 if(st->first_dts == AV_NOPTS_VALUE)
2332 break;
2333 }
2334 if (i == ic->nb_streams) {
2335 /* NOTE: if the format has no header, then we need to read
2336 some packets to get most of the streams, so we cannot
2337 stop here */
2338 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2339 /* if we found the info for all the codecs, we can stop */
2340 ret = count;
2341 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2342 break;
2343 }
2344 }
2345 /* we did not get all the codec info, but we read too much data */
2346 if (read_size >= ic->probesize) {
2347 ret = count;
2348 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2349 break;
2350 }
2351
2352 /* NOTE: a new stream can be added there if no header in file
2353 (AVFMTCTX_NOHEADER) */
2354 ret = av_read_frame_internal(ic, &pkt1);
2355 if (ret < 0 && ret != AVERROR(EAGAIN)) {
2356 /* EOF or error */
2357 ret = -1; /* we could not have all the codec parameters before EOF */
2358 for(i=0;i<ic->nb_streams;i++) {
2359 st = ic->streams[i];
2360 if (!has_codec_parameters(st->codec)){
2361 char buf[256];
2362 avcodec_string(buf, sizeof(buf), st->codec, 0);
2363 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2364 } else {
2365 ret = 0;
2366 }
2367 }
2368 break;
2369 }
2370
2371 if (ret == AVERROR(EAGAIN))
2372 continue;
2373
2374 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2375 if ((ret = av_dup_packet(pkt)) < 0)
2376 goto find_stream_info_err;
2377
2378 read_size += pkt->size;
2379
2380 st = ic->streams[pkt->stream_index];
2381 if (st->codec_info_nb_frames>1) {
2382 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2383 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2384 break;
2385 }
2386 st->info->codec_info_duration += pkt->duration;
2387 }
2388 {
2389 int64_t last = st->info->last_dts;
2390 int64_t duration= pkt->dts - last;
2391
2392 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2393 double dur= duration * av_q2d(st->time_base);
2394
2395 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2396 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2397 if (st->info->duration_count < 2)
2398 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
2399 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2400 int framerate= get_std_framerate(i);
2401 int ticks= lrintf(dur*framerate/(1001*12));
2402 double error= dur - ticks*1001*12/(double)framerate;
2403 st->info->duration_error[i] += error*error;
2404 }
2405 st->info->duration_count++;
2406 // ignore the first 4 values, they might have some random jitter
2407 if (st->info->duration_count > 3)
2408 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2409 }
2410 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2411 st->info->last_dts = pkt->dts;
2412 }
2413 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2414 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2415 if(i){
2416 st->codec->extradata_size= i;
2417 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2418 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2419 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2420 }
2421 }
2422
2423 /* if still no information, we try to open the codec and to
2424 decompress the frame. We try to avoid that in most cases as
2425 it takes longer and uses more memory. For MPEG-4, we need to
2426 decompress for QuickTime. */
2427 if (!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st))
2428 try_decode_frame(st, pkt);
2429
2430 st->codec_info_nb_frames++;
2431 count++;
2432 }
2433
2434 // close codecs which were opened in try_decode_frame()
2435 for(i=0;i<ic->nb_streams;i++) {
2436 st = ic->streams[i];
2437 if(st->codec->codec)
2438 avcodec_close(st->codec);
2439 }
2440 for(i=0;i<ic->nb_streams;i++) {
2441 st = ic->streams[i];
2442 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2443 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2444 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2445 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2446 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2447 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2448 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2449
2450 // the check for tb_unreliable() is not completely correct, since this is not about handling
2451 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2452 // ipmovie.c produces.
2453 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2454 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2455 if (st->info->duration_count && !st->r_frame_rate.num
2456 && tb_unreliable(st->codec) /*&&
2457 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2458 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2459 int num = 0;
2460 double best_error= 2*av_q2d(st->time_base);
2461 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2462
2463 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2464 double error = st->info->duration_error[j] * get_std_framerate(j);
2465 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2466 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2467 if(error < best_error){
2468 best_error= error;
2469 num = get_std_framerate(j);
2470 }
2471 }
2472 // do not increase frame rate by more than 1 % in order to match a standard rate.
2473 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2474 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2475 }
2476
2477 if (!st->r_frame_rate.num){
2478 if( st->codec->time_base.den * (int64_t)st->time_base.num
2479 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2480 st->r_frame_rate.num = st->codec->time_base.den;
2481 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2482 }else{
2483 st->r_frame_rate.num = st->time_base.den;
2484 st->r_frame_rate.den = st->time_base.num;
2485 }
2486 }
2487 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2488 if(!st->codec->bits_per_coded_sample)
2489 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2490 // set stream disposition based on audio service type
2491 switch (st->codec->audio_service_type) {
2492 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2493 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2494 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2495 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2496 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2497 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2498 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2499 st->disposition = AV_DISPOSITION_COMMENT; break;
2500 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2501 st->disposition = AV_DISPOSITION_KARAOKE; break;
2502 }
2503 }
2504 }
2505
2506 av_estimate_timings(ic, old_offset);
2507
2508 compute_chapters_end(ic);
2509
2510 #if 0
2511 /* correct DTS for B-frame streams with no timestamps */
2512 for(i=0;i<ic->nb_streams;i++) {
2513 st = ic->streams[i];
2514 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2515 if(b-frames){
2516 ppktl = &ic->packet_buffer;
2517 while(ppkt1){
2518 if(ppkt1->stream_index != i)
2519 continue;
2520 if(ppkt1->pkt->dts < 0)
2521 break;
2522 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2523 break;
2524 ppkt1->pkt->dts -= delta;
2525 ppkt1= ppkt1->next;
2526 }
2527 if(ppkt1)
2528 continue;
2529 st->cur_dts -= delta;
2530 }
2531 }
2532 }
2533 #endif
2534
2535 find_stream_info_err:
2536 for (i=0; i < ic->nb_streams; i++)
2537 av_freep(&ic->streams[i]->info);
2538 return ret;
2539 }
2540
2541 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2542 {
2543 int i, j;
2544
2545 for (i = 0; i < ic->nb_programs; i++)
2546 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2547 if (ic->programs[i]->stream_index[j] == s)
2548 return ic->programs[i];
2549 return NULL;
2550 }
2551
2552 int av_find_best_stream(AVFormatContext *ic,
2553 enum AVMediaType type,
2554 int wanted_stream_nb,
2555 int related_stream,
2556 AVCodec **decoder_ret,
2557 int flags)
2558 {
2559 int i, nb_streams = ic->nb_streams;
2560 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2561 unsigned *program = NULL;
2562 AVCodec *decoder = NULL, *best_decoder = NULL;
2563
2564 if (related_stream >= 0 && wanted_stream_nb < 0) {
2565 AVProgram *p = find_program_from_stream(ic, related_stream);
2566 if (p) {
2567 program = p->stream_index;
2568 nb_streams = p->nb_stream_indexes;
2569 }
2570 }
2571 for (i = 0; i < nb_streams; i++) {
2572 int real_stream_index = program ? program[i] : i;
2573 AVStream *st = ic->streams[real_stream_index];
2574 AVCodecContext *avctx = st->codec;
2575 if (avctx->codec_type != type)
2576 continue;
2577 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2578 continue;
2579 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2580 continue;
2581 if (decoder_ret) {
2582 decoder = avcodec_find_decoder(st->codec->codec_id);
2583 if (!decoder) {
2584 if (ret < 0)
2585 ret = AVERROR_DECODER_NOT_FOUND;
2586 continue;
2587 }
2588 }
2589 if (best_count >= st->codec_info_nb_frames)
2590 continue;
2591 best_count = st->codec_info_nb_frames;
2592 ret = real_stream_index;
2593 best_decoder = decoder;
2594 if (program && i == nb_streams - 1 && ret < 0) {
2595 program = NULL;
2596 nb_streams = ic->nb_streams;
2597 i = 0; /* no related stream found, try again with everything */
2598 }
2599 }
2600 if (decoder_ret)
2601 *decoder_ret = best_decoder;
2602 return ret;
2603 }
2604
2605 /*******************************************************/
2606
2607 int av_read_play(AVFormatContext *s)
2608 {
2609 if (s->iformat->read_play)
2610 return s->iformat->read_play(s);
2611 if (s->pb)
2612 return avio_pause(s->pb, 0);
2613 return AVERROR(ENOSYS);
2614 }
2615
2616 int av_read_pause(AVFormatContext *s)
2617 {
2618 if (s->iformat->read_pause)
2619 return s->iformat->read_pause(s);
2620 if (s->pb)
2621 return avio_pause(s->pb, 1);
2622 return AVERROR(ENOSYS);
2623 }
2624
2625 void av_close_input_stream(AVFormatContext *s)
2626 {
2627 flush_packet_queue(s);
2628 if (s->iformat->read_close)
2629 s->iformat->read_close(s);
2630 avformat_free_context(s);
2631 }
2632
2633 void avformat_free_context(AVFormatContext *s)
2634 {
2635 int i;
2636 AVStream *st;
2637
2638 av_opt_free(s);
2639 if (s->iformat && s->iformat->priv_class && s->priv_data)
2640 av_opt_free(s->priv_data);
2641
2642 for(i=0;i<s->nb_streams;i++) {
2643 /* free all data in a stream component */
2644 st = s->streams[i];
2645 if (st->parser) {
2646 av_parser_close(st->parser);
2647 av_free_packet(&st->cur_pkt);
2648 }
2649 av_dict_free(&st->metadata);
2650 av_free(st->index_entries);
2651 av_free(st->codec->extradata);
2652 av_free(st->codec->subtitle_header);
2653 av_free(st->codec);
2654 av_free(st->priv_data);
2655 av_free(st->info);
2656 av_free(st);
2657 }
2658 for(i=s->nb_programs-1; i>=0; i--) {
2659 av_dict_free(&s->programs[i]->metadata);
2660 av_freep(&s->programs[i]->stream_index);
2661 av_freep(&s->programs[i]);
2662 }
2663 av_freep(&s->programs);
2664 av_freep(&s->priv_data);
2665 while(s->nb_chapters--) {
2666 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2667 av_free(s->chapters[s->nb_chapters]);
2668 }
2669 av_freep(&s->chapters);
2670 av_dict_free(&s->metadata);
2671 av_freep(&s->streams);
2672 av_free(s);
2673 }
2674
2675 void av_close_input_file(AVFormatContext *s)
2676 {
2677 AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2678 NULL : s->pb;
2679 av_close_input_stream(s);
2680 if (pb)
2681 avio_close(pb);
2682 }
2683
2684 AVStream *av_new_stream(AVFormatContext *s, int id)
2685 {
2686 AVStream *st;
2687 int i;
2688 AVStream **streams;
2689
2690 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2691 return NULL;
2692 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2693 if (!streams)
2694 return NULL;
2695 s->streams = streams;
2696
2697 st = av_mallocz(sizeof(AVStream));
2698 if (!st)
2699 return NULL;
2700 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2701 av_free(st);
2702 return NULL;
2703 }
2704
2705 st->codec= avcodec_alloc_context();
2706 if (s->iformat) {
2707 /* no default bitrate if decoding */
2708 st->codec->bit_rate = 0;
2709 }
2710 st->index = s->nb_streams;
2711 st->id = id;
2712 st->start_time = AV_NOPTS_VALUE;
2713 st->duration = AV_NOPTS_VALUE;
2714 /* we set the current DTS to 0 so that formats without any timestamps
2715 but durations get some timestamps, formats with some unknown
2716 timestamps have their first few packets buffered and the
2717 timestamps corrected before they are returned to the user */
2718 st->cur_dts = 0;
2719 st->first_dts = AV_NOPTS_VALUE;
2720 st->probe_packets = MAX_PROBE_PACKETS;
2721
2722 /* default pts setting is MPEG-like */
2723 av_set_pts_info(st, 33, 1, 90000);
2724 st->last_IP_pts = AV_NOPTS_VALUE;
2725 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2726 st->pts_buffer[i]= AV_NOPTS_VALUE;
2727 st->reference_dts = AV_NOPTS_VALUE;
2728
2729 st->sample_aspect_ratio = (AVRational){0,1};
2730
2731 s->streams[s->nb_streams++] = st;
2732 return st;
2733 }
2734
2735 AVProgram *av_new_program(AVFormatContext *ac, int id)
2736 {
2737 AVProgram *program=NULL;
2738 int i;
2739
2740 av_dlog(ac, "new_program: id=0x%04x\n", id);
2741
2742 for(i=0; i<ac->nb_programs; i++)
2743 if(ac->programs[i]->id == id)
2744 program = ac->programs[i];
2745
2746 if(!program){
2747 program = av_mallocz(sizeof(AVProgram));
2748 if (!program)
2749 return NULL;
2750 dynarray_add(&ac->programs, &ac->nb_programs, program);
2751 program->discard = AVDISCARD_NONE;
2752 }
2753 program->id = id;
2754
2755 return program;
2756 }
2757
2758 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2759 {
2760 AVChapter *chapter = NULL;
2761 int i;
2762
2763 for(i=0; i<s->nb_chapters; i++)
2764 if(s->chapters[i]->id == id)
2765 chapter = s->chapters[i];
2766
2767 if(!chapter){
2768 chapter= av_mallocz(sizeof(AVChapter));
2769 if(!chapter)
2770 return NULL;
2771 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2772 }
2773 av_dict_set(&chapter->metadata, "title", title, 0);
2774 chapter->id = id;
2775 chapter->time_base= time_base;
2776 chapter->start = start;
2777 chapter->end = end;
2778
2779 return chapter;
2780 }
2781
2782 /************************************************************/
2783 /* output media file */
2784
2785 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2786 {
2787 int ret;
2788
2789 if (s->oformat->priv_data_size > 0) {
2790 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2791 if (!s->priv_data)
2792 return AVERROR(ENOMEM);
2793 if (s->oformat->priv_class) {
2794 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2795 av_opt_set_defaults(s->priv_data);
2796 }
2797 } else
2798 s->priv_data = NULL;
2799
2800 if (s->oformat->set_parameters) {
2801 ret = s->oformat->set_parameters(s, ap);
2802 if (ret < 0)
2803 return ret;
2804 }
2805 return 0;
2806 }
2807
2808 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2809 {
2810 const AVCodecTag *avctag;
2811 int n;
2812 enum CodecID id = CODEC_ID_NONE;
2813 unsigned int tag = 0;
2814
2815 /**
2816 * Check that tag + id is in the table
2817 * If neither is in the table -> OK
2818 * If tag is in the table with another id -> FAIL
2819 * If id is in the table with another tag -> FAIL unless strict < normal
2820 */
2821 for (n = 0; s->oformat->codec_tag[n]; n++) {
2822 avctag = s->oformat->codec_tag[n];
2823 while (avctag->id != CODEC_ID_NONE) {
2824 if (ff_toupper4(avctag->tag) == ff_toupper4(st->codec->codec_tag)) {
2825 id = avctag->id;
2826 if (id == st->codec->codec_id)
2827 return 1;
2828 }
2829 if (avctag->id == st->codec->codec_id)
2830 tag = avctag->tag;
2831 avctag++;
2832 }
2833 }
2834 if (id != CODEC_ID_NONE)
2835 return 0;
2836 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
2837 return 0;
2838 return 1;
2839 }
2840
2841 int av_write_header(AVFormatContext *s)
2842 {
2843 int ret, i;
2844 AVStream *st;
2845
2846 // some sanity checks
2847 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2848 av_log(s, AV_LOG_ERROR, "no streams\n");
2849 return AVERROR(EINVAL);
2850 }
2851
2852 for(i=0;i<s->nb_streams;i++) {
2853 st = s->streams[i];
2854
2855 switch (st->codec->codec_type) {
2856 case AVMEDIA_TYPE_AUDIO:
2857 if(st->codec->sample_rate<=0){
2858 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2859 return AVERROR(EINVAL);
2860 }
2861 if(!st->codec->block_align)
2862 st->codec->block_align = st->codec->channels *
2863 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2864 break;
2865 case AVMEDIA_TYPE_VIDEO:
2866 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2867 av_log(s, AV_LOG_ERROR, "time base not set\n");
2868 return AVERROR(EINVAL);
2869 }
2870 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2871 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2872 return AVERROR(EINVAL);
2873 }
2874 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2875 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2876 return AVERROR(EINVAL);
2877 }
2878 break;
2879 }
2880
2881 if(s->oformat->codec_tag){
2882 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2883 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi, we override it here
2884 st->codec->codec_tag= 0;
2885 }
2886 if(st->codec->codec_tag){
2887 if (!validate_codec_tag(s, st)) {
2888 char tagbuf[32];
2889 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2890 av_log(s, AV_LOG_ERROR,
2891 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2892 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2893 return AVERROR_INVALIDDATA;
2894 }
2895 }else
2896 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2897 }
2898
2899 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2900 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2901 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2902 }
2903
2904 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2905 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2906 if (!s->priv_data)
2907 return AVERROR(ENOMEM);
2908 }
2909
2910 /* set muxer identification string */
2911 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2912 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2913 }
2914
2915 if(s->oformat->write_header){
2916 ret = s->oformat->write_header(s);
2917 if (ret < 0)
2918 return ret;
2919 }
2920
2921 /* init PTS generation */
2922 for(i=0;i<s->nb_streams;i++) {
2923 int64_t den = AV_NOPTS_VALUE;
2924 st = s->streams[i];
2925
2926 switch (st->codec->codec_type) {
2927 case AVMEDIA_TYPE_AUDIO:
2928 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2929 break;
2930 case AVMEDIA_TYPE_VIDEO:
2931 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2932 break;
2933 default:
2934 break;
2935 }
2936 if (den != AV_NOPTS_VALUE) {
2937 if (den <= 0)
2938 return AVERROR_INVALIDDATA;
2939 av_frac_init(&st->pts, 0, 0, den);
2940 }
2941 }
2942 return 0;
2943 }
2944
2945 //FIXME merge with compute_pkt_fields
2946 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
2947 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2948 int num, den, frame_size, i;
2949
2950 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
2951 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2952
2953 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2954 return AVERROR(EINVAL);*/
2955
2956 /* duration field */
2957 if (pkt->duration == 0) {
2958 compute_frame_duration(&num, &den, st, NULL, pkt);
2959 if (den && num) {
2960 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2961 }
2962 }
2963
2964 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2965 pkt->pts= pkt->dts;
2966
2967 //XXX/FIXME this is a temporary hack until all encoders output pts
2968 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2969 pkt->dts=
2970 // pkt->pts= st->cur_dts;
2971 pkt->pts= st->pts.val;
2972 }
2973
2974 //calculate dts from pts
2975 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2976 st->pts_buffer[0]= pkt->pts;
2977 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2978 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
2979 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2980 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2981
2982 pkt->dts= st->pts_buffer[0];
2983 }
2984
2985 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2986 av_log(s, AV_LOG_ERROR,
2987 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
2988 st->index, st->cur_dts, pkt->dts);
2989 return AVERROR(EINVAL);
2990 }
2991 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2992 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
2993 return AVERROR(EINVAL);
2994 }
2995
2996 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2997 st->cur_dts= pkt->dts;
2998 st->pts.val= pkt->dts;
2999
3000 /* update pts */
3001 switch (st->codec->codec_type) {
3002 case AVMEDIA_TYPE_AUDIO:
3003 frame_size = get_audio_frame_size(st->codec, pkt->size);
3004
3005 /* HACK/FIXME, we skip the initial 0 size packets as they are most
3006 likely equal to the encoder delay, but it would be better if we
3007 had the real timestamps from the encoder */
3008 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
3009 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
3010 }
3011 break;
3012 case AVMEDIA_TYPE_VIDEO:
3013 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
3014 break;
3015 default:
3016 break;
3017 }
3018 return 0;
3019 }
3020
3021 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
3022 {
3023 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
3024
3025 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3026 return ret;
3027
3028 ret= s->oformat->write_packet(s, pkt);
3029 return ret;
3030 }
3031
3032 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
3033 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
3034 {
3035 AVPacketList **next_point, *this_pktl;
3036
3037 this_pktl = av_mallocz(sizeof(AVPacketList));
3038 this_pktl->pkt= *pkt;
3039 pkt->destruct= NULL; // do not free original but only the copy
3040 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
3041
3042 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
3043 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
3044 }else
3045 next_point = &s->packet_buffer;
3046
3047 if(*next_point){
3048 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3049 while(!compare(s, &(*next_point)->pkt, pkt)){
3050 next_point= &(*next_point)->next;
3051 }
3052 goto next_non_null;
3053 }else{
3054 next_point = &(s->packet_buffer_end->next);
3055 }
3056 }
3057 assert(!*next_point);
3058
3059 s->packet_buffer_end= this_pktl;
3060 next_non_null:
3061
3062 this_pktl->next= *next_point;
3063
3064 s->streams[pkt->stream_index]->last_in_packet_buffer=
3065 *next_point= this_pktl;
3066 }
3067
3068 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3069 {
3070 AVStream *st = s->streams[ pkt ->stream_index];
3071 AVStream *st2= s->streams[ next->stream_index];
3072 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3073 st->time_base);
3074
3075 if (comp == 0)
3076 return pkt->stream_index < next->stream_index;
3077 return comp > 0;
3078 }
3079
3080 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
3081 AVPacketList *pktl;
3082 int stream_count=0;
3083 int i;
3084
3085 if(pkt){
3086 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3087 }
3088
3089 for(i=0; i < s->nb_streams; i++)
3090 stream_count+= !!s->streams[i]->last_in_packet_buffer;
3091
3092 if(stream_count && (s->nb_streams == stream_count || flush)){
3093 pktl= s->packet_buffer;
3094 *out= pktl->pkt;
3095
3096 s->packet_buffer= pktl->next;
3097 if(!s->packet_buffer)
3098 s->packet_buffer_end= NULL;
3099
3100 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3101 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3102 av_freep(&pktl);
3103 return 1;
3104 }else{
3105 av_init_packet(out);
3106 return 0;
3107 }