1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /* #define DEBUG */
23
24 #include "avformat.h"
25 #include "avio_internal.h"
26 #include "internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/dict.h"
30 #include "libavutil/pixdesc.h"
31 #include "metadata.h"
32 #include "id3v2.h"
33 #include "libavutil/avstring.h"
34 #include "riff.h"
35 #include "audiointerleave.h"
36 #include "url.h"
37 #include <sys/time.h>
38 #include <time.h>
39 #include <strings.h>
40 #include <stdarg.h>
41 #if CONFIG_NETWORK
42 #include "network.h"
43 #endif
44
45 #undef NDEBUG
46 #include <assert.h>
47
48 /**
49 * @file
50 * various utility functions for use within Libav
51 */
52
53 unsigned avformat_version(void)
54 {
55 return LIBAVFORMAT_VERSION_INT;
56 }
57
58 const char *avformat_configuration(void)
59 {
60 return LIBAV_CONFIGURATION;
61 }
62
63 const char *avformat_license(void)
64 {
65 #define LICENSE_PREFIX "libavformat license: "
66 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
67 }
68
69 /* fraction handling */
70
71 /**
72 * f = val + (num / den) + 0.5.
73 *
74 * 'num' is normalized so that 0 <= num < den.
75 *
76 * @param f fractional number
77 * @param val integer value
78 * @param num must be >= 0
79 * @param den must be >= 1
80 */
81 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
82 {
83 num += (den >> 1);
84 if (num >= den) {
85 val += num / den;
86 num = num % den;
87 }
88 f->val = val;
89 f->num = num;
90 f->den = den;
91 }
92
93 /**
94 * Fractional addition to f: f = f + (incr / f->den).
95 *
96 * @param f fractional number
97 * @param incr increment, can be positive or negative
98 */
99 static void av_frac_add(AVFrac *f, int64_t incr)
100 {
101 int64_t num, den;
102
103 num = f->num + incr;
104 den = f->den;
105 if (num < 0) {
106 f->val += num / den;
107 num = num % den;
108 if (num < 0) {
109 num += den;
110 f->val--;
111 }
112 } else if (num >= den) {
113 f->val += num / den;
114 num = num % den;
115 }
116 f->num = num;
117 }
118
119 /** head of registered input format linked list */
120 static AVInputFormat *first_iformat = NULL;
121 /** head of registered output format linked list */
122 static AVOutputFormat *first_oformat = NULL;
123
124 AVInputFormat *av_iformat_next(AVInputFormat *f)
125 {
126 if(f) return f->next;
127 else return first_iformat;
128 }
129
130 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
131 {
132 if(f) return f->next;
133 else return first_oformat;
134 }
135
136 void av_register_input_format(AVInputFormat *format)
137 {
138 AVInputFormat **p;
139 p = &first_iformat;
140 while (*p != NULL) p = &(*p)->next;
141 *p = format;
142 format->next = NULL;
143 }
144
145 void av_register_output_format(AVOutputFormat *format)
146 {
147 AVOutputFormat **p;
148 p = &first_oformat;
149 while (*p != NULL) p = &(*p)->next;
150 *p = format;
151 format->next = NULL;
152 }
153
154 int av_match_ext(const char *filename, const char *extensions)
155 {
156 const char *ext, *p;
157 char ext1[32], *q;
158
159 if(!filename)
160 return 0;
161
162 ext = strrchr(filename, '.');
163 if (ext) {
164 ext++;
165 p = extensions;
166 for(;;) {
167 q = ext1;
168 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
169 *q++ = *p++;
170 *q = '\0';
171 if (!strcasecmp(ext1, ext))
172 return 1;
173 if (*p == '\0')
174 break;
175 p++;
176 }
177 }
178 return 0;
179 }
180
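/**
 * Check whether 'name' matches any of the comma-separated names in 'names'.
 * The comparison is case-insensitive.
 *
 * @return 1 on a match, 0 otherwise
 */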
181 static int match_format(const char *name, const char *names)
182 {
183 const char *p;
184 int len, namelen;
185
186 if (!name || !names)
187 return 0;
188
189 namelen = strlen(name);
190 while ((p = strchr(names, ','))) {
191 len = FFMAX(p - names, namelen);
192 if (!strncasecmp(name, names, len))
193 return 1;
194 names = p+1;
195 }
196 return !strcasecmp(name, names);
197 }
198
199 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
200 const char *mime_type)
201 {
202 AVOutputFormat *fmt = NULL, *fmt_found;
203 int score_max, score;
204
205 /* specific test for image sequences */
206 #if CONFIG_IMAGE2_MUXER
207 if (!short_name && filename &&
208 av_filename_number_test(filename) &&
209 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
210 return av_guess_format("image2", NULL, NULL);
211 }
212 #endif
213 /* Find the proper file type. */
214 fmt_found = NULL;
215 score_max = 0;
216 while ((fmt = av_oformat_next(fmt))) {
217 score = 0;
218 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
219 score += 100;
220 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
221 score += 10;
222 if (filename && fmt->extensions &&
223 av_match_ext(filename, fmt->extensions)) {
224 score += 5;
225 }
226 if (score > score_max) {
227 score_max = score;
228 fmt_found = fmt;
229 }
230 }
231 return fmt_found;
232 }
233
234 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
235 const char *filename, const char *mime_type, enum AVMediaType type){
236 if(type == AVMEDIA_TYPE_VIDEO){
237 enum CodecID codec_id= CODEC_ID_NONE;
238
239 #if CONFIG_IMAGE2_MUXER
240 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
241 codec_id= ff_guess_image2_codec(filename);
242 }
243 #endif
244 if(codec_id == CODEC_ID_NONE)
245 codec_id= fmt->video_codec;
246 return codec_id;
247 }else if(type == AVMEDIA_TYPE_AUDIO)
248 return fmt->audio_codec;
249 else if (type == AVMEDIA_TYPE_SUBTITLE)
250 return fmt->subtitle_codec;
251 else
252 return CODEC_ID_NONE;
253 }
254
255 AVInputFormat *av_find_input_format(const char *short_name)
256 {
257 AVInputFormat *fmt = NULL;
258 while ((fmt = av_iformat_next(fmt))) {
259 if (match_format(short_name, fmt->name))
260 return fmt;
261 }
262 return NULL;
263 }
264
265
266 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
267 {
268 int ret= av_new_packet(pkt, size);
269
270 if(ret<0)
271 return ret;
272
273 pkt->pos= avio_tell(s);
274
275 ret= avio_read(s, pkt->data, size);
276 if(ret<=0)
277 av_free_packet(pkt);
278 else
279 av_shrink_packet(pkt, ret);
280
281 return ret;
282 }
283
284 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
285 {
286 int ret;
287 int old_size;
288 if (!pkt->size)
289 return av_get_packet(s, pkt, size);
290 old_size = pkt->size;
291 ret = av_grow_packet(pkt, size);
292 if (ret < 0)
293 return ret;
294 ret = avio_read(s, pkt->data + old_size, size);
295 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
296 return ret;
297 }
298
299
300 int av_filename_number_test(const char *filename)
301 {
302 char buf[1024];
303 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
304 }
305
306 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
307 {
308 AVProbeData lpd = *pd;
309 AVInputFormat *fmt1 = NULL, *fmt;
310 int score, id3 = 0;
311
312 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
313 int id3len = ff_id3v2_tag_len(lpd.buf);
314 if (lpd.buf_size > id3len + 16) {
315 lpd.buf += id3len;
316 lpd.buf_size -= id3len;
317 }
318 id3 = 1;
319 }
320
321 fmt = NULL;
322 while ((fmt1 = av_iformat_next(fmt1))) {
323 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
324 continue;
325 score = 0;
326 if (fmt1->read_probe) {
327 score = fmt1->read_probe(&lpd);
328 } else if (fmt1->extensions) {
329 if (av_match_ext(lpd.filename, fmt1->extensions)) {
330 score = 50;
331 }
332 }
333 if (score > *score_max) {
334 *score_max = score;
335 fmt = fmt1;
336 }else if (score == *score_max)
337 fmt = NULL;
338 }
339
340 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */
341 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4) {
342 while ((fmt = av_iformat_next(fmt)))
343 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) {
344 *score_max = AVPROBE_SCORE_MAX/4;
345 break;
346 }
347 }
348
349 return fmt;
350 }
351
352 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
353 int score=0;
354 return av_probe_input_format2(pd, is_opened, &score);
355 }
356
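/**
 * Probe the buffered data of a stream and, if a raw format is detected,
 * set the codec id and type of the stream accordingly.
 *
 * @return 1 if a format was detected, 0 otherwise
 */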
357 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
358 {
359 static const struct {
360 const char *name; enum CodecID id; enum AVMediaType type;
361 } fmt_id_type[] = {
362 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
363 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
364 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
365 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
366 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
367 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
368 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
369 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
370 { 0 }
371 };
372 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
373
374 if (fmt) {
375 int i;
376 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
377 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
378 for (i = 0; fmt_id_type[i].name; i++) {
379 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
380 st->codec->codec_id = fmt_id_type[i].id;
381 st->codec->codec_type = fmt_id_type[i].type;
382 break;
383 }
384 }
385 }
386 return !!fmt;
387 }
388
389 /************************************************************/
390 /* input media file */
391
392 #if FF_API_FORMAT_PARAMETERS
393 static AVDictionary *convert_format_parameters(AVFormatParameters *ap)
394 {
395 char buf[1024];
396 AVDictionary *opts = NULL;
397
398 if (ap->time_base.num) {
399 snprintf(buf, sizeof(buf), "%d/%d", ap->time_base.den, ap->time_base.num);
400 av_dict_set(&opts, "framerate", buf, 0);
401 }
402 if (ap->sample_rate) {
403 snprintf(buf, sizeof(buf), "%d", ap->sample_rate);
404 av_dict_set(&opts, "sample_rate", buf, 0);
405 }
406 if (ap->channels) {
407 snprintf(buf, sizeof(buf), "%d", ap->channels);
408 av_dict_set(&opts, "channels", buf, 0);
409 }
410 if (ap->width || ap->height) {
411 snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height);
412 av_dict_set(&opts, "video_size", buf, 0);
413 }
414 if (ap->pix_fmt != PIX_FMT_NONE) {
415 av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0);
416 }
417 if (ap->channel) {
418 snprintf(buf, sizeof(buf), "%d", ap->channel);
419 av_dict_set(&opts, "channel", buf, 0);
420 }
421 if (ap->standard) {
422 av_dict_set(&opts, "standard", ap->standard, 0);
423 }
424 if (ap->mpeg2ts_compute_pcr) {
425 av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0);
426 }
427 if (ap->initial_pause) {
428 av_dict_set(&opts, "initial_pause", "1", 0);
429 }
430 return opts;
431 }
432
433 /**
434 * Open a media file from an IO stream. 'fmt' must be specified.
435 */
436 int av_open_input_stream(AVFormatContext **ic_ptr,
437 AVIOContext *pb, const char *filename,
438 AVInputFormat *fmt, AVFormatParameters *ap)
439 {
440 int err;
441 AVDictionary *opts;
442 AVFormatContext *ic;
443 AVFormatParameters default_ap;
444
445 if(!ap){
446 ap=&default_ap;
447 memset(ap, 0, sizeof(default_ap));
448 }
449 opts = convert_format_parameters(ap);
450
451 if(!ap->prealloced_context)
452 ic = avformat_alloc_context();
453 else
454 ic = *ic_ptr;
455 if (!ic) {
456 err = AVERROR(ENOMEM);
457 goto fail;
458 }
459 ic->pb = pb;
460
461 err = avformat_open_input(ic_ptr, filename, fmt, &opts);
462
463 fail:
464 av_dict_free(&opts);
465 return err;
466 }
467 #endif
468
469 /** size of probe buffer, for guessing file type from file contents */
470 #define PROBE_BUF_MIN 2048
471 #define PROBE_BUF_MAX (1<<20)
472
473 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
474 const char *filename, void *logctx,
475 unsigned int offset, unsigned int max_probe_size)
476 {
477 AVProbeData pd = { filename ? filename : "", NULL, -offset };
478 unsigned char *buf = NULL;
479 int ret = 0, probe_size;
480
481 if (!max_probe_size) {
482 max_probe_size = PROBE_BUF_MAX;
483 } else if (max_probe_size > PROBE_BUF_MAX) {
484 max_probe_size = PROBE_BUF_MAX;
485 } else if (max_probe_size < PROBE_BUF_MIN) {
486 return AVERROR(EINVAL);
487 }
488
489 if (offset >= max_probe_size) {
490 return AVERROR(EINVAL);
491 }
492
493 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0;
494 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
495 int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
496 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
497
498 if (probe_size < offset) {
499 continue;
500 }
501
502 /* read probe data */
503 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
504 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
505 /* fail if the error was not end of file; otherwise, lower the score */
506 if (ret != AVERROR_EOF) {
507 av_free(buf);
508 return ret;
509 }
510 score = 0;
511 ret = 0; /* error was end of file, nothing read */
512 }
513 pd.buf_size += ret;
514 pd.buf = &buf[offset];
515
516 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
517
518 /* guess file format */
519 *fmt = av_probe_input_format2(&pd, 1, &score);
520 if(*fmt){
521 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
522 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
523 }else
524 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
525 }
526 }
527
528 if (!*fmt) {
529 av_free(buf);
530 return AVERROR_INVALIDDATA;
531 }
532
533 /* rewind. reuse probe buffer to avoid seeking */
534 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
535 av_free(buf);
536
537 return ret;
538 }
539
540 #if FF_API_FORMAT_PARAMETERS
541 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
542 AVInputFormat *fmt,
543 int buf_size,
544 AVFormatParameters *ap)
545 {
546 int err;
547 AVDictionary *opts = convert_format_parameters(ap);
548
549 if (!ap->prealloced_context)
550 *ic_ptr = NULL;
551
552 err = avformat_open_input(ic_ptr, filename, fmt, &opts);
553
554 av_dict_free(&opts);
555 return err;
556 }
557 #endif
558
559 /* open input file and probe the format if necessary */
560 static int init_input(AVFormatContext *s, const char *filename)
561 {
562 int ret;
563 AVProbeData pd = {filename, NULL, 0};
564
565 if (s->pb) {
566 s->flags |= AVFMT_FLAG_CUSTOM_IO;
567 if (!s->iformat)
568 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
569 else if (s->iformat->flags & AVFMT_NOFILE)
570 return AVERROR(EINVAL);
571 return 0;
572 }
573
574 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
575 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
576 return 0;
577
578 if ((ret = avio_open(&s->pb, filename, AVIO_FLAG_READ)) < 0)
579 return ret;
580 if (s->iformat)
581 return 0;
582 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
583 }
584
585 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
586 {
587 AVFormatContext *s = *ps;
588 int ret = 0;
589 AVFormatParameters ap = { 0 };
590 AVDictionary *tmp = NULL;
591
592 if (!s && !(s = avformat_alloc_context()))
593 return AVERROR(ENOMEM);
594 if (fmt)
595 s->iformat = fmt;
596
597 if (options)
598 av_dict_copy(&tmp, *options, 0);
599
600 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
601 goto fail;
602
603 if ((ret = init_input(s, filename)) < 0)
604 goto fail;
605
606 /* check filename in case an image number is expected */
607 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
608 if (!av_filename_number_test(filename)) {
609 ret = AVERROR(EINVAL);
610 goto fail;
611 }
612 }
613
614 s->duration = s->start_time = AV_NOPTS_VALUE;
615 av_strlcpy(s->filename, filename, sizeof(s->filename));
616
617 /* allocate private data */
618 if (s->iformat->priv_data_size > 0) {
619 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
620 ret = AVERROR(ENOMEM);
621 goto fail;
622 }
623 if (s->iformat->priv_class) {
624 *(const AVClass**)s->priv_data = s->iformat->priv_class;
625 av_opt_set_defaults(s->priv_data);
626 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
627 goto fail;
628 }
629 }
630
631 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
632 if (s->pb)
633 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC);
634
635 if (s->iformat->read_header)
636 if ((ret = s->iformat->read_header(s, &ap)) < 0)
637 goto fail;
638
639 if (s->pb && !s->data_offset)
640 s->data_offset = avio_tell(s->pb);
641
642 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
643
644 if (options) {
645 av_dict_free(options);
646 *options = tmp;
647 }
648 *ps = s;
649 return 0;
650
651 fail:
652 av_dict_free(&tmp);
653 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
654 avio_close(s->pb);
655 avformat_free_context(s);
656 *ps = NULL;
657 return ret;
658 }
659
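/*
 * A minimal usage sketch of the demuxing entry points defined in this file;
 * the filename "input.avi" is only an example and error handling is reduced
 * to the bare minimum:
 *
 *     AVFormatContext *ctx = NULL;
 *     AVPacket pkt;
 *
 *     if (avformat_open_input(&ctx, "input.avi", NULL, NULL) < 0)
 *         return -1;                      // could not open or probe the input
 *     while (av_read_frame(ctx, &pkt) >= 0) {
 *         // process pkt.data / pkt.size here
 *         av_free_packet(&pkt);
 *     }
 *     // remember to close and free the context when done
 */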
660 /*******************************************************/
661
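/**
 * Append a packet to the given packet list.
 *
 * @return a pointer to the packet stored in the new list entry, or NULL if
 *         the list entry could not be allocated
 */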
662 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
663 AVPacketList **plast_pktl){
664 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
665 if (!pktl)
666 return NULL;
667
668 if (*packet_buffer)
669 (*plast_pktl)->next = pktl;
670 else
671 *packet_buffer = pktl;
672
673 /* add the packet to the buffered packet list */
674 *plast_pktl = pktl;
675 pktl->pkt= *pkt;
676 return &pktl->pkt;
677 }
678
679 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
680 {
681 int ret, i;
682 AVStream *st;
683
684 for(;;){
685 AVPacketList *pktl = s->raw_packet_buffer;
686
687 if (pktl) {
688 *pkt = pktl->pkt;
689 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
690 !s->streams[pkt->stream_index]->probe_packets ||
691 s->raw_packet_buffer_remaining_size < pkt->size){
692 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
693 av_freep(&pd->buf);
694 pd->buf_size = 0;
695 s->raw_packet_buffer = pktl->next;
696 s->raw_packet_buffer_remaining_size += pkt->size;
697 av_free(pktl);
698 return 0;
699 }
700 }
701
702 av_init_packet(pkt);
703 ret= s->iformat->read_packet(s, pkt);
704 if (ret < 0) {
705 if (!pktl || ret == AVERROR(EAGAIN))
706 return ret;
707 for (i = 0; i < s->nb_streams; i++)
708 s->streams[i]->probe_packets = 0;
709 continue;
710 }
711 st= s->streams[pkt->stream_index];
712
713 switch(st->codec->codec_type){
714 case AVMEDIA_TYPE_VIDEO:
715 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
716 break;
717 case AVMEDIA_TYPE_AUDIO:
718 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
719 break;
720 case AVMEDIA_TYPE_SUBTITLE:
721 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
722 break;
723 }
724
725 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
726 !st->probe_packets))
727 return ret;
728
729 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
730 s->raw_packet_buffer_remaining_size -= pkt->size;
731
732 if(st->codec->codec_id == CODEC_ID_PROBE){
733 AVProbeData *pd = &st->probe_data;
734 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
735 --st->probe_packets;
736
737 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
738 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
739 pd->buf_size += pkt->size;
740 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
741
742 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
743 //FIXME we don't reduce the score to 0 when we run out of buffer space in bytes
744 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
745 if(st->codec->codec_id != CODEC_ID_PROBE){
746 pd->buf_size=0;
747 av_freep(&pd->buf);
748 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
749 }
750 }
751 }
752 }
753 }
754
755 /**********************************************************/
756
757 /**
758 * Get the number of samples of an audio frame. Return -1 on error.
759 */
760 static int get_audio_frame_size(AVCodecContext *enc, int size)
761 {
762 int frame_size;
763
764 if(enc->codec_id == CODEC_ID_VORBIS)
765 return -1;
766
767 if (enc->frame_size <= 1) {
768 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
769
770 if (bits_per_sample) {
771 if (enc->channels == 0)
772 return -1;
773 frame_size = (size << 3) / (bits_per_sample * enc->channels);
774 } else {
775 /* used for example by ADPCM codecs */
776 if (enc->bit_rate == 0)
777 return -1;
778 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
779 }
780 } else {
781 frame_size = enc->frame_size;
782 }
783 return frame_size;
784 }
785
786
787 /**
788 * Compute the frame duration as a fraction *pnum / *pden, in seconds. Both are set to 0 if the duration cannot be determined.
789 */
790 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
791 AVCodecParserContext *pc, AVPacket *pkt)
792 {
793 int frame_size;
794
795 *pnum = 0;
796 *pden = 0;
797 switch(st->codec->codec_type) {
798 case AVMEDIA_TYPE_VIDEO:
799 if(st->time_base.num*1000LL > st->time_base.den){
800 *pnum = st->time_base.num;
801 *pden = st->time_base.den;
802 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
803 *pnum = st->codec->time_base.num;
804 *pden = st->codec->time_base.den;
805 if (pc && pc->repeat_pict) {
806 *pnum = (*pnum) * (1 + pc->repeat_pict);
807 }
808 //If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet.
809 //Thus, if we have no parser, leave the duration undefined.
810 if(st->codec->ticks_per_frame>1 && !pc){
811 *pnum = *pden = 0;
812 }
813 }
814 break;
815 case AVMEDIA_TYPE_AUDIO:
816 frame_size = get_audio_frame_size(st->codec, pkt->size);
817 if (frame_size <= 0 || st->codec->sample_rate <= 0)
818 break;
819 *pnum = frame_size;
820 *pden = st->codec->sample_rate;
821 break;
822 default:
823 break;
824 }
825 }
826
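/**
 * Return 1 if the codec emits only intra-coded frames (so every packet can
 * be treated as a keyframe), 0 otherwise.
 */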
827 static int is_intra_only(AVCodecContext *enc){
828 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
829 return 1;
830 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
831 switch(enc->codec_id){
832 case CODEC_ID_MJPEG:
833 case CODEC_ID_MJPEGB:
834 case CODEC_ID_LJPEG:
835 case CODEC_ID_RAWVIDEO:
836 case CODEC_ID_DVVIDEO:
837 case CODEC_ID_HUFFYUV:
838 case CODEC_ID_FFVHUFF:
839 case CODEC_ID_ASV1:
840 case CODEC_ID_ASV2:
841 case CODEC_ID_VCR1:
842 case CODEC_ID_DNXHD:
843 case CODEC_ID_JPEG2000:
844 return 1;
845 default: break;
846 }
847 }
848 return 0;
849 }
850
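/**
 * Set the first DTS of a stream once it becomes known and shift the
 * timestamps of the packets already buffered for that stream accordingly.
 */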
851 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
852 int64_t dts, int64_t pts)
853 {
854 AVStream *st= s->streams[stream_index];
855 AVPacketList *pktl= s->packet_buffer;
856
857 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
858 return;
859
860 st->first_dts= dts - st->cur_dts;
861 st->cur_dts= dts;
862
863 for(; pktl; pktl= pktl->next){
864 if(pktl->pkt.stream_index != stream_index)
865 continue;
866 //FIXME think more about this check
867 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
868 pktl->pkt.pts += st->first_dts;
869
870 if(pktl->pkt.dts != AV_NOPTS_VALUE)
871 pktl->pkt.dts += st->first_dts;
872
873 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
874 st->start_time= pktl->pkt.pts;
875 }
876 if (st->start_time == AV_NOPTS_VALUE)
877 st->start_time = pts;
878 }
879
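/**
 * Fill in the missing DTS/PTS/duration of packets already buffered for a
 * stream once the packet duration becomes known.
 */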
880 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
881 {
882 AVPacketList *pktl= s->packet_buffer;
883 int64_t cur_dts= 0;
884
885 if(st->first_dts != AV_NOPTS_VALUE){
886 cur_dts= st->first_dts;
887 for(; pktl; pktl= pktl->next){
888 if(pktl->pkt.stream_index == pkt->stream_index){
889 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
890 break;
891 cur_dts -= pkt->duration;
892 }
893 }
894 pktl= s->packet_buffer;
895 st->first_dts = cur_dts;
896 }else if(st->cur_dts)
897 return;
898
899 for(; pktl; pktl= pktl->next){
900 if(pktl->pkt.stream_index != pkt->stream_index)
901 continue;
902 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
903 && !pktl->pkt.duration){
904 pktl->pkt.dts= cur_dts;
905 if(!st->codec->has_b_frames)
906 pktl->pkt.pts= cur_dts;
907 cur_dts += pkt->duration;
908 pktl->pkt.duration= pkt->duration;
909 }else
910 break;
911 }
912 if(st->first_dts == AV_NOPTS_VALUE)
913 st->cur_dts= cur_dts;
914 }
915
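/**
 * Fill in the missing fields of a packet (pts, dts, duration, flags) using
 * the parser state and the per-stream timing information.
 */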
916 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
917 AVCodecParserContext *pc, AVPacket *pkt)
918 {
919 int num, den, presentation_delayed, delay, i;
920 int64_t offset;
921
922 if (s->flags & AVFMT_FLAG_NOFILLIN)
923 return;
924
925 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
926 pkt->dts= AV_NOPTS_VALUE;
927
928 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
929 //FIXME Set low_delay = 0 when has_b_frames = 1
930 st->codec->has_b_frames = 1;
931
932 /* do we have a video B-frame ? */
933 delay= st->codec->has_b_frames;
934 presentation_delayed = 0;
935
936 // ignore delay caused by frame threading so that the mpeg2-without-dts
937 // warning will not trigger
938 if (delay && st->codec->active_thread_type&FF_THREAD_FRAME)
939 delay -= st->codec->thread_count-1;
940
941 /* XXX: need has_b_frames, but cannot get it if the codec is
942 not initialized */
943 if (delay &&
944 pc && pc->pict_type != AV_PICTURE_TYPE_B)
945 presentation_delayed = 1;
946
947 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
948 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
949 pkt->dts -= 1LL<<st->pts_wrap_bits;
950 }
951
952 // some MPEG-2 streams in MPEG-PS lack DTS (issue171 / input_file.mpg);
953 // we take the conservative approach and discard both.
954 // Note: if this misbehaves for an H.264 file then presentation_delayed is possibly not set correctly.
955 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
956 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
957 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
958 }
959
960 if (pkt->duration == 0) {
961 compute_frame_duration(&num, &den, st, pc, pkt);
962 if (den && num) {
963 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
964
965 if(pkt->duration != 0 && s->packet_buffer)
966 update_initial_durations(s, st, pkt);
967 }
968 }
969
970 /* correct timestamps with byte offset if demuxers only have timestamps
971 on packet boundaries */
972 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
973 /* this will estimate bitrate based on this frame's duration and size */
974 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
975 if(pkt->pts != AV_NOPTS_VALUE)
976 pkt->pts += offset;
977 if(pkt->dts != AV_NOPTS_VALUE)
978 pkt->dts += offset;
979 }
980
981 if (pc && pc->dts_sync_point >= 0) {
982 // we have synchronization info from the parser
983 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
984 if (den > 0) {
985 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
986 if (pkt->dts != AV_NOPTS_VALUE) {
987 // got DTS from the stream, update reference timestamp
988 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
989 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
990 } else if (st->reference_dts != AV_NOPTS_VALUE) {
991 // compute DTS based on reference timestamp
992 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
993 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
994 }
995 if (pc->dts_sync_point > 0)
996 st->reference_dts = pkt->dts; // new reference
997 }
998 }
999
1000 /* This may be redundant, but it should not hurt. */
1001 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1002 presentation_delayed = 1;
1003
1004 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
1005 /* interpolate PTS and DTS if they are not present */
1006 //We skip H264 currently because delay and has_b_frames are not reliably set
1007 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
1008 if (presentation_delayed) {
1009 /* DTS = decompression timestamp */
1010 /* PTS = presentation timestamp */
1011 if (pkt->dts == AV_NOPTS_VALUE)
1012 pkt->dts = st->last_IP_pts;
1013 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
1014 if (pkt->dts == AV_NOPTS_VALUE)
1015 pkt->dts = st->cur_dts;
1016
1017 /* this is tricky: the dts must be incremented by the duration
1018 of the frame we are displaying, i.e. the last I- or P-frame */
1019 if (st->last_IP_duration == 0)
1020 st->last_IP_duration = pkt->duration;
1021 if(pkt->dts != AV_NOPTS_VALUE)
1022 st->cur_dts = pkt->dts + st->last_IP_duration;
1023 st->last_IP_duration = pkt->duration;
1024 st->last_IP_pts= pkt->pts;
1025 /* cannot compute PTS if not present (we can compute it only
1026 by knowing the future) */
1027 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
1028 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
1029 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
1030 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
1031 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
1032 pkt->pts += pkt->duration;
1033 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
1034 }
1035 }
1036
1037 /* presentation is not delayed: PTS and DTS are the same */
1038 if(pkt->pts == AV_NOPTS_VALUE)
1039 pkt->pts = pkt->dts;
1040 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
1041 if(pkt->pts == AV_NOPTS_VALUE)
1042 pkt->pts = st->cur_dts;
1043 pkt->dts = pkt->pts;
1044 if(pkt->pts != AV_NOPTS_VALUE)
1045 st->cur_dts = pkt->pts + pkt->duration;
1046 }
1047 }
1048
1049 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
1050 st->pts_buffer[0]= pkt->pts;
1051 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1052 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1053 if(pkt->dts == AV_NOPTS_VALUE)
1054 pkt->dts= st->pts_buffer[0];
1055 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above, so we try here
1056 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1057 }
1058 if(pkt->dts > st->cur_dts)
1059 st->cur_dts = pkt->dts;
1060 }
1061
1062 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1063
1064 /* update flags */
1065 if(is_intra_only(st->codec))
1066 pkt->flags |= AV_PKT_FLAG_KEY;
1067 else if (pc) {
1068 pkt->flags = 0;
1069 /* keyframe computation */
1070 if (pc->key_frame == 1)
1071 pkt->flags |= AV_PKT_FLAG_KEY;
1072 else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
1073 pkt->flags |= AV_PKT_FLAG_KEY;
1074 }
1075 if (pc)
1076 pkt->convergence_duration = pc->convergence_duration;
1077 }
1078
1079
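/**
 * Read the next frame from the input, running the raw packets through the
 * stream parser when one is required, and fill in the timing fields of the
 * returned packet.
 */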
1080 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1081 {
1082 AVStream *st;
1083 int len, ret, i;
1084
1085 av_init_packet(pkt);
1086
1087 for(;;) {
1088 /* select current input stream component */
1089 st = s->cur_st;
1090 if (st) {
1091 if (!st->need_parsing || !st->parser) {
1092 /* no parsing needed: we just output the packet as is */
1093 /* raw data support */
1094 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
1095 compute_pkt_fields(s, st, NULL, pkt);
1096 s->cur_st = NULL;
1097 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1098 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1099 ff_reduce_index(s, st->index);
1100 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1101 }
1102 break;
1103 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1104 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1105 st->cur_ptr, st->cur_len,
1106 st->cur_pkt.pts, st->cur_pkt.dts,
1107 st->cur_pkt.pos);
1108 st->cur_pkt.pts = AV_NOPTS_VALUE;
1109 st->cur_pkt.dts = AV_NOPTS_VALUE;
1110 /* increment read pointer */
1111 st->cur_ptr += len;
1112 st->cur_len -= len;
1113
1114 /* return packet if any */
1115 if (pkt->size) {
1116 got_packet:
1117 pkt->duration = 0;
1118 pkt->stream_index = st->index;
1119 pkt->pts = st->parser->pts;
1120 pkt->dts = st->parser->dts;
1121 pkt->pos = st->parser->pos;
1122 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
1123 s->cur_st = NULL;
1124 pkt->destruct= st->cur_pkt.destruct;
1125 st->cur_pkt.destruct= NULL;
1126 st->cur_pkt.data = NULL;
1127 assert(st->cur_len == 0);
1128 }else{
1129 pkt->destruct = NULL;
1130 }
1131 compute_pkt_fields(s, st, st->parser, pkt);
1132
1133 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1134 ff_reduce_index(s, st->index);
1135 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1136 0, 0, AVINDEX_KEYFRAME);
1137 }
1138
1139 break;
1140 }
1141 } else {
1142 /* free packet */
1143 av_free_packet(&st->cur_pkt);
1144 s->cur_st = NULL;
1145 }
1146 } else {
1147 AVPacket cur_pkt;
1148 /* read next packet */
1149 ret = av_read_packet(s, &cur_pkt);
1150 if (ret < 0) {
1151 if (ret == AVERROR(EAGAIN))
1152 return ret;
1153 /* return the last frames, if any */
1154 for(i = 0; i < s->nb_streams; i++) {
1155 st = s->streams[i];
1156 if (st->parser && st->need_parsing) {
1157 av_parser_parse2(st->parser, st->codec,
1158 &pkt->data, &pkt->size,
1159 NULL, 0,
1160 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1161 AV_NOPTS_VALUE);
1162 if (pkt->size)
1163 goto got_packet;
1164 }
1165 }
1166 /* no more packets: really terminate parsing */
1167 return ret;
1168 }
1169 st = s->streams[cur_pkt.stream_index];
1170 st->cur_pkt= cur_pkt;
1171
1172 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1173 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1174 st->cur_pkt.pts < st->cur_pkt.dts){
1175 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1176 st->cur_pkt.stream_index,
1177 st->cur_pkt.pts,
1178 st->cur_pkt.dts,
1179 st->cur_pkt.size);
1180 // av_free_packet(&st->cur_pkt);
1181 // return -1;
1182 }
1183
1184 if(s->debug & FF_FDEBUG_TS)
1185 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1186 st->cur_pkt.stream_index,
1187 st->cur_pkt.pts,
1188 st->cur_pkt.dts,
1189 st->cur_pkt.size,
1190 st->cur_pkt.duration,
1191 st->cur_pkt.flags);
1192
1193 s->cur_st = st;
1194 st->cur_ptr = st->cur_pkt.data;
1195 st->cur_len = st->cur_pkt.size;
1196 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1197 st->parser = av_parser_init(st->codec->codec_id);
1198 if (!st->parser) {
1199 /* no parser available: just output the raw packets */
1200 st->need_parsing = AVSTREAM_PARSE_NONE;
1201 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1202 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1203 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1204 st->parser->flags |= PARSER_FLAG_ONCE;
1205 }
1206 }
1207 }
1208 }
1209 if(s->debug & FF_FDEBUG_TS)
1210 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1211 pkt->stream_index,
1212 pkt->pts,
1213 pkt->dts,
1214 pkt->size,
1215 pkt->duration,
1216 pkt->flags);
1217
1218 return 0;
1219 }
1220
1221 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1222 {
1223 AVPacketList *pktl;
1224 int eof=0;
1225 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1226
1227 for(;;){
1228 pktl = s->packet_buffer;
1229 if (pktl) {
1230 AVPacket *next_pkt= &pktl->pkt;
1231
1232 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1233 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1234 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1235 if( pktl->pkt.stream_index == next_pkt->stream_index
1236 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1237 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1238 next_pkt->pts= pktl->pkt.dts;
1239 }
1240 pktl= pktl->next;
1241 }
1242 pktl = s->packet_buffer;
1243 }
1244
1245 if( next_pkt->pts != AV_NOPTS_VALUE
1246 || next_pkt->dts == AV_NOPTS_VALUE
1247 || !genpts || eof){
1248 /* read packet from packet buffer, if there is data */
1249 *pkt = *next_pkt;
1250 s->packet_buffer = pktl->next;
1251 av_free(pktl);
1252 return 0;
1253 }
1254 }
1255 if(genpts){
1256 int ret= av_read_frame_internal(s, pkt);
1257 if(ret<0){
1258 if(pktl && ret != AVERROR(EAGAIN)){
1259 eof=1;
1260 continue;
1261 }else
1262 return ret;
1263 }
1264
1265 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1266 &s->packet_buffer_end)) < 0)
1267 return AVERROR(ENOMEM);
1268 }else{
1269 assert(!s->packet_buffer);
1270 return av_read_frame_internal(s, pkt);
1271 }
1272 }
1273 }
1274
1275 /* empty (free) the packet queue */
1276 static void flush_packet_queue(AVFormatContext *s)
1277 {
1278 AVPacketList *pktl;
1279
1280 for(;;) {
1281 pktl = s->packet_buffer;
1282 if (!pktl)
1283 break;
1284 s->packet_buffer = pktl->next;
1285 av_free_packet(&pktl->pkt);
1286 av_free(pktl);
1287 }
1288 while(s->raw_packet_buffer){
1289 pktl = s->raw_packet_buffer;
1290 s->raw_packet_buffer = pktl->next;
1291 av_free_packet(&pktl->pkt);
1292 av_free(pktl);
1293 }
1294 s->packet_buffer_end=
1295 s->raw_packet_buffer_end= NULL;
1296 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1297 }
1298
1299 /*******************************************************/
1300 /* seek support */
1301
1302 int av_find_default_stream_index(AVFormatContext *s)
1303 {
1304 int first_audio_index = -1;
1305 int i;
1306 AVStream *st;
1307
1308 if (s->nb_streams <= 0)
1309 return -1;
1310 for(i = 0; i < s->nb_streams; i++) {
1311 st = s->streams[i];
1312 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1313 return i;
1314 }
1315 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1316 first_audio_index = i;
1317 }
1318 return first_audio_index >= 0 ? first_audio_index : 0;
1319 }
1320
1321 /**
1322 * Flush the frame reader.
1323 */
1324 void ff_read_frame_flush(AVFormatContext *s)
1325 {
1326 AVStream *st;
1327 int i, j;
1328
1329 flush_packet_queue(s);
1330
1331 s->cur_st = NULL;
1332
1333 /* for each stream, reset read state */
1334 for(i = 0; i < s->nb_streams; i++) {
1335 st = s->streams[i];
1336
1337 if (st->parser) {
1338 av_parser_close(st->parser);
1339 st->parser = NULL;
1340 av_free_packet(&st->cur_pkt);
1341 }
1342 st->last_IP_pts = AV_NOPTS_VALUE;
1343 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1344 st->reference_dts = AV_NOPTS_VALUE;
1345 /* fail safe */
1346 st->cur_ptr = NULL;
1347 st->cur_len = 0;
1348
1349 st->probe_packets = MAX_PROBE_PACKETS;
1350
1351 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1352 st->pts_buffer[j]= AV_NOPTS_VALUE;
1353 }
1354 }
1355
1356 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1357 int i;
1358
1359 for(i = 0; i < s->nb_streams; i++) {
1360 AVStream *st = s->streams[i];
1361
1362 st->cur_dts = av_rescale(timestamp,
1363 st->time_base.den * (int64_t)ref_st->time_base.num,
1364 st->time_base.num * (int64_t)ref_st->time_base.den);
1365 }
1366 }
1367
1368 void ff_reduce_index(AVFormatContext *s, int stream_index)
1369 {
1370 AVStream *st= s->streams[stream_index];
1371 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1372
1373 if((unsigned)st->nb_index_entries >= max_entries){
1374 int i;
1375 for(i=0; 2*i<st->nb_index_entries; i++)
1376 st->index_entries[i]= st->index_entries[2*i];
1377 st->nb_index_entries= i;
1378 }
1379 }
1380
1381 int ff_add_index_entry(AVIndexEntry **index_entries,
1382 int *nb_index_entries,
1383 unsigned int *index_entries_allocated_size,
1384 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1385 {
1386 AVIndexEntry *entries, *ie;
1387 int index;
1388
1389 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1390 return -1;
1391
1392 entries = av_fast_realloc(*index_entries,
1393 index_entries_allocated_size,
1394 (*nb_index_entries + 1) *
1395 sizeof(AVIndexEntry));
1396 if(!entries)
1397 return -1;
1398
1399 *index_entries= entries;
1400
1401 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1402
1403 if(index<0){
1404 index= (*nb_index_entries)++;
1405 ie= &entries[index];
1406 assert(index==0 || ie[-1].timestamp < timestamp);
1407 }else{
1408 ie= &entries[index];
1409 if(ie->timestamp != timestamp){
1410 if(ie->timestamp <= timestamp)
1411 return -1;
1412 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1413 (*nb_index_entries)++;
1414 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1415 distance= ie->min_distance;
1416 }
1417
1418 ie->pos = pos;
1419 ie->timestamp = timestamp;
1420 ie->min_distance= distance;
1421 ie->size= size;
1422 ie->flags = flags;
1423
1424 return index;
1425 }
1426
1427 int av_add_index_entry(AVStream *st,
1428 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1429 {
1430 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1431 &st->index_entries_allocated_size, pos,
1432 timestamp, size, distance, flags);
1433 }
1434
1435 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1436 int64_t wanted_timestamp, int flags)
1437 {
1438 int a, b, m;
1439 int64_t timestamp;
1440
1441 a = - 1;
1442 b = nb_entries;
1443
1444 //optimize appending index entries at the end
1445 if(b && entries[b-1].timestamp < wanted_timestamp)
1446 a= b-1;
1447
1448 while (b - a > 1) {
1449 m = (a + b) >> 1;
1450 timestamp = entries[m].timestamp;
1451 if(timestamp >= wanted_timestamp)
1452 b = m;
1453 if(timestamp <= wanted_timestamp)
1454 a = m;
1455 }
1456 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1457
1458 if(!(flags & AVSEEK_FLAG_ANY)){
1459 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1460 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1461 }
1462 }
1463
1464 if(m == nb_entries)
1465 return -1;
1466 return m;
1467 }
1468
1469 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1470 int flags)
1471 {
1472 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1473 wanted_timestamp, flags);
1474 }
1475
1476 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1477 AVInputFormat *avif= s->iformat;
1478 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1479 int64_t ts_min, ts_max, ts;
1480 int index;
1481 int64_t ret;
1482 AVStream *st;
1483
1484 if (stream_index < 0)
1485 return -1;
1486
1487 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1488
1489 ts_max=
1490 ts_min= AV_NOPTS_VALUE;
1491 pos_limit= -1; //gcc falsely says it may be uninitialized
1492
1493 st= s->streams[stream_index];
1494 if(st->index_entries){
1495 AVIndexEntry *e;
1496
1497 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME the whole function must be checked for non-keyframe entries in the index case, especially read_timestamp()
1498 index= FFMAX(index, 0);
1499 e= &st->index_entries[index];
1500
1501 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1502 pos_min= e->pos;
1503 ts_min= e->timestamp;
1504 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1505 pos_min,ts_min);
1506 }else{
1507 assert(index==0);
1508 }
1509
1510 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1511 assert(index < st->nb_index_entries);
1512 if(index >= 0){
1513 e= &st->index_entries[index];
1514 assert(e->timestamp >= target_ts);
1515 pos_max= e->pos;
1516 ts_max= e->timestamp;
1517 pos_limit= pos_max - e->min_distance;
1518 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1519 pos_max,pos_limit, ts_max);
1520 }
1521 }
1522
1523 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1524 if(pos<0)
1525 return -1;
1526
1527 /* do the seek */
1528 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1529 return ret;
1530
1531 av_update_cur_dts(s, st, ts);
1532
1533 return 0;
1534 }
1535
1536 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1537 int64_t pos, ts;
1538 int64_t start_pos, filesize;
1539 int no_change;
1540
1541 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1542
1543 if(ts_min == AV_NOPTS_VALUE){
1544 pos_min = s->data_offset;
1545 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1546 if (ts_min == AV_NOPTS_VALUE)
1547 return -1;
1548 }
1549
1550 if(ts_max == AV_NOPTS_VALUE){
1551 int step= 1024;
1552 filesize = avio_size(s->pb);
1553 pos_max = filesize - 1;
1554 do{
1555 pos_max -= step;
1556 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1557 step += step;
1558 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1559 if (ts_max == AV_NOPTS_VALUE)
1560 return -1;
1561
1562 for(;;){
1563 int64_t tmp_pos= pos_max + 1;
1564 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1565 if(tmp_ts == AV_NOPTS_VALUE)
1566 break;
1567 ts_max= tmp_ts;
1568 pos_max= tmp_pos;
1569 if(tmp_pos >= filesize)
1570 break;
1571 }
1572 pos_limit= pos_max;
1573 }
1574
1575 if(ts_min > ts_max){
1576 return -1;
1577 }else if(ts_min == ts_max){
1578 pos_limit= pos_min;
1579 }
1580
1581 no_change=0;
1582 while (pos_min < pos_limit) {
1583 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1584 pos_min, pos_max, ts_min, ts_max);
1585 assert(pos_limit <= pos_max);
1586
1587 if(no_change==0){
1588 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1589 // interpolate the position (better than plain bisection)
1590 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1591 + pos_min - approximate_keyframe_distance;
1592 }else if(no_change==1){
1593 // bisection, if interpolation failed to change min or max pos last time
1594 pos = (pos_min + pos_limit)>>1;
1595 }else{
1596 /* linear search if bisection failed, can only happen if there
1597 are very few or no keyframes between min/max */
1598 pos=pos_min;
1599 }
1600 if(pos <= pos_min)
1601 pos= pos_min + 1;
1602 else if(pos > pos_limit)
1603 pos= pos_limit;
1604 start_pos= pos;
1605
1606 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1607 if(pos == pos_max)
1608 no_change++;
1609 else
1610 no_change=0;
1611 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1612 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1613 pos_limit, start_pos, no_change);
1614 if(ts == AV_NOPTS_VALUE){
1615 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1616 return -1;
1617 }
1618 assert(ts != AV_NOPTS_VALUE);
1619 if (target_ts <= ts) {
1620 pos_limit = start_pos - 1;
1621 pos_max = pos;
1622 ts_max = ts;
1623 }
1624 if (target_ts >= ts) {
1625 pos_min = pos;
1626 ts_min = ts;
1627 }
1628 }
1629
1630 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1631 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1632 pos_min = pos;
1633 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1634 pos_min++;
1635 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1636 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1637 pos, ts_min, target_ts, ts_max);
1638 *ts_ret= ts;
1639 return pos;
1640 }
1641
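/**
 * Seek to a byte position, clamping it to the valid data range of the file.
 */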
1642 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1643 int64_t pos_min, pos_max;
1644 #if 0
1645 AVStream *st;
1646
1647 if (stream_index < 0)
1648 return -1;
1649
1650 st= s->streams[stream_index];
1651 #endif
1652
1653 pos_min = s->data_offset;
1654 pos_max = avio_size(s->pb) - 1;
1655
1656 if (pos < pos_min) pos= pos_min;
1657 else if(pos > pos_max) pos= pos_max;
1658
1659 avio_seek(s->pb, pos, SEEK_SET);
1660
1661 #if 0
1662 av_update_cur_dts(s, st, ts);
1663 #endif
1664 return 0;
1665 }
1666
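/**
 * Generic seek fallback: use the stream index when it covers the target
 * timestamp, otherwise read and index packets linearly until it is reached.
 */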
1667 static int av_seek_frame_generic(AVFormatContext *s,
1668 int stream_index, int64_t timestamp, int flags)
1669 {
1670 int index;
1671 int64_t ret;
1672 AVStream *st;
1673 AVIndexEntry *ie;
1674
1675 st = s->streams[stream_index];
1676
1677 index = av_index_search_timestamp(st, timestamp, flags);
1678
1679 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1680 return -1;
1681
1682 if(index < 0 || index==st->nb_index_entries-1){
1683 int i;
1684 AVPacket pkt;
1685
1686 if(st->nb_index_entries){
1687 assert(st->index_entries);
1688 ie= &st->index_entries[st->nb_index_entries-1];
1689 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1690 return ret;
1691 av_update_cur_dts(s, st, ie->timestamp);
1692 }else{
1693 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1694 return ret;
1695 }
1696 for(i=0;; i++) {
1697 int ret;
1698 do{
1699 ret = av_read_frame(s, &pkt);
1700 }while(ret == AVERROR(EAGAIN));
1701 if(ret<0)
1702 break;
1703 av_free_packet(&pkt);
1704 if(stream_index == pkt.stream_index){
1705 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1706 break;
1707 }
1708 }
1709 index = av_index_search_timestamp(st, timestamp, flags);
1710 }
1711 if (index < 0)
1712 return -1;
1713
1714 ff_read_frame_flush(s);
1715 if (s->iformat->read_seek){
1716 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1717 return 0;
1718 }
1719 ie = &st->index_entries[index];
1720 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1721 return ret;
1722 av_update_cur_dts(s, st, ie->timestamp);
1723
1724 return 0;
1725 }
1726
1727 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1728 {
1729 int ret;
1730 AVStream *st;
1731
1732 ff_read_frame_flush(s);
1733
1734 if(flags & AVSEEK_FLAG_BYTE)
1735 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1736
1737 if(stream_index < 0){
1738 stream_index= av_find_default_stream_index(s);
1739 if(stream_index < 0)
1740 return -1;
1741
1742 st= s->streams[stream_index];
1743 /* timestamp for default must be expressed in AV_TIME_BASE units */
1744 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1745 }
1746
1747 /* first, we try the format-specific seek */
1748 if (s->iformat->read_seek)
1749 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1750 else
1751 ret = -1;
1752 if (ret >= 0) {
1753 return 0;
1754 }
1755
1756 if(s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH))
1757 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1758 else if (!(s->iformat->flags & AVFMT_NOGENSEARCH))
1759 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1760 else
1761 return -1;
1762 }
1763
1764 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1765 {
1766 if(min_ts > ts || max_ts < ts)
1767 return -1;
1768
1769 ff_read_frame_flush(s);
1770
1771 if (s->iformat->read_seek2)
1772 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1773
1774 if(s->iformat->read_timestamp){
1775 //try to seek via read_timestamp()
1776 }
1777
1778 //Fall back to the old API if the new one is not implemented but the old one is.
1779 //Note: the old API has somewhat different semantics.
1780 if(s->iformat->read_seek || 1)
1781 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1782
1783 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1784 }
1785
1786 /*******************************************************/
1787
1788 /**
1789 * Return TRUE if at least one stream has an accurate duration.
1790 *
1791 * @return TRUE if at least one component stream has an accurate duration.
1792 */
1793 static int av_has_duration(AVFormatContext *ic)
1794 {
1795 int i;
1796 AVStream *st;
1797
1798 for(i = 0;i < ic->nb_streams; i++) {
1799 st = ic->streams[i];
1800 if (st->duration != AV_NOPTS_VALUE)
1801 return 1;
1802 }
1803 return 0;
1804 }
1805
1806 /**
1807 * Estimate the global stream timings from the timings of the component streams.
1808 *
1809 * Also computes the global bitrate if possible.
1810 */
1811 static void av_update_stream_timings(AVFormatContext *ic)
1812 {
1813 int64_t start_time, start_time1, end_time, end_time1;
1814 int64_t duration, duration1;
1815 int i;
1816 AVStream *st;
1817
1818 start_time = INT64_MAX;
1819 end_time = INT64_MIN;
1820 duration = INT64_MIN;
1821 for(i = 0;i < ic->nb_streams; i++) {
1822 st = ic->streams[i];
1823 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1824 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1825 if (start_time1 < start_time)
1826 start_time = start_time1;
1827 if (st->duration != AV_NOPTS_VALUE) {
1828 end_time1 = start_time1
1829 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1830 if (end_time1 > end_time)
1831 end_time = end_time1;
1832 }
1833 }
1834 if (st->duration != AV_NOPTS_VALUE) {
1835 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1836 if (duration1 > duration)
1837 duration = duration1;
1838 }
1839 }
1840 if (start_time != INT64_MAX) {
1841 ic->start_time = start_time;
1842 if (end_time != INT64_MIN) {
1843 if (end_time - start_time > duration)
1844 duration = end_time - start_time;
1845 }
1846 }
1847 if (duration != INT64_MIN) {
1848 ic->duration = duration;
1849 if (ic->file_size > 0) {
1850 /* compute the bitrate */
1851 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1852 (double)ic->duration;
1853 }
1854 }
1855 }
1856
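/**
 * Propagate the global start time and duration to every stream that has no
 * timing information of its own.
 */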
1857 static void fill_all_stream_timings(AVFormatContext *ic)
1858 {
1859 int i;
1860 AVStream *st;
1861
1862 av_update_stream_timings(ic);
1863 for(i = 0;i < ic->nb_streams; i++) {
1864 st = ic->streams[i];
1865 if (st->start_time == AV_NOPTS_VALUE) {
1866 if(ic->start_time != AV_NOPTS_VALUE)
1867 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1868 if(ic->duration != AV_NOPTS_VALUE)
1869 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1870 }
1871 }
1872 }
1873
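/**
 * Estimate the total bitrate from the per-stream bitrates and, when needed,
 * derive stream durations from the file size and that bitrate.
 */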
1874 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1875 {
1876 int64_t filesize, duration;
1877 int bit_rate, i;
1878 AVStream *st;
1879
1880 /* if bit_rate is already set, we believe it */
1881 if (ic->bit_rate <= 0) {
1882 bit_rate = 0;
1883 for(i=0;i<ic->nb_streams;i++) {
1884 st = ic->streams[i];
1885 if (st->codec->bit_rate > 0)
1886 bit_rate += st->codec->bit_rate;
1887 }
1888 ic->bit_rate = bit_rate;
1889 }
1890
1891 /* if duration is already set, we believe it */
1892 if (ic->duration == AV_NOPTS_VALUE &&
1893 ic->bit_rate != 0 &&
1894 ic->file_size != 0) {
1895 filesize = ic->file_size;
1896 if (filesize > 0) {
1897 for(i = 0; i < ic->nb_streams; i++) {
1898 st = ic->streams[i];
1899 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1900 if (st->duration == AV_NOPTS_VALUE)
1901 st->duration = duration;
1902 }
1903 }
1904 }
1905 }
1906
1907 #define DURATION_MAX_READ_SIZE 250000
1908 #define DURATION_MAX_RETRY 3
1909
1910 /* only usable for MPEG-PS streams */
1911 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1912 {
1913 AVPacket pkt1, *pkt = &pkt1;
1914 AVStream *st;
1915 int read_size, i, ret;
1916 int64_t end_time;
1917 int64_t filesize, offset, duration;
1918 int retry=0;
1919
1920 ic->cur_st = NULL;
1921
1922 /* flush packet queue */
1923 flush_packet_queue(ic);
1924
1925 for (i=0; i<ic->nb_streams; i++) {
1926 st = ic->streams[i];
1927 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1928 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");
1929
1930 if (st->parser) {
1931 av_parser_close(st->parser);
1932 st->parser= NULL;
1933 av_free_packet(&st->cur_pkt);
1934 }
1935 }
1936
1937 /* estimate the end time (duration) */
1938 /* XXX: may need to support wrapping */
1939 filesize = ic->file_size;
1940 end_time = AV_NOPTS_VALUE;
1941 do{
1942 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1943 if (offset < 0)
1944 offset = 0;
1945
1946 avio_seek(ic->pb, offset, SEEK_SET);
1947 read_size = 0;
1948 for(;;) {
1949 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1950 break;
1951
1952 do{
1953 ret = av_read_packet(ic, pkt);
1954 }while(ret == AVERROR(EAGAIN));
1955 if (ret != 0)
1956 break;
1957 read_size += pkt->size;
1958 st = ic->streams[pkt->stream_index];
1959 if (pkt->pts != AV_NOPTS_VALUE &&
1960 (st->start_time != AV_NOPTS_VALUE ||
1961 st->first_dts != AV_NOPTS_VALUE)) {
1962 duration = end_time = pkt->pts;
1963 if (st->start_time != AV_NOPTS_VALUE) duration -= st->start_time;
1964 else duration -= st->first_dts;
1965 if (duration < 0)
1966 duration += 1LL<<st->pts_wrap_bits;
1967 if (duration > 0) {
1968 if (st->duration == AV_NOPTS_VALUE ||
1969 st->duration < duration)
1970 st->duration = duration;
1971 }
1972 }
1973 av_free_packet(pkt);
1974 }
1975 }while( end_time==AV_NOPTS_VALUE
1976 && filesize > (DURATION_MAX_READ_SIZE<<retry)
1977 && ++retry <= DURATION_MAX_RETRY);
1978
1979 fill_all_stream_timings(ic);
1980
1981 avio_seek(ic->pb, old_offset, SEEK_SET);
1982 for (i=0; i<ic->nb_streams; i++) {
1983 st= ic->streams[i];
1984 st->cur_dts= st->first_dts;
1985 st->last_IP_pts = AV_NOPTS_VALUE;
1986 st->reference_dts = AV_NOPTS_VALUE;
1987 }
1988 }
1989
1990 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1991 {
1992 int64_t file_size;
1993
1994 /* get the file size, if possible */
1995 if (ic->iformat->flags & AVFMT_NOFILE) {
1996 file_size = 0;
1997 } else {
1998 file_size = avio_size(ic->pb);
1999 if (file_size < 0)
2000 file_size = 0;
2001 }
2002 ic->file_size = file_size;
2003
2004 if ((!strcmp(ic->iformat->name, "mpeg") ||
2005 !strcmp(ic->iformat->name, "mpegts")) &&
2006 file_size && ic->pb->seekable) {
2007 /* get accurate estimate from the PTSes */
2008 av_estimate_timings_from_pts(ic, old_offset);
2009 } else if (av_has_duration(ic)) {
2010 /* at least one component has timings - we use them for all
2011 the components */
2012 fill_all_stream_timings(ic);
2013 } else {
2014 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2015 /* less precise: use bitrate info */
2016 av_estimate_timings_from_bit_rate(ic);
2017 }
2018 av_update_stream_timings(ic);
2019
2020 {
2021 int i;
2022 AVStream av_unused *st;
2023 for(i = 0;i < ic->nb_streams; i++) {
2024 st = ic->streams[i];
2025 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2026 (double) st->start_time / AV_TIME_BASE,
2027 (double) st->duration / AV_TIME_BASE);
2028 }
2029 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2030 (double) ic->start_time / AV_TIME_BASE,
2031 (double) ic->duration / AV_TIME_BASE,
2032 ic->bit_rate / 1000);
2033 }
2034 }
2035
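/* Return whether enough codec parameters are known to use the stream:
 * for audio this means sample rate, channel count and sample format
 * (plus frame_size for a few codecs), for video width and pixel format,
 * and in all cases a known codec id. */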
2036 static int has_codec_parameters(AVCodecContext *enc)
2037 {
2038 int val;
2039 switch(enc->codec_type) {
2040 case AVMEDIA_TYPE_AUDIO:
2041 val = enc->sample_rate && enc->channels && enc->sample_fmt != AV_SAMPLE_FMT_NONE;
2042 if(!enc->frame_size &&
2043 (enc->codec_id == CODEC_ID_VORBIS ||
2044 enc->codec_id == CODEC_ID_AAC ||
2045 enc->codec_id == CODEC_ID_MP1 ||
2046 enc->codec_id == CODEC_ID_MP2 ||
2047 enc->codec_id == CODEC_ID_MP3 ||
2048 enc->codec_id == CODEC_ID_SPEEX))
2049 return 0;
2050 break;
2051 case AVMEDIA_TYPE_VIDEO:
2052 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
2053 break;
2054 default:
2055 val = 1;
2056 break;
2057 }
2058 return enc->codec_id != CODEC_ID_NONE && val != 0;
2059 }
2060
2061 static int has_decode_delay_been_guessed(AVStream *st)
2062 {
2063 return st->codec->codec_id != CODEC_ID_H264 ||
2064 st->codec_info_nb_frames >= 6 + st->codec->has_b_frames;
2065 }
2066
2067 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
2068 {
2069 int16_t *samples;
2070 AVCodec *codec;
2071 int got_picture, data_size, ret=0;
2072 AVFrame picture;
2073
2074 if(!st->codec->codec){
2075 codec = avcodec_find_decoder(st->codec->codec_id);
2076 if (!codec)
2077 return -1;
2078 ret = avcodec_open(st->codec, codec);
2079 if (ret < 0)
2080 return ret;
2081 }
2082
2083 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)){
2084 switch(st->codec->codec_type) {
2085 case AVMEDIA_TYPE_VIDEO:
2086 avcodec_get_frame_defaults(&picture);
2087 ret = avcodec_decode_video2(st->codec, &picture,
2088 &got_picture, avpkt);
2089 break;
2090 case AVMEDIA_TYPE_AUDIO:
2091 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2092 samples = av_malloc(data_size);
2093 if (!samples)
2094 goto fail;
2095 ret = avcodec_decode_audio3(st->codec, samples,
2096 &data_size, avpkt);
2097 av_free(samples);
2098 break;
2099 default:
2100 break;
2101 }
2102 }
2103 fail:
2104 return ret;
2105 }
2106
2107 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2108 {
2109 while (tags->id != CODEC_ID_NONE) {
2110 if (tags->id == id)
2111 return tags->tag;
2112 tags++;
2113 }
2114 return 0;
2115 }
2116
2117 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2118 {
2119 int i;
2120 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2121 if(tag == tags[i].tag)
2122 return tags[i].id;
2123 }
2124 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2125 if (ff_toupper4(tag) == ff_toupper4(tags[i].tag))
2126 return tags[i].id;
2127 }
2128 return CODEC_ID_NONE;
2129 }
2130
2131 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2132 {
2133 int i;
2134 for(i=0; tags && tags[i]; i++){
2135 int tag= ff_codec_get_tag(tags[i], id);
2136 if(tag) return tag;
2137 }
2138 return 0;
2139 }
2140
2141 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2142 {
2143 int i;
2144 for(i=0; tags && tags[i]; i++){
2145 enum CodecID id= ff_codec_get_id(tags[i], tag);
2146 if(id!=CODEC_ID_NONE) return id;
2147 }
2148 return CODEC_ID_NONE;
2149 }
2150
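/* Fill in missing chapter end times: a chapter without an explicit end is
 * terminated at the start of the next chapter, or at the end of the file
 * (start_time + duration) when no later chapter follows; if neither is
 * known the chapter is left zero-length. */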
2151 static void compute_chapters_end(AVFormatContext *s)
2152 {
2153 unsigned int i, j;
2154 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2155
2156 for (i = 0; i < s->nb_chapters; i++)
2157 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2158 AVChapter *ch = s->chapters[i];
2159 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2160 : INT64_MAX;
2161
2162 for (j = 0; j < s->nb_chapters; j++) {
2163 AVChapter *ch1 = s->chapters[j];
2164 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2165 if (j != i && next_start > ch->start && next_start < end)
2166 end = next_start;
2167 }
2168 ch->end = (end == INT64_MAX) ? ch->start : end;
2169 }
2170 }
2171
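/* Map an index to a "standard" frame rate, expressed in units of
 * 1/(1001*12) Hz: indices below 60*12 cover every multiple of 1/12 fps up
 * to 60 fps, the remaining entries cover the NTSC-style rates 23.976,
 * 29.97, 59.94, 11.988 and 14.985 fps. */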
2172 static int get_std_framerate(int i){
2173 if(i<60*12) return i*1001;
2174 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2175 }
2176
2177 /*
2178  * Is the time base unreliable?
2179 * This is a heuristic to balance between quick acceptance of the values in
2180 * the headers vs. some extra checks.
2181 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2182 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2183  * And there are "variable" fps files that this needs to detect as well.
2184 */
2185 static int tb_unreliable(AVCodecContext *c){
2186 if( c->time_base.den >= 101L*c->time_base.num
2187 || c->time_base.den < 5L*c->time_base.num
2188 /* || c->codec_tag == AV_RL32("DIVX")
2189 || c->codec_tag == AV_RL32("XVID")*/
2190 || c->codec_id == CODEC_ID_MPEG2VIDEO
2191 || c->codec_id == CODEC_ID_H264
2192 )
2193 return 1;
2194 return 0;
2195 }
2196
2197 int av_find_stream_info(AVFormatContext *ic)
2198 {
2199 int i, count, ret, read_size, j;
2200 AVStream *st;
2201 AVPacket pkt1, *pkt;
2202 int64_t old_offset = avio_tell(ic->pb);
2203
2204 for(i=0;i<ic->nb_streams;i++) {
2205 AVCodec *codec;
2206 st = ic->streams[i];
2207 if (st->codec->codec_id == CODEC_ID_AAC) {
2208 st->codec->sample_rate = 0;
2209 st->codec->frame_size = 0;
2210 st->codec->channels = 0;
2211 }
2212 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2213 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2214 /* if(!st->time_base.num)
2215 st->time_base= */
2216 if(!st->codec->time_base.num)
2217 st->codec->time_base= st->time_base;
2218 }
2219 //only for the split stuff
2220 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2221 st->parser = av_parser_init(st->codec->codec_id);
2222 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2223 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2224 }
2225 }
2226 assert(!st->codec->codec);
2227 codec = avcodec_find_decoder(st->codec->codec_id);
2228
2229 /* Force decoding of at least one frame of codec data;
2230 * this makes sure the codec initializes the channel configuration
2231 * and does not trust the values from the container.
2232 */
2233 if (codec && codec->capabilities & CODEC_CAP_CHANNEL_CONF)
2234 st->codec->channels = 0;
2235
2236 /* Ensure that subtitle_header is properly set. */
2237 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2238 && codec && !st->codec->codec)
2239 avcodec_open(st->codec, codec);
2240
2241 //try to just open decoders, in case this is enough to get parameters
2242 if(!has_codec_parameters(st->codec)){
2243 if (codec && !st->codec->codec)
2244 avcodec_open(st->codec, codec);
2245 }
2246 }
2247
2248 for (i=0; i<ic->nb_streams; i++) {
2249 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2250 }
2251
2252 count = 0;
2253 read_size = 0;
2254 for(;;) {
2255 if(url_interrupt_cb()){
2256 ret= AVERROR_EXIT;
2257 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2258 break;
2259 }
2260
2261 /* check if one codec still needs to be handled */
2262 for(i=0;i<ic->nb_streams;i++) {
2263 int fps_analyze_framecount = 20;
2264
2265 st = ic->streams[i];
2266 if (!has_codec_parameters(st->codec))
2267 break;
2268 /* if the timebase is coarse (like the usual millisecond precision
2269 of mkv), we need to analyze more frames to reliably arrive at
2270 the correct fps */
2271 if (av_q2d(st->time_base) > 0.0005)
2272 fps_analyze_framecount *= 2;
2273 if (ic->fps_probe_size >= 0)
2274 fps_analyze_framecount = ic->fps_probe_size;
2275 /* variable fps and no guess at the real fps */
2276 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2277 && st->info->duration_count < fps_analyze_framecount
2278 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2279 break;
2280 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2281 break;
2282 if(st->first_dts == AV_NOPTS_VALUE)
2283 break;
2284 }
2285 if (i == ic->nb_streams) {
2286 /* NOTE: if the format has no header, then we need to read
2287 some packets to get most of the streams, so we cannot
2288 stop here */
2289 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2290 /* if we found the info for all the codecs, we can stop */
2291 ret = count;
2292 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2293 break;
2294 }
2295 }
2296 /* we did not get all the codec info, but we read too much data */
2297 if (read_size >= ic->probesize) {
2298 ret = count;
2299 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2300 break;
2301 }
2302
2303 /* NOTE: a new stream can be added here if there is no header in the file
2304 (AVFMTCTX_NOHEADER) */
2305 ret = av_read_frame_internal(ic, &pkt1);
2306 if (ret < 0 && ret != AVERROR(EAGAIN)) {
2307 /* EOF or error */
2308 ret = -1; /* we could not have all the codec parameters before EOF */
2309 for(i=0;i<ic->nb_streams;i++) {
2310 st = ic->streams[i];
2311 if (!has_codec_parameters(st->codec)){
2312 char buf[256];
2313 avcodec_string(buf, sizeof(buf), st->codec, 0);
2314 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2315 } else {
2316 ret = 0;
2317 }
2318 }
2319 break;
2320 }
2321
2322 if (ret == AVERROR(EAGAIN))
2323 continue;
2324
2325 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2326 if ((ret = av_dup_packet(pkt)) < 0)
2327 goto find_stream_info_err;
2328
2329 read_size += pkt->size;
2330
2331 st = ic->streams[pkt->stream_index];
2332 if (st->codec_info_nb_frames>1) {
2333 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2334 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2335 break;
2336 }
2337 st->info->codec_info_duration += pkt->duration;
2338 }
2339 {
2340 int64_t last = st->info->last_dts;
2341 int64_t duration= pkt->dts - last;
2342
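/* For every candidate standard frame rate, accumulate the squared error
 * between the observed inter-packet duration and the nearest whole number
 * of frames at that rate; the best matching rate is used further below
 * when r_frame_rate is still unknown. */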
2343 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2344 double dur= duration * av_q2d(st->time_base);
2345
2346 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2347 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2348 if (st->info->duration_count < 2)
2349 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
2350 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2351 int framerate= get_std_framerate(i);
2352 int ticks= lrintf(dur*framerate/(1001*12));
2353 double error= dur - ticks*1001*12/(double)framerate;
2354 st->info->duration_error[i] += error*error;
2355 }
2356 st->info->duration_count++;
2357 // ignore the first 4 values; they might have some random jitter
2358 if (st->info->duration_count > 3)
2359 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2360 }
2361 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2362 st->info->last_dts = pkt->dts;
2363 }
2364 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2365 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2366 if(i){
2367 st->codec->extradata_size= i;
2368 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2369 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2370 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2371 }
2372 }
2373
2374 /* if still no information, we try to open the codec and
2375 decompress the frame. We try to avoid that in most cases as
2376 it takes longer and uses more memory. For MPEG-4, we need to
2377 decompress for QuickTime. */
2378 if (!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st))
2379 try_decode_frame(st, pkt);
2380
2381 st->codec_info_nb_frames++;
2382 count++;
2383 }
2384
2385 // close codecs which were opened in try_decode_frame()
2386 for(i=0;i<ic->nb_streams;i++) {
2387 st = ic->streams[i];
2388 if(st->codec->codec)
2389 avcodec_close(st->codec);
2390 }
2391 for(i=0;i<ic->nb_streams;i++) {
2392 st = ic->streams[i];
2393 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2394 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2395 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2396 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2397 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2398 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2399 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2400
2401 // the check for tb_unreliable() is not completely correct, since this is not about handling
2402 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2403 // ipmovie.c produces.
2404 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2405 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2406 if (st->info->duration_count && !st->r_frame_rate.num
2407 && tb_unreliable(st->codec) /*&&
2408 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2409 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2410 int num = 0;
2411 double best_error= 2*av_q2d(st->time_base);
2412 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2413
2414 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2415 double error = st->info->duration_error[j] * get_std_framerate(j);
2416 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2417 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2418 if(error < best_error){
2419 best_error= error;
2420 num = get_std_framerate(j);
2421 }
2422 }
2423 // do not increase frame rate by more than 1 % in order to match a standard rate.
2424 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2425 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2426 }
2427
2428 if (!st->r_frame_rate.num){
2429 if( st->codec->time_base.den * (int64_t)st->time_base.num
2430 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2431 st->r_frame_rate.num = st->codec->time_base.den;
2432 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2433 }else{
2434 st->r_frame_rate.num = st->time_base.den;
2435 st->r_frame_rate.den = st->time_base.num;
2436 }
2437 }
2438 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2439 if(!st->codec->bits_per_coded_sample)
2440 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2441 // set stream disposition based on audio service type
2442 switch (st->codec->audio_service_type) {
2443 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2444 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2445 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2446 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2447 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2448 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2449 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2450 st->disposition = AV_DISPOSITION_COMMENT; break;
2451 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2452 st->disposition = AV_DISPOSITION_KARAOKE; break;
2453 }
2454 }
2455 }
2456
2457 av_estimate_timings(ic, old_offset);
2458
2459 compute_chapters_end(ic);
2460
2461 #if 0
2462 /* correct DTS for B-frame streams with no timestamps */
2463 for(i=0;i<ic->nb_streams;i++) {
2464 st = ic->streams[i];
2465 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2466 if(b-frames){
2467 ppktl = &ic->packet_buffer;
2468 while(ppkt1){
2469 if(ppkt1->stream_index != i)
2470 continue;
2471 if(ppkt1->pkt->dts < 0)
2472 break;
2473 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2474 break;
2475 ppkt1->pkt->dts -= delta;
2476 ppkt1= ppkt1->next;
2477 }
2478 if(ppkt1)
2479 continue;
2480 st->cur_dts -= delta;
2481 }
2482 }
2483 }
2484 #endif
2485
2486 find_stream_info_err:
2487 for (i=0; i < ic->nb_streams; i++)
2488 av_freep(&ic->streams[i]->info);
2489 return ret;
2490 }
2491
2492 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2493 {
2494 int i, j;
2495
2496 for (i = 0; i < ic->nb_programs; i++)
2497 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2498 if (ic->programs[i]->stream_index[j] == s)
2499 return ic->programs[i];
2500 return NULL;
2501 }
2502
2503 int av_find_best_stream(AVFormatContext *ic,
2504 enum AVMediaType type,
2505 int wanted_stream_nb,
2506 int related_stream,
2507 AVCodec **decoder_ret,
2508 int flags)
2509 {
2510 int i, nb_streams = ic->nb_streams;
2511 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2512 unsigned *program = NULL;
2513 AVCodec *decoder = NULL, *best_decoder = NULL;
2514
2515 if (related_stream >= 0 && wanted_stream_nb < 0) {
2516 AVProgram *p = find_program_from_stream(ic, related_stream);
2517 if (p) {
2518 program = p->stream_index;
2519 nb_streams = p->nb_stream_indexes;
2520 }
2521 }
2522 for (i = 0; i < nb_streams; i++) {
2523 int real_stream_index = program ? program[i] : i;
2524 AVStream *st = ic->streams[real_stream_index];
2525 AVCodecContext *avctx = st->codec;
2526 if (avctx->codec_type != type)
2527 continue;
2528 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2529 continue;
2530 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2531 continue;
2532 if (decoder_ret) {
2533 decoder = avcodec_find_decoder(st->codec->codec_id);
2534 if (!decoder) {
2535 if (ret < 0)
2536 ret = AVERROR_DECODER_NOT_FOUND;
2537 continue;
2538 }
2539 }
2540 if (best_count >= st->codec_info_nb_frames)
2541 continue;
2542 best_count = st->codec_info_nb_frames;
2543 ret = real_stream_index;
2544 best_decoder = decoder;
2545 if (program && i == nb_streams - 1 && ret < 0) {
2546 program = NULL;
2547 nb_streams = ic->nb_streams;
2548 i = 0; /* no related stream found, try again with everything */
2549 }
2550 }
2551 if (decoder_ret)
2552 *decoder_ret = best_decoder;
2553 return ret;
2554 }
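#if 0
/* Editorial usage sketch (not part of the original file): how the two
 * functions above are typically combined; "ic" is assumed to be an already
 * opened input AVFormatContext. */
static int find_video_stream_example(AVFormatContext *ic, AVCodec **dec)
{
    int ret = av_find_stream_info(ic);
    if (ret < 0)
        return ret;
    /* returns the chosen stream index, or a negative AVERROR code */
    return av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, dec, 0);
}
#endif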
2555
2556 /*******************************************************/
2557
2558 int av_read_play(AVFormatContext *s)
2559 {
2560 if (s->iformat->read_play)
2561 return s->iformat->read_play(s);
2562 if (s->pb)
2563 return avio_pause(s->pb, 0);
2564 return AVERROR(ENOSYS);
2565 }
2566
2567 int av_read_pause(AVFormatContext *s)
2568 {
2569 if (s->iformat->read_pause)
2570 return s->iformat->read_pause(s);
2571 if (s->pb)
2572 return avio_pause(s->pb, 1);
2573 return AVERROR(ENOSYS);
2574 }
2575
2576 void av_close_input_stream(AVFormatContext *s)
2577 {
2578 flush_packet_queue(s);
2579 if (s->iformat->read_close)
2580 s->iformat->read_close(s);
2581 avformat_free_context(s);
2582 }
2583
2584 void avformat_free_context(AVFormatContext *s)
2585 {
2586 int i;
2587 AVStream *st;
2588
2589 av_opt_free(s);
2590 if (s->iformat && s->iformat->priv_class && s->priv_data)
2591 av_opt_free(s->priv_data);
2592
2593 for(i=0;i<s->nb_streams;i++) {
2594 /* free all data in a stream component */
2595 st = s->streams[i];
2596 if (st->parser) {
2597 av_parser_close(st->parser);
2598 av_free_packet(&st->cur_pkt);
2599 }
2600 av_dict_free(&st->metadata);
2601 av_free(st->index_entries);
2602 av_free(st->codec->extradata);
2603 av_free(st->codec->subtitle_header);
2604 av_free(st->codec);
2605 av_free(st->priv_data);
2606 av_free(st->info);
2607 av_free(st);
2608 }
2609 for(i=s->nb_programs-1; i>=0; i--) {
2610 av_dict_free(&s->programs[i]->metadata);
2611 av_freep(&s->programs[i]->stream_index);
2612 av_freep(&s->programs[i]);
2613 }
2614 av_freep(&s->programs);
2615 av_freep(&s->priv_data);
2616 while(s->nb_chapters--) {
2617 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2618 av_free(s->chapters[s->nb_chapters]);
2619 }
2620 av_freep(&s->chapters);
2621 av_dict_free(&s->metadata);
2622 av_freep(&s->streams);
2623 av_free(s);
2624 }
2625
2626 void av_close_input_file(AVFormatContext *s)
2627 {
2628 AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
2629 NULL : s->pb;
2630 av_close_input_stream(s);
2631 if (pb)
2632 avio_close(pb);
2633 }
2634
2635 AVStream *av_new_stream(AVFormatContext *s, int id)
2636 {
2637 AVStream *st;
2638 int i;
2639 AVStream **streams;
2640
2641 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2642 return NULL;
2643 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2644 if (!streams)
2645 return NULL;
2646 s->streams = streams;
2647
2648 st = av_mallocz(sizeof(AVStream));
2649 if (!st)
2650 return NULL;
2651 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2652 av_free(st);
2653 return NULL;
2654 }
2655
2656 st->codec= avcodec_alloc_context();
2657 if (s->iformat) {
2658 /* no default bitrate if decoding */
2659 st->codec->bit_rate = 0;
2660 }
2661 st->index = s->nb_streams;
2662 st->id = id;
2663 st->start_time = AV_NOPTS_VALUE;
2664 st->duration = AV_NOPTS_VALUE;
2665 /* we set the current DTS to 0 so that formats without any timestamps
2666 but with durations get some timestamps; formats with some unknown
2667 timestamps have their first few packets buffered and their
2668 timestamps corrected before they are returned to the user */
2669 st->cur_dts = 0;
2670 st->first_dts = AV_NOPTS_VALUE;
2671 st->probe_packets = MAX_PROBE_PACKETS;
2672
2673 /* default pts setting is MPEG-like */
2674 av_set_pts_info(st, 33, 1, 90000);
2675 st->last_IP_pts = AV_NOPTS_VALUE;
2676 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2677 st->pts_buffer[i]= AV_NOPTS_VALUE;
2678 st->reference_dts = AV_NOPTS_VALUE;
2679
2680 st->sample_aspect_ratio = (AVRational){0,1};
2681
2682 s->streams[s->nb_streams++] = st;
2683 return st;
2684 }
2685
2686 AVProgram *av_new_program(AVFormatContext *ac, int id)
2687 {
2688 AVProgram *program=NULL;
2689 int i;
2690
2691 av_dlog(ac, "new_program: id=0x%04x\n", id);
2692
2693 for(i=0; i<ac->nb_programs; i++)
2694 if(ac->programs[i]->id == id)
2695 program = ac->programs[i];
2696
2697 if(!program){
2698 program = av_mallocz(sizeof(AVProgram));
2699 if (!program)
2700 return NULL;
2701 dynarray_add(&ac->programs, &ac->nb_programs, program);
2702 program->discard = AVDISCARD_NONE;
2703 }
2704 program->id = id;
2705
2706 return program;
2707 }
2708
2709 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2710 {
2711 AVChapter *chapter = NULL;
2712 int i;
2713
2714 for(i=0; i<s->nb_chapters; i++)
2715 if(s->chapters[i]->id == id)
2716 chapter = s->chapters[i];
2717
2718 if(!chapter){
2719 chapter= av_mallocz(sizeof(AVChapter));
2720 if(!chapter)
2721 return NULL;
2722 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2723 }
2724 av_dict_set(&chapter->metadata, "title", title, 0);
2725 chapter->id = id;
2726 chapter->time_base= time_base;
2727 chapter->start = start;
2728 chapter->end = end;
2729
2730 return chapter;
2731 }
2732
2733 /************************************************************/
2734 /* output media file */
2735
2736 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2737 {
2738 int ret;
2739
2740 if (s->oformat->priv_data_size > 0) {
2741 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2742 if (!s->priv_data)
2743 return AVERROR(ENOMEM);
2744 if (s->oformat->priv_class) {
2745 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2746 av_opt_set_defaults(s->priv_data);
2747 }
2748 } else
2749 s->priv_data = NULL;
2750
2751 if (s->oformat->set_parameters) {
2752 ret = s->oformat->set_parameters(s, ap);
2753 if (ret < 0)
2754 return ret;
2755 }
2756 return 0;
2757 }
2758
2759 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2760 {
2761 const AVCodecTag *avctag;
2762 int n;
2763 enum CodecID id = CODEC_ID_NONE;
2764 unsigned int tag = 0;
2765
2766 /**
2767 * Check that tag + id is in the table
2768 * If neither is in the table -> OK
2769 * If tag is in the table with another id -> FAIL
2770 * If id is in the table with another tag -> FAIL unless strict < normal
2771 */
2772 for (n = 0; s->oformat->codec_tag[n]; n++) {
2773 avctag = s->oformat->codec_tag[n];
2774 while (avctag->id != CODEC_ID_NONE) {
2775 if (ff_toupper4(avctag->tag) == ff_toupper4(st->codec->codec_tag)) {
2776 id = avctag->id;
2777 if (id == st->codec->codec_id)
2778 return 1;
2779 }
2780 if (avctag->id == st->codec->codec_id)
2781 tag = avctag->tag;
2782 avctag++;
2783 }
2784 }
2785 if (id != CODEC_ID_NONE)
2786 return 0;
2787 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
2788 return 0;
2789 return 1;
2790 }
2791
2792 int av_write_header(AVFormatContext *s)
2793 {
2794 int ret, i;
2795 AVStream *st;
2796
2797 // some sanity checks
2798 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2799 av_log(s, AV_LOG_ERROR, "no streams\n");
2800 return AVERROR(EINVAL);
2801 }
2802
2803 for(i=0;i<s->nb_streams;i++) {
2804 st = s->streams[i];
2805
2806 switch (st->codec->codec_type) {
2807 case AVMEDIA_TYPE_AUDIO:
2808 if(st->codec->sample_rate<=0){
2809 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2810 return AVERROR(EINVAL);
2811 }
2812 if(!st->codec->block_align)
2813 st->codec->block_align = st->codec->channels *
2814 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2815 break;
2816 case AVMEDIA_TYPE_VIDEO:
2817 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2818 av_log(s, AV_LOG_ERROR, "time base not set\n");
2819 return AVERROR(EINVAL);
2820 }
2821 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2822 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2823 return AVERROR(EINVAL);
2824 }
2825 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2826 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2827 return AVERROR(EINVAL);
2828 }
2829 break;
2830 }
2831
2832 if(s->oformat->codec_tag){
2833 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2834 // the current rawvideo encoding system ends up setting the wrong codec_tag for avi, so we override it here
2835 st->codec->codec_tag= 0;
2836 }
2837 if(st->codec->codec_tag){
2838 if (!validate_codec_tag(s, st)) {
2839 char tagbuf[32];
2840 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2841 av_log(s, AV_LOG_ERROR,
2842 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2843 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2844 return AVERROR_INVALIDDATA;
2845 }
2846 }else
2847 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2848 }
2849
2850 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2851 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2852 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2853 }
2854
2855 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2856 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2857 if (!s->priv_data)
2858 return AVERROR(ENOMEM);
2859 }
2860
2861 /* set muxer identification string */
2862 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2863 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2864 }
2865
2866 if(s->oformat->write_header){
2867 ret = s->oformat->write_header(s);
2868 if (ret < 0)
2869 return ret;
2870 }
2871
2872 /* init PTS generation */
2873 for(i=0;i<s->nb_streams;i++) {
2874 int64_t den = AV_NOPTS_VALUE;
2875 st = s->streams[i];
2876
2877 switch (st->codec->codec_type) {
2878 case AVMEDIA_TYPE_AUDIO:
2879 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2880 break;
2881 case AVMEDIA_TYPE_VIDEO:
2882 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2883 break;
2884 default:
2885 break;
2886 }
2887 if (den != AV_NOPTS_VALUE) {
2888 if (den <= 0)
2889 return AVERROR_INVALIDDATA;
2890 av_frac_init(&st->pts, 0, 0, den);
2891 }
2892 }
2893 return 0;
2894 }
2895
2896 //FIXME merge with compute_pkt_fields
2897 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
2898 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2899 int num, den, frame_size, i;
2900
2901 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
2902 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2903
2904 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2905 return AVERROR(EINVAL);*/
2906
2907 /* duration field */
2908 if (pkt->duration == 0) {
2909 compute_frame_duration(&num, &den, st, NULL, pkt);
2910 if (den && num) {
2911 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2912 }
2913 }
2914
2915 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2916 pkt->pts= pkt->dts;
2917
2918 //XXX/FIXME this is a temporary hack until all encoders output pts
2919 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2920 pkt->dts=
2921 // pkt->pts= st->cur_dts;
2922 pkt->pts= st->pts.val;
2923 }
2924
2925 //calculate dts from pts
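// keep the last delay+1 pts values sorted in a small buffer; the smallest
// of them belongs to the frame that must be output now, which is exactly
// the dts when the codec delays output by 'delay' frames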
2926 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2927 st->pts_buffer[0]= pkt->pts;
2928 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2929 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
2930 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2931 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2932
2933 pkt->dts= st->pts_buffer[0];
2934 }
2935
2936 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2937 av_log(s, AV_LOG_ERROR,
2938 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
2939 st->index, st->cur_dts, pkt->dts);
2940 return AVERROR(EINVAL);
2941 }
2942 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2943 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
2944 return AVERROR(EINVAL);
2945 }
2946
2947 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2948 st->cur_dts= pkt->dts;
2949 st->pts.val= pkt->dts;
2950
2951 /* update pts */
2952 switch (st->codec->codec_type) {
2953 case AVMEDIA_TYPE_AUDIO:
2954 frame_size = get_audio_frame_size(st->codec, pkt->size);
2955
2956 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2957 likely equal to the encoder delay, but it would be better if we
2958 had the real timestamps from the encoder */
2959 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2960 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2961 }
2962 break;
2963 case AVMEDIA_TYPE_VIDEO:
2964 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2965 break;
2966 default:
2967 break;
2968 }
2969 return 0;
2970 }
2971
2972 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2973 {
2974 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
2975
2976 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2977 return ret;
2978
2979 ret= s->oformat->write_packet(s, pkt);
2980 return ret;
2981 }
2982
2983 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2984 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2985 {
2986 AVPacketList **next_point, *this_pktl;
2987
2988 this_pktl = av_mallocz(sizeof(AVPacketList));
2989 this_pktl->pkt= *pkt;
2990 pkt->destruct= NULL; // do not free original but only the copy
2991 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
2992
2993 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
2994 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
2995 }else
2996 next_point = &s->packet_buffer;
2997
2998 if(*next_point){
2999 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
3000 while(!compare(s, &(*next_point)->pkt, pkt)){
3001 next_point= &(*next_point)->next;
3002 }
3003 goto next_non_null;
3004 }else{
3005 next_point = &(s->packet_buffer_end->next);
3006 }
3007 }
3008 assert(!*next_point);
3009
3010 s->packet_buffer_end= this_pktl;
3011 next_non_null:
3012
3013 this_pktl->next= *next_point;
3014
3015 s->streams[pkt->stream_index]->last_in_packet_buffer=
3016 *next_point= this_pktl;
3017 }
3018
3019 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
3020 {
3021 AVStream *st = s->streams[ pkt ->stream_index];
3022 AVStream *st2= s->streams[ next->stream_index];
3023 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
3024 st->time_base);
3025
3026 if (comp == 0)
3027 return pkt->stream_index < next->stream_index;
3028 return comp > 0;
3029 }
3030
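/* Interleave packets by dts: incoming packets are buffered per stream and a
 * packet is only returned once every stream has at least one buffered packet
 * (or flush is set), so the muxer always receives the packet with the
 * globally smallest dts next. */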
3031 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
3032 AVPacketList *pktl;
3033 int stream_count=0;
3034 int i;
3035
3036 if(pkt){
3037 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
3038 }
3039
3040 for(i=0; i < s->nb_streams; i++)
3041 stream_count+= !!s->streams[i]->last_in_packet_buffer;
3042
3043 if(stream_count && (s->nb_streams == stream_count || flush)){
3044 pktl= s->packet_buffer;
3045 *out= pktl->pkt;
3046
3047 s->packet_buffer= pktl->next;
3048 if(!s->packet_buffer)
3049 s->packet_buffer_end= NULL;
3050
3051 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
3052 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3053 av_freep(&pktl);
3054 return 1;
3055 }else{
3056 av_init_packet(out);
3057 return 0;
3058 }
3059 }
3060
3061 /**
3062 * Interleave an AVPacket correctly so it can be muxed.
3063 * @param out the interleaved packet will be output here
3064 * @param in the input packet
3065 * @param flush 1 if no further packets are available as input and all
3066 * remaining packets should be output
3067 * @return 1 if a packet was output, 0 if no packet could be output,
3068 * < 0 if an error occurred
3069 */
3070 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3071 if(s->oformat->interleave_packet)
3072 return s->oformat->interleave_packet(s, out, in, flush);
3073 else
3074 return av_interleave_packet_per_dts(s, out, in, flush);
3075 }
3076
3077 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3078 AVStream *st= s->streams[ pkt->stream_index];
3079 int ret;
3080
3081 //FIXME/XXX/HACK drop zero sized packets
3082 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3083 return 0;
3084
3085 av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
3086 pkt->size, pkt->dts, pkt->pts);
3087 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3088 return ret;
3089
3090 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3091 return AVERROR(EINVAL);
3092
3093 for(;;){
3094 AVPacket opkt;
3095 int ret= av_interleave_packet(s, &opkt, pkt, 0);
3096 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3097 return ret;
3098
3099 ret= s->oformat->write_packet(s, &opkt);
3100
3101 av_free_packet(&opkt);
3102 pkt= NULL;
3103
3104 if(ret<0)
3105 return ret;
3106 }
3107 }
3108
3109 int av_write_trailer(AVFormatContext *s)
3110 {
3111 int ret, i;
3112
3113 for(;;){
3114 AVPacket pkt;
3115 ret= av_interleave_packet(s, &pkt, NULL, 1);
3116 if(ret<0) //FIXME cleanup needed for ret<0 ?
3117 goto fail;
3118 if(!ret)
3119 break;
3120
3121 ret= s->oformat->write_packet(s, &pkt);
3122
3123 av_free_packet(&pkt);
3124
3125 if(ret<0)
3126 goto fail;
3127 }
3128
3129 if(s->oformat->write_trailer)
3130 ret = s->oformat->write_trailer(s);
3131 fail:
3132 for(i=0;i<s->nb_streams;i++) {
3133 av_freep(&s->streams[i]->priv_data);
3134 av_freep(&s->streams[i]->index_entries);
3135 }
3136 if (s->iformat && s->iformat->priv_class)
3137 av_opt_free(s->priv_data);
3138 av_freep(&s->priv_data);
3139 return ret;
3140 }
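#if 0
/* Editorial usage sketch (not part of the original file): the usual muxing
 * sequence built from av_write_header(), av_interleaved_write_frame() and
 * av_write_trailer(); "oc" is assumed to be a fully configured output
 * context and "get_packet" a hypothetical encoder wrapper. */
static int mux_all_packets_example(AVFormatContext *oc,
                                   int (*get_packet)(AVPacket *pkt))
{
    AVPacket pkt;
    int ret = av_write_header(oc);
    if (ret < 0)
        return ret;
    while (get_packet(&pkt) == 0) {
        ret = av_interleaved_write_frame(oc, &pkt);
        if (ret < 0)
            break;
    }
    av_write_trailer(oc);
    return ret;
}
#endif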
3141
3142 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3143 {
3144 int i, j;
3145 AVProgram *program=NULL;
3146 void *tmp;
3147
3148 if (idx >= ac->nb_streams) {
3149 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3150 return;
3151 }
3152
3153 for(i=0; i<ac->nb_programs; i++){
3154 if(ac->programs[i]->id != progid)
3155 continue;
3156 program = ac->programs[i];
3157 for(j=0; j<program->nb_stream_indexes; j++)
3158 if(program->stream_index[j] == idx)
3159 return;
3160
3161 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3162 if(!tmp)
3163 return;
3164 program->stream_index = tmp;
3165 program->stream_index[program->nb_stream_indexes++] = idx;
3166 return;
3167 }
3168 }
3169
3170 static void print_fps(double d, const char *postfix){
3171 uint64_t v= lrintf(d*100);
3172 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3173 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3174 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3175 }
3176
3177 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3178 {
3179 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3180 AVDictionaryEntry *tag=NULL;
3181
3182 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3183 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3184 if(strcmp("language", tag->key))
3185 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
3186 }
3187 }
3188 }
3189
3190 /* "user interface" functions */
3191 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3192 {
3193 char buf[256];
3194 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3195 AVStream *st = ic->streams[i];
3196 int g = av_gcd(st->time_base.num, st->time_base.den);
3197 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3198 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3199 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
3200 /* the pid is important information, so we display it */
3201 /* XXX: add a generic system */
3202 if (flags & AVFMT_SHOW_IDS)
3203 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3204 if (lang)
3205 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3206 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3207 av_log(NULL, AV_LOG_INFO, ": %s", buf);
3208 if (st->sample_aspect_ratio.num && // default
3209 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3210 AVRational display_aspect_ratio;
3211 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3212 st->codec->width*st->sample_aspect_ratio.num,
3213 st->codec->height*st->sample_aspect_ratio.den,
3214 1024*1024);
3215 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
3216 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3217 display_aspect_ratio.num, display_aspect_ratio.den);
3218 }
3219 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3220 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3221 print_fps(av_q2d(st->avg_frame_rate), "fps");
3222 if(st->r_frame_rate.den && st->r_frame_rate.num)
3223 print_fps(av_q2d(st->r_frame_rate), "tbr");
3224 if(st->time_base.den && st->time_base.num)
3225 print_fps(1/av_q2d(st->time_base), "tbn");
3226 if(st->codec->time_base.den && st->codec->time_base.num)
3227 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3228 }
3229 if (st->disposition & AV_DISPOSITION_DEFAULT)
3230 av_log(NULL, AV_LOG_INFO, " (default)");
3231 if (st->disposition & AV_DISPOSITION_DUB)
3232 av_log(NULL, AV_LOG_INFO, " (dub)");
3233 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3234 av_log(NULL, AV_LOG_INFO, " (original)");
3235 if (st->disposition & AV_DISPOSITION_COMMENT)
3236 av_log(NULL, AV_LOG_INFO, " (comment)");
3237 if (st->disposition & AV_DISPOSITION_LYRICS)
3238 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3239 if (st->disposition & AV_DISPOSITION_KARAOKE)
3240 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3241 if (st->disposition & AV_DISPOSITION_FORCED)
3242 av_log(NULL, AV_LOG_INFO, " (forced)");
3243 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3244 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3245 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3246 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3247 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3248 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3249 av_log(NULL, AV_LOG_INFO, "\n");
3250 dump_metadata(NULL, st->metadata, " ");
3251 }
3252
3253 #if FF_API_DUMP_FORMAT
3254 void dump_format(AVFormatContext *ic,
3255 int index,
3256 const char *url,
3257 int is_output)
3258 {
3259 av_dump_format(ic, index, url, is_output);
3260 }
3261 #endif
3262
3263 void av_dump_format(AVFormatContext *ic,
3264 int index,
3265 const char *url,
3266 int is_output)
3267 {
3268 int i;
3269 uint8_t *printed = av_mallocz(ic->nb_streams);
3270 if (ic->nb_streams && !printed)
3271 return;
3272
3273 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3274 is_output ? "Output" : "Input",
3275 index,
3276 is_output ? ic->oformat->name : ic->iformat->name,
3277 is_output ? "to" : "from", url);
3278 dump_metadata(NULL, ic->metadata, " ");
3279 if (!is_output) {
3280 av_log(NULL, AV_LOG_INFO, " Duration: ");
3281 if (ic->duration != AV_NOPTS_VALUE) {
3282 int hours, mins, secs, us;
3283 secs = ic->duration / AV_TIME_BASE;
3284 us = ic->duration % AV_TIME_BASE;
3285 mins = secs / 60;
3286 secs %= 60;
3287 hours = mins / 60;
3288 mins %= 60;
3289 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3290 (100 * us) / AV_TIME_BASE);
3291 } else {
3292 av_log(NULL, AV_LOG_INFO, "N/A");
3293 }
3294 if (ic->start_time != AV_NOPTS_VALUE) {
3295 int secs, us;
3296 av_log(NULL, AV_LOG_INFO, ", start: ");
3297 secs = ic->start_time / AV_TIME_BASE;
3298 us = abs(ic->start_time % AV_TIME_BASE);
3299 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3300 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3301 }
3302 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3303 if (ic->bit_rate) {
3304 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3305 } else {
3306 av_log(NULL, AV_LOG_INFO, "N/A");
3307 }
3308 av_log(NULL, AV_LOG_INFO, "\n");
3309 }
3310 for (i = 0; i < ic->nb_chapters; i++) {
3311 AVChapter *ch = ic->chapters[i];
3312 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3313 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3314 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3315
3316 dump_metadata(NULL, ch->metadata, " ");
3317 }
3318 if(ic->nb_programs) {
3319 int j, k, total = 0;
3320 for(j=0; j<ic->nb_programs; j++) {
3321 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3322 "name", NULL, 0);
3323 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3324 name ? name->value : "");
3325 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3326 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3327 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3328 printed[ic->programs[j]->stream_index[k]] = 1;
3329 }
3330 total += ic->programs[j]->nb_stream_indexes;
3331 }
3332 if (total < ic->nb_streams)
3333 av_log(NULL, AV_LOG_INFO, " No Program\n");
3334 }
3335 for(i=0;i<ic->nb_streams;i++)
3336 if (!printed[i])
3337 dump_stream_format(ic, i, index, is_output);
3338
3339 av_free(printed);
3340 }
3341
3342 int64_t av_gettime(void)
3343 {
3344 struct timeval tv;
3345 gettimeofday(&tv,NULL);
3346 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
3347 }
3348
3349 uint64_t ff_ntp_time(void)
3350 {
3351 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3352 }
3353
3354 #if FF_API_PARSE_DATE
3355 #include "libavutil/parseutils.h"
3356
3357 int64_t parse_date(const char *timestr, int duration)
3358 {
3359 int64_t timeval;
3360 av_parse_time(&timeval, timestr, duration);
3361 return timeval;
3362 }
3363 #endif
3364
3365 #if FF_API_FIND_INFO_TAG
3366 #include "libavutil/parseutils.h"
3367
3368 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
3369 {
3370 return av_find_info_tag(arg, arg_size, tag1, info);
3371 }
3372 #endif
3373
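/* Substitute the single %d (optionally %0Nd) in 'path' with 'number', e.g.
 * ("img%03d.png", 7) becomes "img007.png"; fails (-1) if there is no %d,
 * more than one, or the result does not fit in buf_size. */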
3374 int av_get_frame_filename(char *buf, int buf_size,
3375 const char *path, int number)
3376 {
3377 const char *p;
3378 char *q, buf1[20], c;
3379 int nd, len, percentd_found;
3380
3381 q = buf;
3382 p = path;
3383 percentd_found = 0;
3384 for(;;) {
3385 c = *p++;
3386 if (c == '\0')
3387 break;
3388 if (c == '%') {
3389 do {
3390 nd = 0;
3391 while (isdigit(*p)) {
3392 nd = nd * 10 + *p++ - '0';
3393 }
3394 c = *p++;
3395 } while (isdigit(c));
3396
3397 switch(c) {
3398 case '%':
3399 goto addchar;
3400 case 'd':
3401 if (percentd_found)
3402 goto fail;
3403 percentd_found = 1;
3404 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3405 len = strlen(buf1);
3406 if ((q - buf + len) > buf_size - 1)
3407 goto fail;
3408 memcpy(q, buf1, len);
3409 q += len;
3410 break;
3411 default:
3412 goto fail;
3413 }
3414 } else {
3415 addchar:
3416 if ((q - buf) < buf_size - 1)
3417 *q++ = c;
3418 }
3419 }
3420 if (!percentd_found)
3421 goto fail;
3422 *q = '\0';
3423 return 0;
3424 fail:
3425 *q = '\0';
3426 return -1;
3427 }
3428
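/* Dump 'size' bytes, 16 per line: an offset column, the bytes in hex and a
 * printable-ASCII column with non-printable bytes shown as '.'. */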
3429 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3430 {
3431 int len, i, j, c;
3432 #undef fprintf
3433 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3434
3435 for(i=0;i<size;i+=16) {
3436 len = size - i;
3437 if (len > 16)
3438 len = 16;
3439 PRINT("%08x ", i);
3440 for(j=0;j<16;j++) {
3441 if (j < len)
3442 PRINT(" %02x", buf[i+j]);
3443 else
3444 PRINT(" ");
3445 }
3446 PRINT(" ");
3447 for(j=0;j<len;j++) {
3448 c = buf[i+j];
3449 if (c < ' ' || c > '~')
3450 c = '.';
3451 PRINT("%c", c);
3452 }
3453 PRINT("\n");
3454 }
3455 #undef PRINT
3456 }
3457
3458 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3459 {
3460 hex_dump_internal(NULL, f, 0, buf, size);
3461 }
3462
3463 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3464 {
3465 hex_dump_internal(avcl, NULL, level, buf, size);
3466 }
3467
3468 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3469 {
3470 #undef fprintf
3471 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3472 PRINT("stream #%d:\n", pkt->stream_index);
3473 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3474 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3475 /* DTS is _always_ valid after av_read_frame() */
3476 PRINT(" dts=");
3477 if (pkt->dts == AV_NOPTS_VALUE)
3478 PRINT("N/A");
3479 else
3480 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3481 /* PTS may not be known if B-frames are present. */
3482 PRINT(" pts=");
3483 if (pkt->pts == AV_NOPTS_VALUE)
3484 PRINT("N/A");
3485 else
3486 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3487 PRINT("\n");
3488 PRINT(" size=%d\n", pkt->size);
3489 #undef PRINT
3490 if (dump_payload)
3491 av_hex_dump(f, pkt->data, pkt->size);
3492 }
3493
3494 #if FF_API_PKT_DUMP
3495 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3496 {
3497 AVRational tb = { 1, AV_TIME_BASE };
3498 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
3499 }
3500 #endif
3501
3502 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3503 {
3504 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3505 }
3506
3507 #if FF_API_PKT_DUMP
3508 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3509 {
3510 AVRational tb = { 1, AV_TIME_BASE };
3511 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
3512 }
3513 #endif
3514
3515 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3516 AVStream *st)
3517 {
3518 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
3519 }
3520
3521 void av_url_split(char *proto, int proto_size,
3522 char *authorization, int authorization_size,
3523 char *hostname, int hostname_size,
3524 int *port_ptr,
3525 char *path, int path_size,
3526 const char *url)
3527 {
3528 const char *p, *ls, *at, *col, *brk;
3529
3530 if (port_ptr) *port_ptr = -1;
3531 if (proto_size > 0) proto[0] = 0;
3532 if (authorization_size > 0) authorization[0] = 0;
3533 if (hostname_size > 0) hostname[0] = 0;
3534 if (path_size > 0) path