1 /*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /* #define DEBUG */
23
24 #include "avformat.h"
25 #include "avio_internal.h"
26 #include "internal.h"
27 #include "libavcodec/internal.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/dict.h"
30 #include "metadata.h"
31 #include "id3v2.h"
32 #include "libavutil/avstring.h"
33 #include "riff.h"
34 #include "audiointerleave.h"
35 #include "url.h"
36 #include <sys/time.h>
37 #include <time.h>
38 #include <strings.h>
39 #include <stdarg.h>
40 #if CONFIG_NETWORK
41 #include "network.h"
42 #endif
43
44 #undef NDEBUG
45 #include <assert.h>
46
47 /**
48 * @file
49 * various utility functions for use within Libav
50 */
51
52 unsigned avformat_version(void)
53 {
54 return LIBAVFORMAT_VERSION_INT;
55 }
56
57 const char *avformat_configuration(void)
58 {
59 return LIBAV_CONFIGURATION;
60 }
61
62 const char *avformat_license(void)
63 {
64 #define LICENSE_PREFIX "libavformat license: "
65 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
66 }
67
68 /* fraction handling */
69
70 /**
71 * f = val + (num / den) + 0.5.
72 *
73 * 'num' is normalized so that 0 <= num < den.
74 *
75 * @param f fractional number
76 * @param val integer value
77 * @param num must be >= 0
78 * @param den must be >= 1
79 */
80 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
81 {
82 num += (den >> 1);
83 if (num >= den) {
84 val += num / den;
85 num = num % den;
86 }
87 f->val = val;
88 f->num = num;
89 f->den = den;
90 }
91
92 /**
93 * Fractional addition to f: f = f + (incr / f->den).
94 *
95 * @param f fractional number
96 * @param incr increment, can be positive or negative
97 */
98 static void av_frac_add(AVFrac *f, int64_t incr)
99 {
100 int64_t num, den;
101
102 num = f->num + incr;
103 den = f->den;
104 if (num < 0) {
105 f->val += num / den;
106 num = num % den;
107 if (num < 0) {
108 num += den;
109 f->val--;
110 }
111 } else if (num >= den) {
112 f->val += num / den;
113 num = num % den;
114 }
115 f->num = num;
116 }
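/*
 * Illustrative trace of the two static AVFrac helpers above (a sketch, not
 * part of the build).  Suppose a hypothetical muxer advances its timestamp
 * by 1001/3 units per packet:
 *
 *   AVFrac f;
 *   av_frac_init(&f, 0, 0, 3);  // val = 0, num = 1 (den/2 rounding), den = 3
 *   av_frac_add(&f, 1001);      // val = 334, num = 0
 *   av_frac_add(&f, 1001);      // val = 667, num = 2
 *
 * The integer timestamp is read back from f.val while the remainder stays in
 * f.num, so no precision is lost over many additions.
 */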
117
118 /** head of registered input format linked list */
119 static AVInputFormat *first_iformat = NULL;
120 /** head of registered output format linked list */
121 static AVOutputFormat *first_oformat = NULL;
122
123 AVInputFormat *av_iformat_next(AVInputFormat *f)
124 {
125 if(f) return f->next;
126 else return first_iformat;
127 }
128
129 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
130 {
131 if(f) return f->next;
132 else return first_oformat;
133 }
134
135 void av_register_input_format(AVInputFormat *format)
136 {
137 AVInputFormat **p;
138 p = &first_iformat;
139 while (*p != NULL) p = &(*p)->next;
140 *p = format;
141 format->next = NULL;
142 }
143
144 void av_register_output_format(AVOutputFormat *format)
145 {
146 AVOutputFormat **p;
147 p = &first_oformat;
148 while (*p != NULL) p = &(*p)->next;
149 *p = format;
150 format->next = NULL;
151 }
152
153 int av_match_ext(const char *filename, const char *extensions)
154 {
155 const char *ext, *p;
156 char ext1[32], *q;
157
158 if(!filename)
159 return 0;
160
161 ext = strrchr(filename, '.');
162 if (ext) {
163 ext++;
164 p = extensions;
165 for(;;) {
166 q = ext1;
167 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
168 *q++ = *p++;
169 *q = '\0';
170 if (!strcasecmp(ext1, ext))
171 return 1;
172 if (*p == '\0')
173 break;
174 p++;
175 }
176 }
177 return 0;
178 }
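/*
 * Usage sketch for av_match_ext() (illustrative only, not part of the build).
 * The extension list is comma separated and the comparison is case-insensitive:
 *
 *   av_match_ext("movie.MKV", "mkv,webm");  // -> 1
 *   av_match_ext("movie.avi", "mkv,webm");  // -> 0
 *   av_match_ext(NULL,        "mkv,webm");  // -> 0, a NULL filename is tolerated
 */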
179
180 static int match_format(const char *name, const char *names)
181 {
182 const char *p;
183 int len, namelen;
184
185 if (!name || !names)
186 return 0;
187
188 namelen = strlen(name);
189 while ((p = strchr(names, ','))) {
190 len = FFMAX(p - names, namelen);
191 if (!strncasecmp(name, names, len))
192 return 1;
193 names = p+1;
194 }
195 return !strcasecmp(name, names);
196 }
197
198 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
199 const char *mime_type)
200 {
201 AVOutputFormat *fmt = NULL, *fmt_found;
202 int score_max, score;
203
204 /* specific test for image sequences */
205 #if CONFIG_IMAGE2_MUXER
206 if (!short_name && filename &&
207 av_filename_number_test(filename) &&
208 ff_guess_image2_codec(filename) != CODEC_ID_NONE) {
209 return av_guess_format("image2", NULL, NULL);
210 }
211 #endif
212 /* Find the proper file type. */
213 fmt_found = NULL;
214 score_max = 0;
215 while ((fmt = av_oformat_next(fmt))) {
216 score = 0;
217 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
218 score += 100;
219 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
220 score += 10;
221 if (filename && fmt->extensions &&
222 av_match_ext(filename, fmt->extensions)) {
223 score += 5;
224 }
225 if (score > score_max) {
226 score_max = score;
227 fmt_found = fmt;
228 }
229 }
230 return fmt_found;
231 }
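/*
 * Usage sketch for av_guess_format() (illustrative only, not part of the
 * build).  The scoring above means an exact short name (100) beats a MIME
 * type match (10), which beats a filename extension match (5):
 *
 *   AVOutputFormat *ofmt;
 *   ofmt = av_guess_format("matroska", NULL, NULL);          // by short name
 *   ofmt = av_guess_format(NULL, "out.mkv", NULL);           // by extension
 *   ofmt = av_guess_format(NULL, NULL, "video/x-matroska");  // by MIME type
 *   if (!ofmt)
 *       return AVERROR(EINVAL);  // hypothetical caller-side error handling
 */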
232
233 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
234 const char *filename, const char *mime_type, enum AVMediaType type){
235 if(type == AVMEDIA_TYPE_VIDEO){
236 enum CodecID codec_id= CODEC_ID_NONE;
237
238 #if CONFIG_IMAGE2_MUXER
239 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
240 codec_id= ff_guess_image2_codec(filename);
241 }
242 #endif
243 if(codec_id == CODEC_ID_NONE)
244 codec_id= fmt->video_codec;
245 return codec_id;
246 }else if(type == AVMEDIA_TYPE_AUDIO)
247 return fmt->audio_codec;
248 else if (type == AVMEDIA_TYPE_SUBTITLE)
249 return fmt->subtitle_codec;
250 else
251 return CODEC_ID_NONE;
252 }
253
254 AVInputFormat *av_find_input_format(const char *short_name)
255 {
256 AVInputFormat *fmt = NULL;
257 while ((fmt = av_iformat_next(fmt))) {
258 if (match_format(short_name, fmt->name))
259 return fmt;
260 }
261 return NULL;
262 }
263
264
265 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
266 {
267 int ret= av_new_packet(pkt, size);
268
269 if(ret<0)
270 return ret;
271
272 pkt->pos= avio_tell(s);
273
274 ret= avio_read(s, pkt->data, size);
275 if(ret<=0)
276 av_free_packet(pkt);
277 else
278 av_shrink_packet(pkt, ret);
279
280 return ret;
281 }
282
283 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
284 {
285 int ret;
286 int old_size;
287 if (!pkt->size)
288 return av_get_packet(s, pkt, size);
289 old_size = pkt->size;
290 ret = av_grow_packet(pkt, size);
291 if (ret < 0)
292 return ret;
293 ret = avio_read(s, pkt->data + old_size, size);
294 av_shrink_packet(pkt, old_size + FFMAX(ret, 0));
295 return ret;
296 }
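/*
 * Sketch of how a demuxer's read_packet callback might use the helpers above
 * (illustrative only; "my_read_packet" and MY_CHUNK_SIZE are made-up names):
 *
 *   static int my_read_packet(AVFormatContext *s, AVPacket *pkt)
 *   {
 *       int ret = av_get_packet(s->pb, pkt, MY_CHUNK_SIZE);
 *       if (ret < 0)
 *           return ret;  // I/O error or EOF
 *       // av_get_packet() already set pkt->pos and shrank pkt to the bytes
 *       // actually read; av_append_packet() could be used to extend it later.
 *       return 0;
 *   }
 */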
297
298
299 int av_filename_number_test(const char *filename)
300 {
301 char buf[1024];
302 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
303 }
304
305 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
306 {
307 AVProbeData lpd = *pd;
308 AVInputFormat *fmt1 = NULL, *fmt;
309 int score, id3 = 0;
310
311 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
312 int id3len = ff_id3v2_tag_len(lpd.buf);
313 if (lpd.buf_size > id3len + 16) {
314 lpd.buf += id3len;
315 lpd.buf_size -= id3len;
316 }
317 id3 = 1;
318 }
319
320 fmt = NULL;
321 while ((fmt1 = av_iformat_next(fmt1))) {
322 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
323 continue;
324 score = 0;
325 if (fmt1->read_probe) {
326 score = fmt1->read_probe(&lpd);
327 } else if (fmt1->extensions) {
328 if (av_match_ext(lpd.filename, fmt1->extensions)) {
329 score = 50;
330 }
331 }
332 if (score > *score_max) {
333 *score_max = score;
334 fmt = fmt1;
335 }else if (score == *score_max)
336 fmt = NULL;
337 }
338
339 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */
340 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4) {
341 while ((fmt = av_iformat_next(fmt)))
342 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) {
343 *score_max = AVPROBE_SCORE_MAX/4;
344 break;
345 }
346 }
347
348 return fmt;
349 }
350
351 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
352 int score=0;
353 return av_probe_input_format2(pd, is_opened, &score);
354 }
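/*
 * Probing sketch (illustrative only, not part of the build): detecting the
 * format of data that is already in memory.  The AVPROBE_PADDING_SIZE
 * requirement is real; the buffer contents and filename are made up.
 *
 *   uint8_t buf[4096 + AVPROBE_PADDING_SIZE];  // filled by the caller, padding zeroed
 *   AVProbeData pd = { "clip.bin", buf, 4096 };
 *   int score = 0;
 *   AVInputFormat *ifmt = av_probe_input_format2(&pd, 1, &score);
 *   if (ifmt)
 *       av_log(NULL, AV_LOG_INFO, "detected %s (score %d)\n", ifmt->name, score);
 */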
355
356 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
357 {
358 static const struct {
359 const char *name; enum CodecID id; enum AVMediaType type;
360 } fmt_id_type[] = {
361 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
362 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
363 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
364 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
365 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
366 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
367 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
368 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
369 { 0 }
370 };
371 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
372
373 if (fmt) {
374 int i;
375 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
376 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
377 for (i = 0; fmt_id_type[i].name; i++) {
378 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
379 st->codec->codec_id = fmt_id_type[i].id;
380 st->codec->codec_type = fmt_id_type[i].type;
381 break;
382 }
383 }
384 }
385 return !!fmt;
386 }
387
388 /************************************************************/
389 /* input media file */
390
391 /**
392 * Open a media file from an IO stream. 'fmt' must be specified.
393 */
394 int av_open_input_stream(AVFormatContext **ic_ptr,
395 AVIOContext *pb, const char *filename,
396 AVInputFormat *fmt, AVFormatParameters *ap)
397 {
398 int err;
399 AVFormatContext *ic;
400 AVFormatParameters default_ap;
401
402 if(!ap){
403 ap=&default_ap;
404 memset(ap, 0, sizeof(default_ap));
405 }
406
407 if(!ap->prealloced_context)
408 ic = avformat_alloc_context();
409 else
410 ic = *ic_ptr;
411 if (!ic) {
412 err = AVERROR(ENOMEM);
413 goto fail;
414 }
415 ic->iformat = fmt;
416 ic->pb = pb;
417 ic->duration = AV_NOPTS_VALUE;
418 ic->start_time = AV_NOPTS_VALUE;
419 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
420
421 /* allocate private data */
422 if (fmt->priv_data_size > 0) {
423 ic->priv_data = av_mallocz(fmt->priv_data_size);
424 if (!ic->priv_data) {
425 err = AVERROR(ENOMEM);
426 goto fail;
427 }
428 if (fmt->priv_class) {
429 *(const AVClass**)ic->priv_data = fmt->priv_class;
430 av_opt_set_defaults(ic->priv_data);
431 }
432 } else {
433 ic->priv_data = NULL;
434 }
435
436 // e.g. AVFMT_NOFILE formats will not have an AVIOContext
437 if (ic->pb)
438 ff_id3v2_read(ic, ID3v2_DEFAULT_MAGIC);
439
440 if (ic->iformat->read_header) {
441 err = ic->iformat->read_header(ic, ap);
442 if (err < 0)
443 goto fail;
444 }
445
446 if (pb && !ic->data_offset)
447 ic->data_offset = avio_tell(ic->pb);
448
449 ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
450
451 *ic_ptr = ic;
452 return 0;
453 fail:
454 if (ic) {
455 int i;
456 av_freep(&ic->priv_data);
457 for(i=0;i<ic->nb_streams;i++) {
458 AVStream *st = ic->streams[i];
459 if (st) {
460 av_free(st->priv_data);
461 av_free(st->codec->extradata);
462 av_free(st->codec);
463 av_free(st->info);
464 }
465 av_free(st);
466 }
467 }
468 av_free(ic);
469 *ic_ptr = NULL;
470 return err;
471 }
472
473 /** size of probe buffer, for guessing file type from file contents */
474 #define PROBE_BUF_MIN 2048
475 #define PROBE_BUF_MAX (1<<20)
476
477 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
478 const char *filename, void *logctx,
479 unsigned int offset, unsigned int max_probe_size)
480 {
481 AVProbeData pd = { filename ? filename : "", NULL, -offset };
482 unsigned char *buf = NULL;
483 int ret = 0, probe_size;
484
485 if (!max_probe_size) {
486 max_probe_size = PROBE_BUF_MAX;
487 } else if (max_probe_size > PROBE_BUF_MAX) {
488 max_probe_size = PROBE_BUF_MAX;
489 } else if (max_probe_size < PROBE_BUF_MIN) {
490 return AVERROR(EINVAL);
491 }
492
493 if (offset >= max_probe_size) {
494 return AVERROR(EINVAL);
495 }
496
497 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0;
498 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
499 int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
500 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
501
502 if (probe_size < offset) {
503 continue;
504 }
505
506 /* read probe data */
507 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
508 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
509 /* fail if error was not end of file, otherwise, lower score */
510 if (ret != AVERROR_EOF) {
511 av_free(buf);
512 return ret;
513 }
514 score = 0;
515 ret = 0; /* error was end of file, nothing read */
516 }
517 pd.buf_size += ret;
518 pd.buf = &buf[offset];
519
520 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
521
522 /* guess file format */
523 *fmt = av_probe_input_format2(&pd, 1, &score);
524 if(*fmt){
525 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
526 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
527 }else
528 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
529 }
530 }
531
532 if (!*fmt) {
533 av_free(buf);
534 return AVERROR_INVALIDDATA;
535 }
536
537 /* rewind. reuse probe buffer to avoid seeking */
538 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
539 av_free(buf);
540
541 return ret;
542 }
543
544 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
545 AVInputFormat *fmt,
546 int buf_size,
547 AVFormatParameters *ap)
548 {
549 int err;
550 AVProbeData probe_data, *pd = &probe_data;
551 AVIOContext *pb = NULL;
552 void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
553
554 pd->filename = "";
555 if (filename)
556 pd->filename = filename;
557 pd->buf = NULL;
558 pd->buf_size = 0;
559
560 if (!fmt) {
561 /* guess format if no file can be opened */
562 fmt = av_probe_input_format(pd, 0);
563 }
564
565 /* Do not open file if the format does not need it. XXX: specific
566 hack needed to handle RTSP/TCP */
567 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
568 /* if no file needed do not try to open one */
569 if ((err=avio_open(&pb, filename, AVIO_FLAG_READ)) < 0) {
570 goto fail;
571 }
572 if (buf_size > 0) {
573 ffio_set_buf_size(pb, buf_size);
574 }
575 if (!fmt && (err = av_probe_input_buffer(pb, &fmt, filename, logctx, 0, logctx ? (*ic_ptr)->probesize : 0)) < 0) {
576 goto fail;
577 }
578 }
579
580 /* if still no format found, error */
581 if (!fmt) {
582 err = AVERROR_INVALIDDATA;
583 goto fail;
584 }
585
586 /* check filename in case an image number is expected */
587 if (fmt->flags & AVFMT_NEEDNUMBER) {
588 if (!av_filename_number_test(filename)) {
589 err = AVERROR(EINVAL);
590 goto fail;
591 }
592 }
593 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
594 if (err)
595 goto fail;
596 return 0;
597 fail:
598 av_freep(&pd->buf);
599 if (pb)
600 avio_close(pb);
601 if (ap && ap->prealloced_context)
602 av_free(*ic_ptr);
603 *ic_ptr = NULL;
604 return err;
605
606 }
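/*
 * Typical open sequence built on the function above (a sketch, not part of
 * the build; error handling is intentionally minimal):
 *
 *   AVFormatContext *ic = NULL;
 *   int err = av_open_input_file(&ic, "input.mpg", NULL, 0, NULL);
 *   if (err < 0)
 *       return err;                 // probing or I/O failed
 *   err = av_find_stream_info(ic);  // fills in codec parameters and durations
 *   // ... av_read_frame() loop (see the sketch after av_read_frame() below) ...
 *   av_close_input_file(ic);
 */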
607
608 /*******************************************************/
609
610 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
611 AVPacketList **plast_pktl){
612 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
613 if (!pktl)
614 return NULL;
615
616 if (*packet_buffer)
617 (*plast_pktl)->next = pktl;
618 else
619 *packet_buffer = pktl;
620
621 /* add the packet to the buffered packet list */
622 *plast_pktl = pktl;
623 pktl->pkt= *pkt;
624 return &pktl->pkt;
625 }
626
627 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
628 {
629 int ret, i;
630 AVStream *st;
631
632 for(;;){
633 AVPacketList *pktl = s->raw_packet_buffer;
634
635 if (pktl) {
636 *pkt = pktl->pkt;
637 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
638 !s->streams[pkt->stream_index]->probe_packets ||
639 s->raw_packet_buffer_remaining_size < pkt->size){
640 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
641 av_freep(&pd->buf);
642 pd->buf_size = 0;
643 s->raw_packet_buffer = pktl->next;
644 s->raw_packet_buffer_remaining_size += pkt->size;
645 av_free(pktl);
646 return 0;
647 }
648 }
649
650 av_init_packet(pkt);
651 ret= s->iformat->read_packet(s, pkt);
652 if (ret < 0) {
653 if (!pktl || ret == AVERROR(EAGAIN))
654 return ret;
655 for (i = 0; i < s->nb_streams; i++)
656 s->streams[i]->probe_packets = 0;
657 continue;
658 }
659 st= s->streams[pkt->stream_index];
660
661 switch(st->codec->codec_type){
662 case AVMEDIA_TYPE_VIDEO:
663 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
664 break;
665 case AVMEDIA_TYPE_AUDIO:
666 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
667 break;
668 case AVMEDIA_TYPE_SUBTITLE:
669 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
670 break;
671 }
672
673 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
674 !st->probe_packets))
675 return ret;
676
677 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
678 s->raw_packet_buffer_remaining_size -= pkt->size;
679
680 if(st->codec->codec_id == CODEC_ID_PROBE){
681 AVProbeData *pd = &st->probe_data;
682 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
683 --st->probe_packets;
684
685 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
686 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
687 pd->buf_size += pkt->size;
688 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
689
690 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
691 //FIXME we don't reduce the score to 0 when we run out of buffer space in bytes
692 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
693 if(st->codec->codec_id != CODEC_ID_PROBE){
694 pd->buf_size=0;
695 av_freep(&pd->buf);
696 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
697 }
698 }
699 }
700 }
701 }
702
703 /**********************************************************/
704
705 /**
706 * Get the number of samples of an audio frame. Return -1 on error.
707 */
708 static int get_audio_frame_size(AVCodecContext *enc, int size)
709 {
710 int frame_size;
711
712 if(enc->codec_id == CODEC_ID_VORBIS)
713 return -1;
714
715 if (enc->frame_size <= 1) {
716 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
717
718 if (bits_per_sample) {
719 if (enc->channels == 0)
720 return -1;
721 frame_size = (size << 3) / (bits_per_sample * enc->channels);
722 } else {
723 /* used for example by ADPCM codecs */
724 if (enc->bit_rate == 0)
725 return -1;
726 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
727 }
728 } else {
729 frame_size = enc->frame_size;
730 }
731 return frame_size;
732 }
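/*
 * Worked example for get_audio_frame_size() above (illustrative only):
 * for 16-bit stereo PCM and a 4608 byte packet,
 *   frame_size = (4608 * 8) / (16 * 2) = 1152 samples;
 * codecs without a fixed bits-per-sample value (e.g. some ADPCM variants)
 * fall back to the bit-rate based estimate
 *   frame_size = size * 8 * sample_rate / bit_rate.
 */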
733
734
735 /**
736 * Compute the frame duration as the fraction *pnum / *pden seconds. Both are set to 0 if it is not available.
737 */
738 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
739 AVCodecParserContext *pc, AVPacket *pkt)
740 {
741 int frame_size;
742
743 *pnum = 0;
744 *pden = 0;
745 switch(st->codec->codec_type) {
746 case AVMEDIA_TYPE_VIDEO:
747 if(st->time_base.num*1000LL > st->time_base.den){
748 *pnum = st->time_base.num;
749 *pden = st->time_base.den;
750 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
751 *pnum = st->codec->time_base.num;
752 *pden = st->codec->time_base.den;
753 if (pc && pc->repeat_pict) {
754 *pnum = (*pnum) * (1 + pc->repeat_pict);
755 }
756 //If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet.
757 //Without a parser in that case, leave the duration undefined.
758 if(st->codec->ticks_per_frame>1 && !pc){
759 *pnum = *pden = 0;
760 }
761 }
762 break;
763 case AVMEDIA_TYPE_AUDIO:
764 frame_size = get_audio_frame_size(st->codec, pkt->size);
765 if (frame_size <= 0 || st->codec->sample_rate <= 0)
766 break;
767 *pnum = frame_size;
768 *pden = st->codec->sample_rate;
769 break;
770 default:
771 break;
772 }
773 }
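/*
 * Worked example for compute_frame_duration() above (illustrative only):
 * a 25 fps video stream with codec time_base 1/25 yields *pnum = 1 and
 * *pden = 25, so compute_pkt_fields() below derives
 *   duration = av_rescale_rnd(1, 1 * 90000, 25 * 1, AV_ROUND_DOWN) = 3600
 * ticks for a stream using the common 1/90000 time base.
 */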
774
775 static int is_intra_only(AVCodecContext *enc){
776 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
777 return 1;
778 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
779 switch(enc->codec_id){
780 case CODEC_ID_MJPEG:
781 case CODEC_ID_MJPEGB:
782 case CODEC_ID_LJPEG:
783 case CODEC_ID_RAWVIDEO:
784 case CODEC_ID_DVVIDEO:
785 case CODEC_ID_HUFFYUV:
786 case CODEC_ID_FFVHUFF:
787 case CODEC_ID_ASV1:
788 case CODEC_ID_ASV2:
789 case CODEC_ID_VCR1:
790 case CODEC_ID_DNXHD:
791 case CODEC_ID_JPEG2000:
792 return 1;
793 default: break;
794 }
795 }
796 return 0;
797 }
798
799 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
800 int64_t dts, int64_t pts)
801 {
802 AVStream *st= s->streams[stream_index];
803 AVPacketList *pktl= s->packet_buffer;
804
805 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
806 return;
807
808 st->first_dts= dts - st->cur_dts;
809 st->cur_dts= dts;
810
811 for(; pktl; pktl= pktl->next){
812 if(pktl->pkt.stream_index != stream_index)
813 continue;
814 //FIXME think more about this check
815 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
816 pktl->pkt.pts += st->first_dts;
817
818 if(pktl->pkt.dts != AV_NOPTS_VALUE)
819 pktl->pkt.dts += st->first_dts;
820
821 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
822 st->start_time= pktl->pkt.pts;
823 }
824 if (st->start_time == AV_NOPTS_VALUE)
825 st->start_time = pts;
826 }
827
828 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
829 {
830 AVPacketList *pktl= s->packet_buffer;
831 int64_t cur_dts= 0;
832
833 if(st->first_dts != AV_NOPTS_VALUE){
834 cur_dts= st->first_dts;
835 for(; pktl; pktl= pktl->next){
836 if(pktl->pkt.stream_index == pkt->stream_index){
837 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
838 break;
839 cur_dts -= pkt->duration;
840 }
841 }
842 pktl= s->packet_buffer;
843 st->first_dts = cur_dts;
844 }else if(st->cur_dts)
845 return;
846
847 for(; pktl; pktl= pktl->next){
848 if(pktl->pkt.stream_index != pkt->stream_index)
849 continue;
850 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
851 && !pktl->pkt.duration){
852 pktl->pkt.dts= cur_dts;
853 if(!st->codec->has_b_frames)
854 pktl->pkt.pts= cur_dts;
855 cur_dts += pkt->duration;
856 pktl->pkt.duration= pkt->duration;
857 }else
858 break;
859 }
860 if(st->first_dts == AV_NOPTS_VALUE)
861 st->cur_dts= cur_dts;
862 }
863
864 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
865 AVCodecParserContext *pc, AVPacket *pkt)
866 {
867 int num, den, presentation_delayed, delay, i;
868 int64_t offset;
869
870 if (s->flags & AVFMT_FLAG_NOFILLIN)
871 return;
872
873 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
874 pkt->dts= AV_NOPTS_VALUE;
875
876 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
877 //FIXME Set low_delay = 0 when has_b_frames = 1
878 st->codec->has_b_frames = 1;
879
880 /* do we have a video B-frame ? */
881 delay= st->codec->has_b_frames;
882 presentation_delayed = 0;
883
884 // ignore delay caused by frame threading so that the mpeg2-without-dts
885 // warning will not trigger
886 if (delay && st->codec->active_thread_type&FF_THREAD_FRAME)
887 delay -= st->codec->thread_count-1;
888
889 /* XXX: need has_b_frame, but cannot get it if the codec is
890 not initialized */
891 if (delay &&
892 pc && pc->pict_type != AV_PICTURE_TYPE_B)
893 presentation_delayed = 1;
894
895 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
896 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
897 pkt->dts -= 1LL<<st->pts_wrap_bits;
898 }
899
900 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
901 // we take the conservative approach and discard both
902 // Note: if this misbehaves for an H.264 file then presentation_delayed is probably not set correctly.
903 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
904 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
905 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
906 }
907
908 if (pkt->duration == 0) {
909 compute_frame_duration(&num, &den, st, pc, pkt);
910 if (den && num) {
911 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
912
913 if(pkt->duration != 0 && s->packet_buffer)
914 update_initial_durations(s, st, pkt);
915 }
916 }
917
918 /* correct timestamps with byte offset if demuxers only have timestamps
919 on packet boundaries */
920 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
921 /* this will estimate bitrate based on this frame's duration and size */
922 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
923 if(pkt->pts != AV_NOPTS_VALUE)
924 pkt->pts += offset;
925 if(pkt->dts != AV_NOPTS_VALUE)
926 pkt->dts += offset;
927 }
928
929 if (pc && pc->dts_sync_point >= 0) {
930 // we have synchronization info from the parser
931 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
932 if (den > 0) {
933 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
934 if (pkt->dts != AV_NOPTS_VALUE) {
935 // got DTS from the stream, update reference timestamp
936 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
937 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
938 } else if (st->reference_dts != AV_NOPTS_VALUE) {
939 // compute DTS based on reference timestamp
940 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
941 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
942 }
943 if (pc->dts_sync_point > 0)
944 st->reference_dts = pkt->dts; // new reference
945 }
946 }
947
948 /* This may be redundant, but it should not hurt. */
949 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
950 presentation_delayed = 1;
951
952 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
953 /* interpolate PTS and DTS if they are not present */
954 //We skip H264 currently because delay and has_b_frames are not reliably set
955 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
956 if (presentation_delayed) {
957 /* DTS = decompression timestamp */
958 /* PTS = presentation timestamp */
959 if (pkt->dts == AV_NOPTS_VALUE)
960 pkt->dts = st->last_IP_pts;
961 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
962 if (pkt->dts == AV_NOPTS_VALUE)
963 pkt->dts = st->cur_dts;
964
965 /* this is tricky: the dts must be incremented by the duration
966 of the frame we are displaying, i.e. the last I- or P-frame */
967 if (st->last_IP_duration == 0)
968 st->last_IP_duration = pkt->duration;
969 if(pkt->dts != AV_NOPTS_VALUE)
970 st->cur_dts = pkt->dts + st->last_IP_duration;
971 st->last_IP_duration = pkt->duration;
972 st->last_IP_pts= pkt->pts;
973 /* cannot compute PTS if not present (we can compute it only
974 by knowing the future) */
975 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
976 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
977 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
978 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
979 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
980 pkt->pts += pkt->duration;
981 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
982 }
983 }
984
985 /* presentation is not delayed : PTS and DTS are the same */
986 if(pkt->pts == AV_NOPTS_VALUE)
987 pkt->pts = pkt->dts;
988 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
989 if(pkt->pts == AV_NOPTS_VALUE)
990 pkt->pts = st->cur_dts;
991 pkt->dts = pkt->pts;
992 if(pkt->pts != AV_NOPTS_VALUE)
993 st->cur_dts = pkt->pts + pkt->duration;
994 }
995 }
996
997 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
998 st->pts_buffer[0]= pkt->pts;
999 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1000 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1001 if(pkt->dts == AV_NOPTS_VALUE)
1002 pkt->dts= st->pts_buffer[0];
1003 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
1004 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
1005 }
1006 if(pkt->dts > st->cur_dts)
1007 st->cur_dts = pkt->dts;
1008 }
1009
1010 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
1011
1012 /* update flags */
1013 if(is_intra_only(st->codec))
1014 pkt->flags |= AV_PKT_FLAG_KEY;
1015 else if (pc) {
1016 pkt->flags = 0;
1017 /* keyframe computation */
1018 if (pc->key_frame == 1)
1019 pkt->flags |= AV_PKT_FLAG_KEY;
1020 else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
1021 pkt->flags |= AV_PKT_FLAG_KEY;
1022 }
1023 if (pc)
1024 pkt->convergence_duration = pc->convergence_duration;
1025 }
1026
1027
1028 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1029 {
1030 AVStream *st;
1031 int len, ret, i;
1032
1033 av_init_packet(pkt);
1034
1035 for(;;) {
1036 /* select current input stream component */
1037 st = s->cur_st;
1038 if (st) {
1039 if (!st->need_parsing || !st->parser) {
1040 /* no parsing needed: we just output the packet as is */
1041 /* raw data support */
1042 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
1043 compute_pkt_fields(s, st, NULL, pkt);
1044 s->cur_st = NULL;
1045 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1046 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1047 ff_reduce_index(s, st->index);
1048 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1049 }
1050 break;
1051 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
1052 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
1053 st->cur_ptr, st->cur_len,
1054 st->cur_pkt.pts, st->cur_pkt.dts,
1055 st->cur_pkt.pos);
1056 st->cur_pkt.pts = AV_NOPTS_VALUE;
1057 st->cur_pkt.dts = AV_NOPTS_VALUE;
1058 /* increment read pointer */
1059 st->cur_ptr += len;
1060 st->cur_len -= len;
1061
1062 /* return packet if any */
1063 if (pkt->size) {
1064 got_packet:
1065 pkt->duration = 0;
1066 pkt->stream_index = st->index;
1067 pkt->pts = st->parser->pts;
1068 pkt->dts = st->parser->dts;
1069 pkt->pos = st->parser->pos;
1070 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
1071 s->cur_st = NULL;
1072 pkt->destruct= st->cur_pkt.destruct;
1073 st->cur_pkt.destruct= NULL;
1074 st->cur_pkt.data = NULL;
1075 assert(st->cur_len == 0);
1076 }else{
1077 pkt->destruct = NULL;
1078 }
1079 compute_pkt_fields(s, st, st->parser, pkt);
1080
1081 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
1082 ff_reduce_index(s, st->index);
1083 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1084 0, 0, AVINDEX_KEYFRAME);
1085 }
1086
1087 break;
1088 }
1089 } else {
1090 /* free packet */
1091 av_free_packet(&st->cur_pkt);
1092 s->cur_st = NULL;
1093 }
1094 } else {
1095 AVPacket cur_pkt;
1096 /* read next packet */
1097 ret = av_read_packet(s, &cur_pkt);
1098 if (ret < 0) {
1099 if (ret == AVERROR(EAGAIN))
1100 return ret;
1101 /* return the last frames, if any */
1102 for(i = 0; i < s->nb_streams; i++) {
1103 st = s->streams[i];
1104 if (st->parser && st->need_parsing) {
1105 av_parser_parse2(st->parser, st->codec,
1106 &pkt->data, &pkt->size,
1107 NULL, 0,
1108 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1109 AV_NOPTS_VALUE);
1110 if (pkt->size)
1111 goto got_packet;
1112 }
1113 }
1114 /* no more packets: really terminate parsing */
1115 return ret;
1116 }
1117 st = s->streams[cur_pkt.stream_index];
1118 st->cur_pkt= cur_pkt;
1119
1120 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1121 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1122 st->cur_pkt.pts < st->cur_pkt.dts){
1123 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1124 st->cur_pkt.stream_index,
1125 st->cur_pkt.pts,
1126 st->cur_pkt.dts,
1127 st->cur_pkt.size);
1128 // av_free_packet(&st->cur_pkt);
1129 // return -1;
1130 }
1131
1132 if(s->debug & FF_FDEBUG_TS)
1133 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1134 st->cur_pkt.stream_index,
1135 st->cur_pkt.pts,
1136 st->cur_pkt.dts,
1137 st->cur_pkt.size,
1138 st->cur_pkt.duration,
1139 st->cur_pkt.flags);
1140
1141 s->cur_st = st;
1142 st->cur_ptr = st->cur_pkt.data;
1143 st->cur_len = st->cur_pkt.size;
1144 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1145 st->parser = av_parser_init(st->codec->codec_id);
1146 if (!st->parser) {
1147 /* no parser available: just output the raw packets */
1148 st->need_parsing = AVSTREAM_PARSE_NONE;
1149 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1150 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1151 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
1152 st->parser->flags |= PARSER_FLAG_ONCE;
1153 }
1154 }
1155 }
1156 }
1157 if(s->debug & FF_FDEBUG_TS)
1158 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1159 pkt->stream_index,
1160 pkt->pts,
1161 pkt->dts,
1162 pkt->size,
1163 pkt->duration,
1164 pkt->flags);
1165
1166 return 0;
1167 }
1168
1169 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1170 {
1171 AVPacketList *pktl;
1172 int eof=0;
1173 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1174
1175 for(;;){
1176 pktl = s->packet_buffer;
1177 if (pktl) {
1178 AVPacket *next_pkt= &pktl->pkt;
1179
1180 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1181 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1182 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1183 if( pktl->pkt.stream_index == next_pkt->stream_index
1184 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
1185 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1186 next_pkt->pts= pktl->pkt.dts;
1187 }
1188 pktl= pktl->next;
1189 }
1190 pktl = s->packet_buffer;
1191 }
1192
1193 if( next_pkt->pts != AV_NOPTS_VALUE
1194 || next_pkt->dts == AV_NOPTS_VALUE
1195 || !genpts || eof){
1196 /* read packet from packet buffer, if there is data */
1197 *pkt = *next_pkt;
1198 s->packet_buffer = pktl->next;
1199 av_free(pktl);
1200 return 0;
1201 }
1202 }
1203 if(genpts){
1204 int ret= av_read_frame_internal(s, pkt);
1205 if(ret<0){
1206 if(pktl && ret != AVERROR(EAGAIN)){
1207 eof=1;
1208 continue;
1209 }else
1210 return ret;
1211 }
1212
1213 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1214 &s->packet_buffer_end)) < 0)
1215 return AVERROR(ENOMEM);
1216 }else{
1217 assert(!s->packet_buffer);
1218 return av_read_frame_internal(s, pkt);
1219 }
1220 }
1221 }
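/*
 * The standard demuxing loop on top of av_read_frame() (a sketch, not part of
 * the build; "video_index" and decode() are hypothetical):
 *
 *   AVPacket pkt;
 *   while (av_read_frame(ic, &pkt) >= 0) {
 *       if (pkt.stream_index == video_index)
 *           decode(&pkt);
 *       av_free_packet(&pkt);  // the returned packet is owned by the caller
 *   }
 */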
1222
1223 /* XXX: this packet queue should eventually be removed */
1224 static void flush_packet_queue(AVFormatContext *s)
1225 {
1226 AVPacketList *pktl;
1227
1228 for(;;) {
1229 pktl = s->packet_buffer;
1230 if (!pktl)
1231 break;
1232 s->packet_buffer = pktl->next;
1233 av_free_packet(&pktl->pkt);
1234 av_free(pktl);
1235 }
1236 while(s->raw_packet_buffer){
1237 pktl = s->raw_packet_buffer;
1238 s->raw_packet_buffer = pktl->next;
1239 av_free_packet(&pktl->pkt);
1240 av_free(pktl);
1241 }
1242 s->packet_buffer_end=
1243 s->raw_packet_buffer_end= NULL;
1244 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1245 }
1246
1247 /*******************************************************/
1248 /* seek support */
1249
1250 int av_find_default_stream_index(AVFormatContext *s)
1251 {
1252 int first_audio_index = -1;
1253 int i;
1254 AVStream *st;
1255
1256 if (s->nb_streams <= 0)
1257 return -1;
1258 for(i = 0; i < s->nb_streams; i++) {
1259 st = s->streams[i];
1260 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1261 return i;
1262 }
1263 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1264 first_audio_index = i;
1265 }
1266 return first_audio_index >= 0 ? first_audio_index : 0;
1267 }
1268
1269 /**
1270 * Flush the frame reader.
1271 */
1272 void ff_read_frame_flush(AVFormatContext *s)
1273 {
1274 AVStream *st;
1275 int i, j;
1276
1277 flush_packet_queue(s);
1278
1279 s->cur_st = NULL;
1280
1281 /* for each stream, reset read state */
1282 for(i = 0; i < s->nb_streams; i++) {
1283 st = s->streams[i];
1284
1285 if (st->parser) {
1286 av_parser_close(st->parser);
1287 st->parser = NULL;
1288 av_free_packet(&st->cur_pkt);
1289 }
1290 st->last_IP_pts = AV_NOPTS_VALUE;
1291 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1292 st->reference_dts = AV_NOPTS_VALUE;
1293 /* fail safe */
1294 st->cur_ptr = NULL;
1295 st->cur_len = 0;
1296
1297 st->probe_packets = MAX_PROBE_PACKETS;
1298
1299 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1300 st->pts_buffer[j]= AV_NOPTS_VALUE;
1301 }
1302 }
1303
1304 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1305 int i;
1306
1307 for(i = 0; i < s->nb_streams; i++) {
1308 AVStream *st = s->streams[i];
1309
1310 st->cur_dts = av_rescale(timestamp,
1311 st->time_base.den * (int64_t)ref_st->time_base.num,
1312 st->time_base.num * (int64_t)ref_st->time_base.den);
1313 }
1314 }
1315
1316 void ff_reduce_index(AVFormatContext *s, int stream_index)
1317 {
1318 AVStream *st= s->streams[stream_index];
1319 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1320
1321 if((unsigned)st->nb_index_entries >= max_entries){
1322 int i;
1323 for(i=0; 2*i<st->nb_index_entries; i++)
1324 st->index_entries[i]= st->index_entries[2*i];
1325 st->nb_index_entries= i;
1326 }
1327 }
1328
1329 int ff_add_index_entry(AVIndexEntry **index_entries,
1330 int *nb_index_entries,
1331 unsigned int *index_entries_allocated_size,
1332 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1333 {
1334 AVIndexEntry *entries, *ie;
1335 int index;
1336
1337 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1338 return -1;
1339
1340 entries = av_fast_realloc(*index_entries,
1341 index_entries_allocated_size,
1342 (*nb_index_entries + 1) *
1343 sizeof(AVIndexEntry));
1344 if(!entries)
1345 return -1;
1346
1347 *index_entries= entries;
1348
1349 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1350
1351 if(index<0){
1352 index= (*nb_index_entries)++;
1353 ie= &entries[index];
1354 assert(index==0 || ie[-1].timestamp < timestamp);
1355 }else{
1356 ie= &entries[index];
1357 if(ie->timestamp != timestamp){
1358 if(ie->timestamp <= timestamp)
1359 return -1;
1360 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1361 (*nb_index_entries)++;
1362 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1363 distance= ie->min_distance;
1364 }
1365
1366 ie->pos = pos;
1367 ie->timestamp = timestamp;
1368 ie->min_distance= distance;
1369 ie->size= size;
1370 ie->flags = flags;
1371
1372 return index;
1373 }
1374
1375 int av_add_index_entry(AVStream *st,
1376 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1377 {
1378 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1379 &st->index_entries_allocated_size, pos,
1380 timestamp, size, distance, flags);
1381 }
1382
1383 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1384 int64_t wanted_timestamp, int flags)
1385 {
1386 int a, b, m;
1387 int64_t timestamp;
1388
1389 a = - 1;
1390 b = nb_entries;
1391
1392 //optimize appending index entries at the end
1393 if(b && entries[b-1].timestamp < wanted_timestamp)
1394 a= b-1;
1395
1396 while (b - a > 1) {
1397 m = (a + b) >> 1;
1398 timestamp = entries[m].timestamp;
1399 if(timestamp >= wanted_timestamp)
1400 b = m;
1401 if(timestamp <= wanted_timestamp)
1402 a = m;
1403 }
1404 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1405
1406 if(!(flags & AVSEEK_FLAG_ANY)){
1407 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1408 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1409 }
1410 }
1411
1412 if(m == nb_entries)
1413 return -1;
1414 return m;
1415 }
1416
1417 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1418 int flags)
1419 {
1420 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1421 wanted_timestamp, flags);
1422 }
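/*
 * Sketch of resolving a timestamp to a file position through the index built
 * with av_add_index_entry() (illustrative only):
 *
 *   int idx = av_index_search_timestamp(st, wanted_ts, AVSEEK_FLAG_BACKWARD);
 *   if (idx >= 0) {
 *       AVIndexEntry *ie = &st->index_entries[idx];
 *       avio_seek(s->pb, ie->pos, SEEK_SET);  // keyframe at or before wanted_ts
 *   }
 */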
1423
1424 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1425 AVInputFormat *avif= s->iformat;
1426 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1427 int64_t ts_min, ts_max, ts;
1428 int index;
1429 int64_t ret;
1430 AVStream *st;
1431
1432 if (stream_index < 0)
1433 return -1;
1434
1435 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1436
1437 ts_max=
1438 ts_min= AV_NOPTS_VALUE;
1439 pos_limit= -1; //gcc falsely says it may be uninitialized
1440
1441 st= s->streams[stream_index];
1442 if(st->index_entries){
1443 AVIndexEntry *e;
1444
1445 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1446 index= FFMAX(index, 0);
1447 e= &st->index_entries[index];
1448
1449 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1450 pos_min= e->pos;
1451 ts_min= e->timestamp;
1452 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1453 pos_min,ts_min);
1454 }else{
1455 assert(index==0);
1456 }
1457
1458 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1459 assert(index < st->nb_index_entries);
1460 if(index >= 0){
1461 e= &st->index_entries[index];
1462 assert(e->timestamp >= target_ts);
1463 pos_max= e->pos;
1464 ts_max= e->timestamp;
1465 pos_limit= pos_max - e->min_distance;
1466 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1467 pos_max,pos_limit, ts_max);
1468 }
1469 }
1470
1471 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1472 if(pos<0)
1473 return -1;
1474
1475 /* do the seek */
1476 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1477 return ret;
1478
1479 av_update_cur_dts(s, st, ts);
1480
1481 return 0;
1482 }
1483
1484 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1485 int64_t pos, ts;
1486 int64_t start_pos, filesize;
1487 int no_change;
1488
1489 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1490
1491 if(ts_min == AV_NOPTS_VALUE){
1492 pos_min = s->data_offset;
1493 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1494 if (ts_min == AV_NOPTS_VALUE)
1495 return -1;
1496 }
1497
1498 if(ts_max == AV_NOPTS_VALUE){
1499 int step= 1024;
1500 filesize = avio_size(s->pb);
1501 pos_max = filesize - 1;
1502 do{
1503 pos_max -= step;
1504 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1505 step += step;
1506 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1507 if (ts_max == AV_NOPTS_VALUE)
1508 return -1;
1509
1510 for(;;){
1511 int64_t tmp_pos= pos_max + 1;
1512 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1513 if(tmp_ts == AV_NOPTS_VALUE)
1514 break;
1515 ts_max= tmp_ts;
1516 pos_max= tmp_pos;
1517 if(tmp_pos >= filesize)
1518 break;
1519 }
1520 pos_limit= pos_max;
1521 }
1522
1523 if(ts_min > ts_max){
1524 return -1;
1525 }else if(ts_min == ts_max){
1526 pos_limit= pos_min;
1527 }
1528
1529 no_change=0;
1530 while (pos_min < pos_limit) {
1531 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1532 pos_min, pos_max, ts_min, ts_max);
1533 assert(pos_limit <= pos_max);
1534
1535 if(no_change==0){
1536 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1537 // interpolate position (better than dichotomy)
1538 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1539 + pos_min - approximate_keyframe_distance;
1540 }else if(no_change==1){
1541 // bisection, if interpolation failed to change min or max pos last time
1542 pos = (pos_min + pos_limit)>>1;
1543 }else{
1544 /* linear search if bisection failed, can only happen if there
1545 are very few or no keyframes between min/max */
1546 pos=pos_min;
1547 }
1548 if(pos <= pos_min)
1549 pos= pos_min + 1;
1550 else if(pos > pos_limit)
1551 pos= pos_limit;
1552 start_pos= pos;
1553
1554 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1555 if(pos == pos_max)
1556 no_change++;
1557 else
1558 no_change=0;
1559 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1560 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1561 pos_limit, start_pos, no_change);
1562 if(ts == AV_NOPTS_VALUE){
1563 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1564 return -1;
1565 }
1566 assert(ts != AV_NOPTS_VALUE);
1567 if (target_ts <= ts) {
1568 pos_limit = start_pos - 1;
1569 pos_max = pos;
1570 ts_max = ts;
1571 }
1572 if (target_ts >= ts) {
1573 pos_min = pos;
1574 ts_min = ts;
1575 }
1576 }
1577
1578 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1579 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1580 pos_min = pos;
1581 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1582 pos_min++;
1583 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1584 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1585 pos, ts_min, target_ts, ts_max);
1586 *ts_ret= ts;
1587 return pos;
1588 }
1589
1590 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1591 int64_t pos_min, pos_max;
1592 #if 0
1593 AVStream *st;
1594
1595 if (stream_index < 0)
1596 return -1;
1597
1598 st= s->streams[stream_index];
1599 #endif
1600
1601 pos_min = s->data_offset;
1602 pos_max = avio_size(s->pb) - 1;
1603
1604 if (pos < pos_min) pos= pos_min;
1605 else if(pos > pos_max) pos= pos_max;
1606
1607 avio_seek(s->pb, pos, SEEK_SET);
1608
1609 #if 0
1610 av_update_cur_dts(s, st, ts);
1611 #endif
1612 return 0;
1613 }
1614
1615 static int av_seek_frame_generic(AVFormatContext *s,
1616 int stream_index, int64_t timestamp, int flags)
1617 {
1618 int index;
1619 int64_t ret;
1620 AVStream *st;
1621 AVIndexEntry *ie;
1622
1623 st = s->streams[stream_index];
1624
1625 index = av_index_search_timestamp(st, timestamp, flags);
1626
1627 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1628 return -1;
1629
1630 if(index < 0 || index==st->nb_index_entries-1){
1631 int i;
1632 AVPacket pkt;
1633
1634 if(st->nb_index_entries){
1635 assert(st->index_entries);
1636 ie= &st->index_entries[st->nb_index_entries-1];
1637 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1638 return ret;
1639 av_update_cur_dts(s, st, ie->timestamp);
1640 }else{
1641 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1642 return ret;
1643 }
1644 for(i=0;; i++) {
1645 int ret;
1646 do{
1647 ret = av_read_frame(s, &pkt);
1648 }while(ret == AVERROR(EAGAIN));
1649 if(ret<0)
1650 break;
1651 av_free_packet(&pkt);
1652 if(stream_index == pkt.stream_index){
1653 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1654 break;
1655 }
1656 }
1657 index = av_index_search_timestamp(st, timestamp, flags);
1658 }
1659 if (index < 0)
1660 return -1;
1661
1662 ff_read_frame_flush(s);
1663 if (s->iformat->read_seek){
1664 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1665 return 0;
1666 }
1667 ie = &st->index_entries[index];
1668 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1669 return ret;
1670 av_update_cur_dts(s, st, ie->timestamp);
1671
1672 return 0;
1673 }
1674
1675 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1676 {
1677 int ret;
1678 AVStream *st;
1679
1680 ff_read_frame_flush(s);
1681
1682 if(flags & AVSEEK_FLAG_BYTE)
1683 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1684
1685 if(stream_index < 0){
1686 stream_index= av_find_default_stream_index(s);
1687 if(stream_index < 0)
1688 return -1;
1689
1690 st= s->streams[stream_index];
1691 /* timestamp for default must be expressed in AV_TIME_BASE units */
1692 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1693 }
1694
1695 /* first, we try the format specific seek */
1696 if (s->iformat->read_seek)
1697 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1698 else
1699 ret = -1;
1700 if (ret >= 0) {
1701 return 0;
1702 }
1703
1704 if(s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH))
1705 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1706 else if (!(s->iformat->flags & AVFMT_NOGENSEARCH))
1707 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1708 else
1709 return -1;
1710 }
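/*
 * Usage sketch for av_seek_frame() (illustrative only): seek to the 10 second
 * mark.  With stream_index == -1 the timestamp is in AV_TIME_BASE units, as
 * handled by the rescale above.
 *
 *   if (av_seek_frame(ic, -1, 10 * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_BACKWARD) < 0)
 *       av_log(ic, AV_LOG_ERROR, "seek to 10s failed\n");
 */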
1711
1712 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1713 {
1714 if(min_ts > ts || max_ts < ts)
1715 return -1;
1716
1717 ff_read_frame_flush(s);
1718
1719 if (s->iformat->read_seek2)
1720 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1721
1722 if(s->iformat->read_timestamp){
1723 //try to seek via read_timestamp()
1724 }
1725
1726 //Fall back to the old API if the new one is not implemented but the old one is.
1727 //Note that the old API has somewhat different semantics.
1728 if(s->iformat->read_seek || 1)
1729 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1730
1731 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1732 }
1733
1734 /*******************************************************/
1735
1736 /**
1737 * Check whether any stream in the file has an accurate duration.
1738 *
1739 * @return TRUE if at least one stream has an accurate duration.
1740 */
1741 static int av_has_duration(AVFormatContext *ic)
1742 {
1743 int i;
1744 AVStream *st;
1745
1746 for(i = 0;i < ic->nb_streams; i++) {
1747 st = ic->streams[i];
1748 if (st->duration != AV_NOPTS_VALUE)
1749 return 1;
1750 }
1751 return 0;
1752 }
1753
1754 /**
1755 * Estimate the container timings from those of the individual streams.
1756 *
1757 * Also computes the global bitrate if possible.
1758 */
1759 static void av_update_stream_timings(AVFormatContext *ic)
1760 {
1761 int64_t start_time, start_time1, end_time, end_time1;
1762 int64_t duration, duration1;
1763 int i;
1764 AVStream *st;
1765
1766 start_time = INT64_MAX;
1767 end_time = INT64_MIN;
1768 duration = INT64_MIN;
1769 for(i = 0;i < ic->nb_streams; i++) {
1770 st = ic->streams[i];
1771 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1772 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1773 if (start_time1 < start_time)
1774 start_time = start_time1;
1775 if (st->duration != AV_NOPTS_VALUE) {
1776 end_time1 = start_time1
1777 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1778 if (end_time1 > end_time)
1779 end_time = end_time1;
1780 }
1781 }
1782 if (st->duration != AV_NOPTS_VALUE) {
1783 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1784 if (duration1 > duration)
1785 duration = duration1;
1786 }
1787 }
1788 if (start_time != INT64_MAX) {
1789 ic->start_time = start_time;
1790 if (end_time != INT64_MIN) {
1791 if (end_time - start_time > duration)
1792 duration = end_time - start_time;
1793 }
1794 }
1795 if (duration != INT64_MIN) {
1796 ic->duration = duration;
1797 if (ic->file_size > 0) {
1798 /* compute the bitrate */
1799 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1800 (double)ic->duration;
1801 }
1802 }
1803 }
1804
1805 static void fill_all_stream_timings(AVFormatContext *ic)
1806 {
1807 int i;
1808 AVStream *st;
1809
1810 av_update_stream_timings(ic);
1811 for(i = 0;i < ic->nb_streams; i++) {
1812 st = ic->streams[i];
1813 if (st->start_time == AV_NOPTS_VALUE) {
1814 if(ic->start_time != AV_NOPTS_VALUE)
1815 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1816 if(ic->duration != AV_NOPTS_VALUE)
1817 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1818 }
1819 }
1820 }
1821
1822 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1823 {
1824 int64_t filesize, duration;
1825 int bit_rate, i;
1826 AVStream *st;
1827
1828 /* if bit_rate is already set, we believe it */
1829 if (ic->bit_rate <= 0) {
1830 bit_rate = 0;
1831 for(i=0;i<ic->nb_streams;i++) {
1832 st = ic->streams[i];
1833 if (st->codec->bit_rate > 0)
1834 bit_rate += st->codec->bit_rate;
1835 }
1836 ic->bit_rate = bit_rate;
1837 }
1838
1839 /* if duration is already set, we believe it */
1840 if (ic->duration == AV_NOPTS_VALUE &&
1841 ic->bit_rate != 0 &&
1842 ic->file_size != 0) {
1843 filesize = ic->file_size;
1844 if (filesize > 0) {
1845 for(i = 0; i < ic->nb_streams; i++) {
1846 st = ic->streams[i];
1847 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1848 if (st->duration == AV_NOPTS_VALUE)
1849 st->duration = duration;
1850 }
1851 }
1852 }
1853 }
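/*
 * Worked example for the bit-rate based estimate above (illustrative only):
 * a 90 MB file with a total bit rate of 6 Mb/s gives roughly
 *   duration = 8 * 90e6 / 6e6 = 120 seconds,
 * which the av_rescale() call expresses in each stream's own time base
 * (120000 for a 1/1000 time base).
 */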
1854
1855 #define DURATION_MAX_READ_SIZE 250000
1856 #define DURATION_MAX_RETRY 3
1857
1858 /* only usable for MPEG-PS streams */
1859 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1860 {
1861 AVPacket pkt1, *pkt = &pkt1;
1862 AVStream *st;
1863 int read_size, i, ret;
1864 int64_t end_time;
1865 int64_t filesize, offset, duration;
1866 int retry=0;
1867
1868 ic->cur_st = NULL;
1869
1870 /* flush packet queue */
1871 flush_packet_queue(ic);
1872
1873 for (i=0; i<ic->nb_streams; i++) {
1874 st = ic->streams[i];
1875 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1876 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");
1877
1878 if (st->parser) {
1879 av_parser_close(st->parser);
1880 st->parser= NULL;
1881 av_free_packet(&st->cur_pkt);
1882 }
1883 }
1884
1885 /* estimate the end time (duration) */
1886 /* XXX: may need to support wrapping */
1887 filesize = ic->file_size;
1888 end_time = AV_NOPTS_VALUE;
1889 do{
1890 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1891 if (offset < 0)
1892 offset = 0;
1893
1894 avio_seek(ic->pb, offset, SEEK_SET);
1895 read_size = 0;
1896 for(;;) {
1897 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1898 break;
1899
1900 do{
1901 ret = av_read_packet(ic, pkt);
1902 }while(ret == AVERROR(EAGAIN));
1903 if (ret != 0)
1904 break;
1905 read_size += pkt->size;
1906 st = ic->streams[pkt->stream_index];
1907 if (pkt->pts != AV_NOPTS_VALUE &&
1908 (st->start_time != AV_NOPTS_VALUE ||
1909 st->first_dts != AV_NOPTS_VALUE)) {
1910 duration = end_time = pkt->pts;
1911 if (st->start_time != AV_NOPTS_VALUE) duration -= st->start_time;
1912 else duration -= st->first_dts;
1913 if (duration < 0)
1914 duration += 1LL<<st->pts_wrap_bits;
1915 if (duration > 0) {
1916 if (st->duration == AV_NOPTS_VALUE ||
1917 st->duration < duration)
1918 st->duration = duration;
1919 }
1920 }
1921 av_free_packet(pkt);
1922 }
1923 }while( end_time==AV_NOPTS_VALUE
1924 && filesize > (DURATION_MAX_READ_SIZE<<retry)
1925 && ++retry <= DURATION_MAX_RETRY);
1926
1927 fill_all_stream_timings(ic);
1928
1929 avio_seek(ic->pb, old_offset, SEEK_SET);
1930 for (i=0; i<ic->nb_streams; i++) {
1931 st= ic->streams[i];
1932 st->cur_dts= st->first_dts;
1933 st->last_IP_pts = AV_NOPTS_VALUE;
1934 st->reference_dts = AV_NOPTS_VALUE;
1935 }
1936 }
1937
1938 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1939 {
1940 int64_t file_size;
1941
1942 /* get the file size, if possible */
1943 if (ic->iformat->flags & AVFMT_NOFILE) {
1944 file_size = 0;
1945 } else {
1946 file_size = avio_size(ic->pb);
1947 if (file_size < 0)
1948 file_size = 0;
1949 }
1950 ic->file_size = file_size;
1951
1952 if ((!strcmp(ic->iformat->name, "mpeg") ||
1953 !strcmp(ic->iformat->name, "mpegts")) &&
1954 file_size && ic->pb->seekable) {
1955 /* get accurate estimate from the PTSes */
1956 av_estimate_timings_from_pts(ic, old_offset);
1957 } else if (av_has_duration(ic)) {
1958 /* at least one component has timings - we use them for all
1959 the components */
1960 fill_all_stream_timings(ic);
1961 } else {
1962 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
1963 /* less precise: use bitrate info */
1964 av_estimate_timings_from_bit_rate(ic);
1965 }
1966 av_update_stream_timings(ic);
1967
1968 {
1969 int i;
1970 AVStream av_unused *st;
1971 for(i = 0;i < ic->nb_streams; i++) {
1972 st = ic->streams[i];
1973 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
1974 (double) st->start_time / AV_TIME_BASE,
1975 (double) st->duration / AV_TIME_BASE);
1976 }
1977 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1978 (double) ic->start_time / AV_TIME_BASE,
1979 (double) ic->duration / AV_TIME_BASE,
1980 ic->bit_rate / 1000);
1981 }
1982 }
1983
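/* Return nonzero once the codec context describes the stream well enough:
 * a known codec_id plus, for audio, sample rate/channels/sample format
 * (and a frame size for Vorbis, AAC, MP1/2/3 and Speex), or, for video,
 * width and pixel format. */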
1984 static int has_codec_parameters(AVCodecContext *enc)
1985 {
1986 int val;
1987 switch(enc->codec_type) {
1988 case AVMEDIA_TYPE_AUDIO:
1989 val = enc->sample_rate && enc->channels && enc->sample_fmt != AV_SAMPLE_FMT_NONE;
1990 if(!enc->frame_size &&
1991 (enc->codec_id == CODEC_ID_VORBIS ||
1992 enc->codec_id == CODEC_ID_AAC ||
1993 enc->codec_id == CODEC_ID_MP1 ||
1994 enc->codec_id == CODEC_ID_MP2 ||
1995 enc->codec_id == CODEC_ID_MP3 ||
1996 enc->codec_id == CODEC_ID_SPEEX))
1997 return 0;
1998 break;
1999 case AVMEDIA_TYPE_VIDEO:
2000 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
2001 break;
2002 default:
2003 val = 1;
2004 break;
2005 }
2006 return enc->codec_id != CODEC_ID_NONE && val != 0;
2007 }
2008
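/* For H.264 the decoder delay (has_b_frames) is only trusted once
 * 6 + has_b_frames frames have been seen; every other codec is trusted
 * right away. */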
2009 static int has_decode_delay_been_guessed(AVStream *st)
2010 {
2011 return st->codec->codec_id != CODEC_ID_H264 ||
2012 st->codec_info_nb_frames >= 6 + st->codec->has_b_frames;
2013 }
2014
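/* Open a decoder for the stream if none is open yet and decode a single
 * packet, so that parameters which only become known by decoding (such as
 * the pixel format or the channel configuration) get filled in. */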
2015 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
2016 {
2017 int16_t *samples;
2018 AVCodec *codec;
2019 int got_picture, data_size, ret=0;
2020 AVFrame picture;
2021
2022 if(!st->codec->codec){
2023 codec = avcodec_find_decoder(st->codec->codec_id);
2024 if (!codec)
2025 return -1;
2026 ret = avcodec_open(st->codec, codec);
2027 if (ret < 0)
2028 return ret;
2029 }
2030
2031 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)){
2032 switch(st->codec->codec_type) {
2033 case AVMEDIA_TYPE_VIDEO:
2034 avcodec_get_frame_defaults(&picture);
2035 ret = avcodec_decode_video2(st->codec, &picture,
2036 &got_picture, avpkt);
2037 break;
2038 case AVMEDIA_TYPE_AUDIO:
2039 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2040 samples = av_malloc(data_size);
2041 if (!samples)
2042 goto fail;
2043 ret = avcodec_decode_audio3(st->codec, samples,
2044 &data_size, avpkt);
2045 av_free(samples);
2046 break;
2047 default:
2048 break;
2049 }
2050 }
2051 fail:
2052 return ret;
2053 }
2054
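/* AVCodecTag tables map container tags (typically fourccs) to codec IDs
 * and are terminated by a CODEC_ID_NONE entry; when looking up by tag we
 * first try an exact match and then a case-insensitive one via
 * ff_toupper4(). */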
2055 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2056 {
2057 while (tags->id != CODEC_ID_NONE) {
2058 if (tags->id == id)
2059 return tags->tag;
2060 tags++;
2061 }
2062 return 0;
2063 }
2064
2065 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2066 {
2067 int i;
2068 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2069 if(tag == tags[i].tag)
2070 return tags[i].id;
2071 }
2072 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2073 if (ff_toupper4(tag) == ff_toupper4(tags[i].tag))
2074 return tags[i].id;
2075 }
2076 return CODEC_ID_NONE;
2077 }
2078
2079 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2080 {
2081 int i;
2082 for(i=0; tags && tags[i]; i++){
2083 int tag= ff_codec_get_tag(tags[i], id);
2084 if(tag) return tag;
2085 }
2086 return 0;
2087 }
2088
2089 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2090 {
2091 int i;
2092 for(i=0; tags && tags[i]; i++){
2093 enum CodecID id= ff_codec_get_id(tags[i], tag);
2094 if(id!=CODEC_ID_NONE) return id;
2095 }
2096 return CODEC_ID_NONE;
2097 }
2098
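/* Give every chapter with an unknown end time an end equal to the start
 * of the next chapter that begins after it, or to the end of the file
 * when no such chapter exists; if neither is known, the chapter ends
 * where it starts. */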
2099 static void compute_chapters_end(AVFormatContext *s)
2100 {
2101 unsigned int i, j;
2102 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2103
2104 for (i = 0; i < s->nb_chapters; i++)
2105 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2106 AVChapter *ch = s->chapters[i];
2107 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2108 : INT64_MAX;
2109
2110 for (j = 0; j < s->nb_chapters; j++) {
2111 AVChapter *ch1 = s->chapters[j];
2112 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2113 if (j != i && next_start > ch->start && next_start < end)
2114 end = next_start;
2115 }
2116 ch->end = (end == INT64_MAX) ? ch->start : end;
2117 }
2118 }
2119
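/* Candidate "standard" frame rates, scaled by 12*1001: indices below
 * 60*12 stand for i/12 fps, the remaining ones for the NTSC-style rates
 * 24000/1001, 30000/1001, 60000/1001, 12000/1001 and 15000/1001 fps. */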
2120 static int get_std_framerate(int i){
2121 if(i<60*12) return i*1001;
2122 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2123 }
2124
2125 /*
2126 * Is the time base unreliable?
2127 * This is a heuristic to balance between quick acceptance of the values in
2128 * the headers vs. some extra checks.
2129 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2130 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2131 * And there are "variable" fps files this needs to detect as well.
2132 */
2133 static int tb_unreliable(AVCodecContext *c){
2134 if( c->time_base.den >= 101L*c->time_base.num
2135 || c->time_base.den < 5L*c->time_base.num
2136 /* || c->codec_tag == AV_RL32("DIVX")
2137 || c->codec_tag == AV_RL32("XVID")*/
2138 || c->codec_id == CODEC_ID_MPEG2VIDEO
2139 || c->codec_id == CODEC_ID_H264
2140 )
2141 return 1;
2142 return 0;
2143 }
2144
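/* Typical probing sequence (a minimal sketch; the file name is a
 * placeholder and error handling is elided):
 *
 *     AVFormatContext *ic = NULL;
 *     if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
 *         return -1;
 *     if (av_find_stream_info(ic) < 0)
 *         return -1;                        // codec parameters not found
 *     av_dump_format(ic, 0, "input.mpg", 0);
 *     av_close_input_file(ic);
 */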
2145 int av_find_stream_info(AVFormatContext *ic)
2146 {
2147 int i, count, ret, read_size, j;
2148 AVStream *st;
2149 AVPacket pkt1, *pkt;
2150 int64_t old_offset = avio_tell(ic->pb);
2151
2152 for(i=0;i<ic->nb_streams;i++) {
2153 AVCodec *codec;
2154 st = ic->streams[i];
2155 if (st->codec->codec_id == CODEC_ID_AAC) {
2156 st->codec->sample_rate = 0;
2157 st->codec->frame_size = 0;
2158 st->codec->channels = 0;
2159 }
2160 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2161 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2162 /* if(!st->time_base.num)
2163 st->time_base= */
2164 if(!st->codec->time_base.num)
2165 st->codec->time_base= st->time_base;
2166 }
2167 //only for the split stuff
2168 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2169 st->parser = av_parser_init(st->codec->codec_id);
2170 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2171 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2172 }
2173 }
2174 assert(!st->codec->codec);
2175 codec = avcodec_find_decoder(st->codec->codec_id);
2176
2177 /* Force decoding of at least one frame of codec data;
2178 * this makes sure the codec initializes the channel configuration
2179 * and does not trust the values from the container.
2180 */
2181 if (codec && codec->capabilities & CODEC_CAP_CHANNEL_CONF)
2182 st->codec->channels = 0;
2183
2184 /* Ensure that subtitle_header is properly set. */
2185 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2186 && codec && !st->codec->codec)
2187 avcodec_open(st->codec, codec);
2188
2189 //try to just open decoders, in case this is enough to get parameters
2190 if(!has_codec_parameters(st->codec)){
2191 if (codec && !st->codec->codec)
2192 avcodec_open(st->codec, codec);
2193 }
2194 }
2195
2196 for (i=0; i<ic->nb_streams; i++) {
2197 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2198 }
2199
2200 count = 0;
2201 read_size = 0;
2202 for(;;) {
2203 if(url_interrupt_cb()){
2204 ret= AVERROR_EXIT;
2205 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2206 break;
2207 }
2208
2209 /* check if one codec still needs to be handled */
2210 for(i=0;i<ic->nb_streams;i++) {
2211 int fps_analyze_framecount = 20;
2212
2213 st = ic->streams[i];
2214 if (!has_codec_parameters(st->codec))
2215 break;
2216 /* if the timebase is coarse (like the usual millisecond precision
2217 of mkv), we need to analyze more frames to reliably arrive at
2218 the correct fps */
2219 if (av_q2d(st->time_base) > 0.0005)
2220 fps_analyze_framecount *= 2;
2221 if (ic->fps_probe_size >= 0)
2222 fps_analyze_framecount = ic->fps_probe_size;
2223 /* variable fps and no guess at the real fps */
2224 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2225 && st->info->duration_count < fps_analyze_framecount
2226 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2227 break;
2228 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2229 break;
2230 if(st->first_dts == AV_NOPTS_VALUE)
2231 break;
2232 }
2233 if (i == ic->nb_streams) {
2234 /* NOTE: if the format has no header, then we need to read
2235 some packets to get most of the streams, so we cannot
2236 stop here */
2237 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2238 /* if we found the info for all the codecs, we can stop */
2239 ret = count;
2240 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2241 break;
2242 }
2243 }
2244 /* we did not get all the codec info, but we read too much data */
2245 if (read_size >= ic->probesize) {
2246 ret = count;
2247 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2248 break;
2249 }
2250
2251 /* NOTE: a new stream can be added here if the file has no header
2252 (AVFMTCTX_NOHEADER) */
2253 ret = av_read_frame_internal(ic, &pkt1);
2254 if (ret < 0 && ret != AVERROR(EAGAIN)) {
2255 /* EOF or error */
2256 ret = -1; /* we could not get all the codec parameters before EOF */
2257 for(i=0;i<ic->nb_streams;i++) {
2258 st = ic->streams[i];
2259 if (!has_codec_parameters(st->codec)){
2260 char buf[256];
2261 avcodec_string(buf, sizeof(buf), st->codec, 0);
2262 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2263 } else {
2264 ret = 0;
2265 }
2266 }
2267 break;
2268 }
2269
2270 if (ret == AVERROR(EAGAIN))
2271 continue;
2272
2273 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2274 if ((ret = av_dup_packet(pkt)) < 0)
2275 goto find_stream_info_err;
2276
2277 read_size += pkt->size;
2278
2279 st = ic->streams[pkt->stream_index];
2280 if (st->codec_info_nb_frames>1) {
2281 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2282 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2283 break;
2284 }
2285 st->info->codec_info_duration += pkt->duration;
2286 }
2287 {
2288 int64_t last = st->info->last_dts;
2289 int64_t duration= pkt->dts - last;
2290
2291 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2292 double dur= duration * av_q2d(st->time_base);
2293
2294 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2295 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2296 if (st->info->duration_count < 2)
2297 memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
2298 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
2299 int framerate= get_std_framerate(i);
2300 int ticks= lrintf(dur*framerate/(1001*12));
2301 double error= dur - ticks*1001*12/(double)framerate;
2302 st->info->duration_error[i] += error*error;
2303 }
2304 st->info->duration_count++;
2305 // ignore the first 4 values, they might have some random jitter
2306 if (st->info->duration_count > 3)
2307 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2308 }
2309 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
2310 st->info->last_dts = pkt->dts;
2311 }
2312 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2313 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2314 if(i){
2315 st->codec->extradata_size= i;
2316 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2317 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2318 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2319 }
2320 }
2321
2322 /* If we still have no information, we try to open the codec and
2323 decode the frame. We try to avoid that in most cases as
2324 it takes longer and uses more memory. For MPEG-4 video in
2325 QuickTime files we need to decode a frame. */
2326 if (!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st))
2327 try_decode_frame(st, pkt);
2328
2329 st->codec_info_nb_frames++;
2330 count++;
2331 }
2332
2333 // close codecs which were opened in try_decode_frame()
2334 for(i=0;i<ic->nb_streams;i++) {
2335 st = ic->streams[i];
2336 if(st->codec->codec)
2337 avcodec_close(st->codec);
2338 }
2339 for(i=0;i<ic->nb_streams;i++) {
2340 st = ic->streams[i];
2341 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
2342 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2343 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2344 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
2345 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2346 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2347 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2348
2349 // the check for tb_unreliable() is not completely correct, since this is not about handling
2350 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2351 // ipmovie.c produces.
2352 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
2353 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
2354 if (st->info->duration_count && !st->r_frame_rate.num
2355 && tb_unreliable(st->codec) /*&&
2356 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2357 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
2358 int num = 0;
2359 double best_error= 2*av_q2d(st->time_base);
2360 best_error = best_error*best_error*st->info->duration_count*1000*12*30;
2361
2362 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
2363 double error = st->info->duration_error[j] * get_std_framerate(j);
2364 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2365 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2366 if(error < best_error){
2367 best_error= error;
2368 num = get_std_framerate(j);
2369 }
2370 }
2371 // do not increase frame rate by more than 1% in order to match a standard rate.
2372 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2373 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2374 }
2375
2376 if (!st->r_frame_rate.num){
2377 if( st->codec->time_base.den * (int64_t)st->time_base.num
2378 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2379 st->r_frame_rate.num = st->codec->time_base.den;
2380 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2381 }else{
2382 st->r_frame_rate.num = st->time_base.den;
2383 st->r_frame_rate.den = st->time_base.num;
2384 }
2385 }
2386 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2387 if(!st->codec->bits_per_coded_sample)
2388 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2389 // set stream disposition based on audio service type
2390 switch (st->codec->audio_service_type) {
2391 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2392 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2393 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2394 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2395 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2396 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2397 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2398 st->disposition = AV_DISPOSITION_COMMENT; break;
2399 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2400 st->disposition = AV_DISPOSITION_KARAOKE; break;
2401 }
2402 }
2403 }
2404
2405 av_estimate_timings(ic, old_offset);
2406
2407 compute_chapters_end(ic);
2408
2409 #if 0
2410 /* correct DTS for B-frame streams with no timestamps */
2411 for(i=0;i<ic->nb_streams;i++) {
2412 st = ic->streams[i];
2413 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2414 if(b-frames){
2415 ppktl = &ic->packet_buffer;
2416 while(ppkt1){
2417 if(ppkt1->stream_index != i)
2418 continue;
2419 if(ppkt1->pkt->dts < 0)
2420 break;
2421 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2422 break;
2423 ppkt1->pkt->dts -= delta;
2424 ppkt1= ppkt1->next;
2425 }
2426 if(ppkt1)
2427 continue;
2428 st->cur_dts -= delta;
2429 }
2430 }
2431 }
2432 #endif
2433
2434 find_stream_info_err:
2435 for (i=0; i < ic->nb_streams; i++)
2436 av_freep(&ic->streams[i]->info);
2437 return ret;
2438 }
2439
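/* Return the first program that contains stream index s, or NULL if the
 * stream belongs to no program. */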
2440 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2441 {
2442 int i, j;
2443
2444 for (i = 0; i < ic->nb_programs; i++)
2445 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2446 if (ic->programs[i]->stream_index[j] == s)
2447 return ic->programs[i];
2448 return NULL;
2449 }
2450
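/* Example (a sketch, assuming ic has already been probed with
 * av_find_stream_info()): pick the best video stream and a decoder for it.
 *
 *     AVCodec *dec = NULL;
 *     int idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (idx < 0)
 *         return idx; // AVERROR_STREAM_NOT_FOUND or AVERROR_DECODER_NOT_FOUND
 */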
2451 int av_find_best_stream(AVFormatContext *ic,
2452 enum AVMediaType type,
2453 int wanted_stream_nb,
2454 int related_stream,
2455 AVCodec **decoder_ret,
2456 int flags)
2457 {
2458 int i, nb_streams = ic->nb_streams;
2459 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2460 unsigned *program = NULL;
2461 AVCodec *decoder = NULL, *best_decoder = NULL;
2462
2463 if (related_stream >= 0 && wanted_stream_nb < 0) {
2464 AVProgram *p = find_program_from_stream(ic, related_stream);
2465 if (p) {
2466 program = p->stream_index;
2467 nb_streams = p->nb_stream_indexes;
2468 }
2469 }
2470 for (i = 0; i < nb_streams; i++) {
2471 int real_stream_index = program ? program[i] : i;
2472 AVStream *st = ic->streams[real_stream_index];
2473 AVCodecContext *avctx = st->codec;
2474 if (avctx->codec_type != type)
2475 continue;
2476 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2477 continue;
2478 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2479 continue;
2480 if (decoder_ret) {
2481 decoder = avcodec_find_decoder(st->codec->codec_id);
2482 if (!decoder) {
2483 if (ret < 0)
2484 ret = AVERROR_DECODER_NOT_FOUND;
2485 continue;
2486 }
2487 }
2488 if (best_count >= st->codec_info_nb_frames)
2489 continue;
2490 best_count = st->codec_info_nb_frames;
2491 ret = real_stream_index;
2492 best_decoder = decoder;
2493 if (program && i == nb_streams - 1 && ret < 0) {
2494 program = NULL;
2495 nb_streams = ic->nb_streams;
2496 i = 0; /* no related stream found, try again with everything */
2497 }
2498 }
2499 if (decoder_ret)
2500 *decoder_ret = best_decoder;
2501 return ret;
2502 }
2503
2504 /*******************************************************/
2505
2506 int av_read_play(AVFormatContext *s)
2507 {
2508 if (s->iformat->read_play)
2509 return s->iformat->read_play(s);
2510 if (s->pb)
2511 return avio_pause(s->pb, 0);
2512 return AVERROR(ENOSYS);
2513 }
2514
2515 int av_read_pause(AVFormatContext *s)
2516 {
2517 if (s->iformat->read_pause)
2518 return s->iformat->read_pause(s);
2519 if (s->pb)
2520 return avio_pause(s->pb, 1);
2521 return AVERROR(ENOSYS);
2522 }
2523
2524 void av_close_input_stream(AVFormatContext *s)
2525 {
2526 flush_packet_queue(s);
2527 if (s->iformat->read_close)
2528 s->iformat->read_close(s);
2529 avformat_free_context(s);
2530 }
2531
2532 void avformat_free_context(AVFormatContext *s)
2533 {
2534 int i;
2535 AVStream *st;
2536
2537 av_opt_free(s);
2538 if (s->iformat && s->iformat->priv_class && s->priv_data)
2539 av_opt_free(s->priv_data);
2540
2541 for(i=0;i<s->nb_streams;i++) {
2542 /* free all data in a stream component */
2543 st = s->streams[i];
2544 if (st->parser) {
2545 av_parser_close(st->parser);
2546 av_free_packet(&st->cur_pkt);
2547 }
2548 av_dict_free(&st->metadata);
2549 av_free(st->index_entries);
2550 av_free(st->codec->extradata);
2551 av_free(st->codec->subtitle_header);
2552 av_free(st->codec);
2553 av_free(st->priv_data);
2554 av_free(st->info);
2555 av_free(st);
2556 }
2557 for(i=s->nb_programs-1; i>=0; i--) {
2558 av_dict_free(&s->programs[i]->metadata);
2559 av_freep(&s->programs[i]->stream_index);
2560 av_freep(&s->programs[i]);
2561 }
2562 av_freep(&s->programs);
2563 av_freep(&s->priv_data);
2564 while(s->nb_chapters--) {
2565 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2566 av_free(s->chapters[s->nb_chapters]);
2567 }
2568 av_freep(&s->chapters);
2569 av_dict_free(&s->metadata);
2570 av_freep(&s->streams);
2571 av_free(s);
2572 }
2573
2574 void av_close_input_file(AVFormatContext *s)
2575 {
2576 AVIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2577 av_close_input_stream(s);
2578 if (pb)
2579 avio_close(pb);
2580 }
2581
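/* Allocate a new AVStream, append it to s->streams and return it (NULL on
 * allocation failure). The stream starts with unknown start time and
 * duration, a cur_dts of 0 and an MPEG-like default time base of 1/90000
 * with 33 bits of PTS precision; demuxers typically call this from their
 * read_header() callback. */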
2582 AVStream *av_new_stream(AVFormatContext *s, int id)
2583 {
2584 AVStream *st;
2585 int i;
2586 AVStream **streams;
2587
2588 if (s->nb_streams >= INT_MAX/sizeof(*streams))
2589 return NULL;
2590 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
2591 if (!streams)
2592 return NULL;
2593 s->streams = streams;
2594
2595 st = av_mallocz(sizeof(AVStream));
2596 if (!st)
2597 return NULL;
2598 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2599 av_free(st);
2600 return NULL;
2601 }
2602
2603 st->codec= avcodec_alloc_context();
2604 if (s->iformat) {
2605 /* no default bitrate if decoding */
2606 st->codec->bit_rate = 0;
2607 }
2608 st->index = s->nb_streams;
2609 st->id = id;
2610 st->start_time = AV_NOPTS_VALUE;
2611 st->duration = AV_NOPTS_VALUE;
2612 /* We set the current DTS to 0 so that formats without any timestamps
2613 but with durations get some timestamps; formats with some unknown
2614 timestamps have their first few packets buffered and the
2615 timestamps corrected before they are returned to the user. */
2616 st->cur_dts = 0;
2617 st->first_dts = AV_NOPTS_VALUE;
2618 st->probe_packets = MAX_PROBE_PACKETS;
2619
2620 /* default pts setting is MPEG-like */
2621 av_set_pts_info(st, 33, 1, 90000);
2622 st->last_IP_pts = AV_NOPTS_VALUE;
2623 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2624 st->pts_buffer[i]= AV_NOPTS_VALUE;
2625 st->reference_dts = AV_NOPTS_VALUE;
2626
2627 st->sample_aspect_ratio = (AVRational){0,1};
2628
2629 s->streams[s->nb_streams++] = st;
2630 return st;
2631 }
2632
2633 AVProgram *av_new_program(AVFormatContext *ac, int id)
2634 {
2635 AVProgram *program=NULL;
2636 int i;
2637
2638 av_dlog(ac, "new_program: id=0x%04x\n", id);
2639
2640 for(i=0; i<ac->nb_programs; i++)
2641 if(ac->programs[i]->id == id)
2642 program = ac->programs[i];
2643
2644 if(!program){
2645 program = av_mallocz(sizeof(AVProgram));
2646 if (!program)
2647 return NULL;
2648 dynarray_add(&ac->programs, &ac->nb_programs, program);
2649 program->discard = AVDISCARD_NONE;
2650 }
2651 program->id = id;
2652
2653 return program;
2654 }
2655
2656 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2657 {
2658 AVChapter *chapter = NULL;
2659 int i;
2660
2661 for(i=0; i<s->nb_chapters; i++)
2662 if(s->chapters[i]->id == id)
2663 chapter = s->chapters[i];
2664
2665 if(!chapter){
2666 chapter= av_mallocz(sizeof(AVChapter));
2667 if(!chapter)
2668 return NULL;
2669 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2670 }
2671 av_dict_set(&chapter->metadata, "title", title, 0);
2672 chapter->id = id;
2673 chapter->time_base= time_base;
2674 chapter->start = start;
2675 chapter->end = end;
2676
2677 return chapter;
2678 }
2679
2680 /************************************************************/
2681 /* output media file */
2682
2683 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2684 {
2685 int ret;
2686
2687 if (s->oformat->priv_data_size > 0) {
2688 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2689 if (!s->priv_data)
2690 return AVERROR(ENOMEM);
2691 if (s->oformat->priv_class) {
2692 *(const AVClass**)s->priv_data= s->oformat->priv_class;
2693 av_opt_set_defaults(s->priv_data);
2694 }
2695 } else
2696 s->priv_data = NULL;
2697
2698 if (s->oformat->set_parameters) {
2699 ret = s->oformat->set_parameters(s, ap);
2700 if (ret < 0)
2701 return ret;
2702 }
2703 return 0;
2704 }
2705
2706 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2707 {
2708 const AVCodecTag *avctag;
2709 int n;
2710 enum CodecID id = CODEC_ID_NONE;
2711 unsigned int tag = 0;
2712
2713 /**
2714 * Check that tag + id is in the table
2715 * If neither is in the table -> OK
2716 * If tag is in the table with another id -> FAIL
2717 * If id is in the table with another tag -> FAIL unless strict < normal
2718 */
2719 for (n = 0; s->oformat->codec_tag[n]; n++) {
2720 avctag = s->oformat->codec_tag[n];
2721 while (avctag->id != CODEC_ID_NONE) {
2722 if (ff_toupper4(avctag->tag) == ff_toupper4(st->codec->codec_tag)) {
2723 id = avctag->id;
2724 if (id == st->codec->codec_id)
2725 return 1;
2726 }
2727 if (avctag->id == st->codec->codec_id)
2728 tag = avctag->tag;
2729 avctag++;
2730 }
2731 }
2732 if (id != CODEC_ID_NONE)
2733 return 0;
2734 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
2735 return 0;
2736 return 1;
2737 }
2738
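/* Sanity-check the streams (sample rate, time base, dimensions, aspect
 * ratio, codec tags), allocate the muxer's private data if needed, set
 * the "encoder" metadata tag unless bit-exact output is requested, call
 * the muxer's write_header() and initialize per-stream PTS generation. */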
2739 int av_write_header(AVFormatContext *s)
2740 {
2741 int ret, i;
2742 AVStream *st;
2743
2744 // some sanity checks
2745 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
2746 av_log(s, AV_LOG_ERROR, "no streams\n");
2747 return AVERROR(EINVAL);
2748 }
2749
2750 for(i=0;i<s->nb_streams;i++) {
2751 st = s->streams[i];
2752
2753 switch (st->codec->codec_type) {
2754 case AVMEDIA_TYPE_AUDIO:
2755 if(st->codec->sample_rate<=0){
2756 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2757 return AVERROR(EINVAL);
2758 }
2759 if(!st->codec->block_align)
2760 st->codec->block_align = st->codec->channels *
2761 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2762 break;
2763 case AVMEDIA_TYPE_VIDEO:
2764 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2765 av_log(s, AV_LOG_ERROR, "time base not set\n");
2766 return AVERROR(EINVAL);
2767 }
2768 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2769 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2770 return AVERROR(EINVAL);
2771 }
2772 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2773 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2774 return AVERROR(EINVAL);
2775 }
2776 break;
2777 }
2778
2779 if(s->oformat->codec_tag){
2780 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2781 // the current rawvideo encoding system ends up setting the wrong codec_tag for AVI, so we override it here
2782 st->codec->codec_tag= 0;
2783 }
2784 if(st->codec->codec_tag){
2785 if (!validate_codec_tag(s, st)) {
2786 char tagbuf[32];
2787 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2788 av_log(s, AV_LOG_ERROR,
2789 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2790 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2791 return AVERROR_INVALIDDATA;
2792 }
2793 }else
2794 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2795 }
2796
2797 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2798 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2799 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2800 }
2801
2802 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2803 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2804 if (!s->priv_data)
2805 return AVERROR(ENOMEM);
2806 }
2807
2808 /* set muxer identification string */
2809 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2810 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
2811 }
2812
2813 if(s->oformat->write_header){
2814 ret = s->oformat->write_header(s);
2815 if (ret < 0)
2816 return ret;
2817 }
2818
2819 /* init PTS generation */
2820 for(i=0;i<s->nb_streams;i++) {
2821 int64_t den = AV_NOPTS_VALUE;
2822 st = s->streams[i];
2823
2824 switch (st->codec->codec_type) {
2825 case AVMEDIA_TYPE_AUDIO:
2826 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2827 break;
2828 case AVMEDIA_TYPE_VIDEO:
2829 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2830 break;
2831 default:
2832 break;
2833 }
2834 if (den != AV_NOPTS_VALUE) {
2835 if (den <= 0)
2836 return AVERROR_INVALIDDATA;
2837 av_frac_init(&st->pts, 0, 0, den);
2838 }
2839 }
2840 return 0;
2841 }
2842
2843 //FIXME merge with compute_pkt_fields
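/* Fill in a missing packet duration from the frame duration, derive
 * missing pts/dts (using the reorder buffer for codecs with B-frame
 * delay), reject non-monotonic DTS and pts < dts, and advance the
 * stream's fractional PTS counter. */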
2844 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
2845 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2846 int num, den, frame_size, i;
2847
2848 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
2849 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2850
2851 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2852 return AVERROR(EINVAL);*/
2853
2854 /* duration field */
2855 if (pkt->duration == 0) {
2856 compute_frame_duration(&num, &den, st, NULL, pkt);
2857 if (den && num) {
2858 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2859 }
2860 }
2861
2862 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2863 pkt->pts= pkt->dts;
2864
2865 //XXX/FIXME this is a temporary hack until all encoders output pts
2866 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2867 pkt->dts=
2868 // pkt->pts= st->cur_dts;
2869 pkt->pts= st->pts.val;
2870 }
2871
2872 //calculate dts from pts
2873 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2874 st->pts_buffer[0]= pkt->pts;
2875 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2876 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
2877 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2878 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2879
2880 pkt->dts= st->pts_buffer[0];
2881 }
2882
2883 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2884 av_log(s, AV_LOG_ERROR,
2885 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
2886 st->index, st->cur_dts, pkt->dts);
2887 return AVERROR(EINVAL);
2888 }
2889 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2890 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index);
2891 return AVERROR(EINVAL);
2892 }
2893
2894 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2895 st->cur_dts= pkt->dts;
2896 st->pts.val= pkt->dts;
2897
2898 /* update pts */
2899 switch (st->codec->codec_type) {
2900 case AVMEDIA_TYPE_AUDIO:
2901 frame_size = get_audio_frame_size(st->codec, pkt->size);
2902
2903 /* HACK/FIXME: we skip the initial zero-size packets as they most
2904 likely correspond to the encoder delay, but it would be better if
2905 we had the real timestamps from the encoder */
2906 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2907 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2908 }
2909 break;
2910 case AVMEDIA_TYPE_VIDEO:
2911 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2912 break;
2913 default:
2914 break;
2915 }
2916 return 0;
2917 }
2918
2919 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2920 {
2921 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
2922
2923 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2924 return ret;
2925
2926 ret= s->oformat->write_packet(s, pkt);
2927 return ret;
2928 }
2929
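/* Insert a copy of pkt into the muxer's packet buffer at the position
 * chosen by compare(); the search starts at the last buffered packet of
 * the same stream, since packets of a single stream arrive with
 * non-decreasing DTS. */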
2930 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2931 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2932 {
2933 AVPacketList **next_point, *this_pktl;
2934
2935 this_pktl = av_mallocz(sizeof(AVPacketList));
2936 this_pktl->pkt= *pkt;
2937 pkt->destruct= NULL; // do not free original but only the copy
2938 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-allocated memory
2939
2940 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
2941 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
2942 }else
2943 next_point = &s->packet_buffer;
2944
2945 if(*next_point){
2946 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
2947 while(!compare(s, &(*next_point)->pkt, pkt)){
2948 next_point= &(*next_point)->next;
2949 }
2950 goto next_non_null;
2951 }else{
2952 next_point = &(s->packet_buffer_end->next);
2953 }
2954 }
2955 assert(!*next_point);
2956
2957 s->packet_buffer_end= this_pktl;
2958 next_non_null:
2959
2960 this_pktl->next= *next_point;
2961
2962 s->streams[pkt->stream_index]->last_in_packet_buffer=
2963 *next_point= this_pktl;
2964 }
2965
2966 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2967 {
2968 AVStream *st = s->streams[ pkt ->stream_index];
2969 AVStream *st2= s->streams[ next->stream_index];
2970 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
2971 st->time_base);
2972
2973 if (comp == 0)
2974 return pkt->stream_index < next->stream_index;
2975 return comp > 0;
2976 }
2977
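/* Buffer the incoming packet and output the buffered packet with the
 * lowest DTS, but only once every stream has at least one packet buffered
 * (or when flushing); returns 1 if a packet was output in *out, 0 if more
 * input is needed first. */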
2978 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2979 AVPacketList *pktl;
2980 int stream_count=0;
2981 int i;
2982
2983 if(pkt){
2984 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
2985 }
2986
2987 for(i=0; i < s->nb_streams; i++)
2988 stream_count+= !!s->streams[i]->last_in_packet_buffer;
2989
2990 if(stream_count && (s->nb_streams == stream_count || flush)){
2991 pktl= s->packet_buffer;
2992 *out= pktl->pkt;
2993
2994 s->packet_buffer= pktl->next;
2995 if(!s->packet_buffer)
2996 s->packet_buffer_end= NULL;
2997
2998 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
2999 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
3000 av_freep(&pktl);
3001 return 1;
3002 }else{
3003 av_init_packet(out);
3004 return 0;
3005 }
3006 }
3007
3008 /**
3009 * Interleave an AVPacket correctly so it can be muxed.
3010 * @param out the interleaved packet will be output here
3011 * @param in the input packet
3012 * @param flush 1 if no further packets are available as input and all
3013 * remaining packets should be output
3014 * @return 1 if a packet was output, 0 if no packet could be output,
3015 * < 0 if an error occurred
3016 */
3017 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
3018 if(s->oformat->interleave_packet)
3019 return s->oformat->interleave_packet(s, out, in, flush);
3020 else
3021 return av_interleave_packet_per_dts(s, out, in, flush);
3022 }
3023
3024 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
3025 AVStream *st= s->streams[ pkt->stream_index];
3026 int ret;
3027
3028 //FIXME/XXX/HACK drop zero-sized packets
3029 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
3030 return 0;
3031
3032 av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
3033 pkt->size, pkt->dts, pkt->pts);
3034 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3035 return ret;
3036
3037 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
3038 return AVERROR(EINVAL);
3039
3040 for(;;){
3041 AVPacket opkt;
3042 int ret= av_interleave_packet(s, &opkt, pkt, 0);
3043 if(ret<=0) //FIXME cleanup needed for ret<0 ?
3044 return ret;
3045
3046 ret= s->oformat->write_packet(s, &opkt);
3047
3048 av_free_packet(&opkt);
3049 pkt= NULL;
3050
3051 if(ret<0)
3052 return ret;
3053 }
3054 }
3055
3056 int av_write_trailer(AVFormatContext *s)
3057 {
3058 int ret, i;
3059
3060 for(;;){
3061 AVPacket pkt;
3062 ret= av_interleave_packet(s, &pkt, NULL, 1);
3063 if(ret<0) //FIXME cleanup needed for ret<0 ?
3064 goto fail;
3065 if(!ret)
3066 break;
3067
3068 ret= s->oformat->write_packet(s, &pkt);
3069
3070 av_free_packet(&pkt);
3071
3072 if(ret<0)
3073 goto fail;
3074 }
3075
3076 if(s->oformat->write_trailer)
3077 ret = s->oformat->write_trailer(s);
3078 fail:
3079 for(i=0;i<s->nb_streams;i++) {
3080 av_freep(&s->streams[i]->priv_data);
3081 av_freep(&s->streams[i]->index_entries);
3082 }
3083 if (s->iformat && s->iformat->priv_class)
3084 av_opt_free(s->priv_data);
3085 av_freep(&s->priv_data);
3086 return ret;
3087 }
3088
3089 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3090 {
3091 int i, j;
3092 AVProgram *program=NULL;
3093 void *tmp;
3094
3095 if (idx >= ac->nb_streams) {
3096 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3097 return;
3098 }
3099
3100 for(i=0; i<ac->nb_programs; i++){
3101 if(ac->programs[i]->id != progid)
3102 continue;
3103 program = ac->programs[i];
3104 for(j=0; j<program->nb_stream_indexes; j++)
3105 if(program->stream_index[j] == idx)
3106 return;
3107
3108 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3109 if(!tmp)
3110 return;
3111 program->stream_index = tmp;
3112 program->stream_index[program->nb_stream_indexes++] = idx;
3113 return;
3114 }
3115 }
3116
3117 static void print_fps(double d, const char *postfix){
3118 uint64_t v= lrintf(d*100);
3119 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3120 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3121 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3122 }
3123
3124 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3125 {
3126 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){
3127 AVDictionaryEntry *tag=NULL;
3128
3129 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3130 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3131 if(strcmp("language", tag->key))
3132 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
3133 }
3134 }
3135 }
3136
3137 /* "user interface" functions */
3138 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3139 {
3140 char buf[256];
3141 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3142 AVStream *st = ic->streams[i];
3143 int g = av_gcd(st->time_base.num, st->time_base.den);
3144 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3145 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3146 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
3147 /* the PID is important information, so we display it */
3148 /* XXX: add a generic system */
3149 if (flags & AVFMT_SHOW_IDS)
3150 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3151 if (lang)
3152 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3153 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3154 av_log(NULL, AV_LOG_INFO, ": %s", buf);
3155 if (st->sample_aspect_ratio.num && // default
3156 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3157 AVRational display_aspect_ratio;
3158 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3159 st->codec->width*st->sample_aspect_ratio.num,
3160 st->codec->height*st->sample_aspect_ratio.den,
3161 1024*1024);
3162 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
3163 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3164 display_aspect_ratio.num, display_aspect_ratio.den);
3165 }
3166 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3167 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3168 print_fps(av_q2d(st->avg_frame_rate), "fps");
3169 if(st->r_frame_rate.den && st->r_frame_rate.num)
3170 print_fps(av_q2d(st->r_frame_rate), "tbr");
3171 if(st->time_base.den && st->time_base.num)
3172 print_fps(1/av_q2d(st->time_base), "tbn");
3173 if(st->codec->time_base.den && st->codec->time_base.num)
3174 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3175 }
3176 if (st->disposition & AV_DISPOSITION_DEFAULT)
3177 av_log(NULL, AV_LOG_INFO, " (default)");
3178 if (st->disposition & AV_DISPOSITION_DUB)
3179 av_log(NULL, AV_LOG_INFO, " (dub)");
3180 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3181 av_log(NULL, AV_LOG_INFO, " (original)");
3182 if (st->disposition & AV_DISPOSITION_COMMENT)
3183 av_log(NULL, AV_LOG_INFO, " (comment)");
3184 if (st->disposition & AV_DISPOSITION_LYRICS)
3185 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3186 if (st->disposition & AV_DISPOSITION_KARAOKE)
3187 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3188 if (st->disposition & AV_DISPOSITION_FORCED)
3189 av_log(NULL, AV_LOG_INFO, " (forced)");
3190 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3191 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3192 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3193 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3194 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3195 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3196 av_log(NULL, AV_LOG_INFO, "\n");
3197 dump_metadata(NULL, st->metadata, " ");
3198 }
3199
3200 #if FF_API_DUMP_FORMAT
3201 void dump_format(AVFormatContext *ic,
3202 int index,
3203 const char *url,
3204 int is_output)
3205 {
3206 av_dump_format(ic, index, url, is_output);
3207 }
3208 #endif
3209
3210 void av_dump_format(AVFormatContext *ic,
3211 int index,
3212 const char *url,
3213 int is_output)
3214 {
3215 int i;
3216 uint8_t *printed = av_mallocz(ic->nb_streams);
3217 if (ic->nb_streams && !printed)
3218 return;
3219
3220 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3221 is_output ? "Output" : "Input",
3222 index,
3223 is_output ? ic->oformat->name : ic->iformat->name,
3224 is_output ? "to" : "from", url);
3225 dump_metadata(NULL, ic->metadata, " ");
3226 if (!is_output) {
3227 av_log(NULL, AV_LOG_INFO, " Duration: ");
3228 if (ic->duration != AV_NOPTS_VALUE) {
3229 int hours, mins, secs, us;
3230 secs = ic->duration / AV_TIME_BASE;
3231 us = ic->duration % AV_TIME_BASE;
3232 mins = secs / 60;
3233 secs %= 60;
3234 hours = mins / 60;
3235 mins %= 60;
3236 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3237 (100 * us) / AV_TIME_BASE);
3238 } else {
3239 av_log(NULL, AV_LOG_INFO, "N/A");
3240 }
3241 if (ic->start_time != AV_NOPTS_VALUE) {
3242 int secs, us;
3243 av_log(NULL, AV_LOG_INFO, ", start: ");
3244 secs = ic->start_time / AV_TIME_BASE;
3245 us = abs(ic->start_time % AV_TIME_BASE);
3246 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3247 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3248 }
3249 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3250 if (ic->bit_rate) {
3251 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3252 } else {
3253 av_log(NULL, AV_LOG_INFO, "N/A");
3254 }
3255 av_log(NULL, AV_LOG_INFO, "\n");
3256 }
3257 for (i = 0; i < ic->nb_chapters; i++) {
3258 AVChapter *ch = ic->chapters[i];
3259 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3260 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3261 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3262
3263 dump_metadata(NULL, ch->metadata, " ");
3264 }
3265 if(ic->nb_programs) {
3266 int j, k, total = 0;
3267 for(j=0; j<ic->nb_programs; j++) {
3268 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3269 "name", NULL, 0);
3270 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3271 name ? name->value : "");
3272 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3273 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3274 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3275 printed[ic->programs[j]->stream_index[k]] = 1;
3276 }
3277 total += ic->programs[j]->nb_stream_indexes;
3278 }
3279 if (total < ic->nb_streams)
3280 av_log(NULL, AV_LOG_INFO, " No Program\n");
3281 }
3282 for(i=0;i<ic->nb_streams;i++)
3283 if (!printed[i])
3284 dump_stream_format(ic, i, index, is_output);
3285
3286 av_free(printed);
3287 }
3288
3289 int64_t av_gettime(void)
3290 {
3291 struct timeval tv;
3292 gettimeofday(&tv,NULL);
3293 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
3294 }
3295
3296 uint64_t ff_ntp_time(void)
3297 {
3298 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3299 }
3300
3301 #if FF_API_PARSE_DATE
3302 #include "libavutil/parseutils.h"
3303
3304 int64_t parse_date(const char *timestr, int duration)
3305 {
3306 int64_t timeval;
3307 av_parse_time(&timeval, timestr, duration);
3308 return timeval;
3309 }
3310 #endif
3311
3312 #if FF_API_FIND_INFO_TAG
3313 #include "libavutil/parseutils.h"
3314
3315 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
3316 {
3317 return av_find_info_tag(arg, arg_size, tag1, info);
3318 }
3319 #endif
3320
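/* Example: path "frame%04d.png" with number 42 yields "frame0042.png";
 * "%%" escapes a literal '%', and a path without exactly one %d-style
 * sequence number makes the function fail and return -1. */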
3321 int av_get_frame_filename(char *buf, int buf_size,
3322 const char *path, int number)
3323 {
3324 const char *p;
3325 char *q, buf1[20], c;
3326 int nd, len, percentd_found;
3327
3328 q = buf;
3329 p = path;
3330 percentd_found = 0;
3331 for(;;) {
3332 c = *p++;
3333 if (c == '\0')
3334 break;
3335 if (c == '%') {
3336 do {
3337 nd = 0;
3338 while (isdigit(*p)) {
3339 nd = nd * 10 + *p++ - '0';
3340 }
3341 c = *p++;
3342 } while (isdigit(c));
3343
3344 switch(c) {
3345 case '%':
3346 goto addchar;
3347 case 'd':
3348 if (percentd_found)
3349 goto fail;
3350 percentd_found = 1;
3351 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3352 len = strlen(buf1);
3353 if ((q - buf + len) > buf_size - 1)
3354 goto fail;
3355 memcpy(q, buf1, len);
3356 q += len;
3357 break;
3358 default:
3359 goto fail;
3360 }
3361 } else {
3362 addchar:
3363 if ((q - buf) < buf_size - 1)
3364 *q++ = c;
3365 }
3366 }
3367 if (!percentd_found)
3368 goto fail;
3369 *q = '\0';
3370 return 0;
3371 fail:
3372 *q = '\0';
3373 return -1;
3374 }
3375
3376 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3377 {
3378 int len, i, j, c;
3379 #undef fprintf
3380 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3381
3382 for(i=0;i<size;i+=16) {
3383 len = size - i;
3384 if (len > 16)
3385 len = 16;
3386 PRINT("%08x ", i);
3387 for(j=0;j<16;j++) {
3388 if (j < len)
3389 PRINT(" %02x", buf[i+j]);
3390 else
3391 PRINT(" ");
3392 }
3393 PRINT(" ");
3394 for(j=0;j<len;j++) {
3395 c = buf[i+j];
3396 if (c < ' ' || c > '~')
3397 c = '.';
3398 PRINT("%c", c);
3399 }
3400 PRINT("\n");
3401 }
3402 #undef PRINT
3403 }
3404
3405 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3406 {
3407 hex_dump_internal(NULL, f, 0, buf, size);
3408 }
3409
3410 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3411 {
3412 hex_dump_internal(avcl, NULL, level, buf, size);
3413 }
3414
3415 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3416 {
3417 #undef fprintf
3418 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3419 PRINT("stream #%d:\n", pkt->stream_index);
3420 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3421 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3422 /* DTS is _always_ valid after av_read_frame() */
3423 PRINT(" dts=");
3424 if (pkt->dts == AV_NOPTS_VALUE)
3425 PRINT("N/A");
3426 else
3427 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3428 /* PTS may not be known if B-frames are present. */
3429 PRINT(" pts=");
3430 if (pkt->pts == AV_NOPTS_VALUE)
3431 PRINT("N/A");
3432 else
3433 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3434 PRINT("\n");
3435 PRINT(" size=%d\n", pkt->size);
3436 #undef PRINT
3437 if (dump_payload)
3438 av_hex_dump(f, pkt->data, pkt->size);
3439 }
3440
3441 #if FF_API_PKT_DUMP
3442 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3443 {
3444 AVRational tb = { 1, AV_TIME_BASE };
3445 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
3446 }
3447 #endif
3448
3449 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3450 {
3451 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3452 }
3453
3454 #if FF_API_PKT_DUMP
3455 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3456 {
3457 AVRational tb = { 1, AV_TIME_BASE };
3458 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
3459 }
3460 #endif
3461
3462 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3463 AVStream *st)
3464 {
3465 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
3466 }
3467
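/* Example: "http://user:pass@example.com:8080/path?query" splits into
 * proto "http", authorization "user:pass", hostname "example.com",
 * port 8080 and path "/path?query"; a plain file name with no "proto:"
 * prefix goes entirely into path. */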
3468 void av_url_split(char *proto, int proto_size,
3469 char *authorization, int authorization_size,
3470 char *hostname, int hostname_size,
3471 int *port_ptr,
3472 char *path, int path_size,
3473 const char *url)
3474 {
3475 const char *p, *ls, *at, *col, *brk;
3476
3477 if (port_ptr) *port_ptr = -1;
3478 if (proto_size > 0) proto[0] = 0;
3479 if (authorization_size > 0) authorization[0] = 0;
3480 if (hostname_size > 0) hostname[0] = 0;
3481 if (path_size > 0) path[0] = 0;
3482
3483 /* parse protocol */
3484 if ((p = strchr(url, ':'))) {
3485 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3486 p++; /* skip ':' */
3487 if (*p == '/') p++;
3488 if (*p == '/') p++;
3489 } else {
3490 /* no protocol means plain filename */
3491 av_strlcpy(path, url, path_size);
3492 return;
3493 }
3494
3495 /* separate path from hostname */
3496 ls = strchr(p, '/');
3497 if(!ls)
3498 ls = strchr(p, '?');
3499 if(ls)
3500 av_strlcpy(path, ls, path_size);
3501 else
3502 ls = &p[strlen(p)]; // XXX
3503
3504 /* the rest is the hostname; use that to parse auth/port */
3505 if (ls != p) {
3506 /* authorization (user[:pass]@hostname) */
3507 if ((at = strchr(p, '@')) && at < ls) {
3508 av_strlcpy(authorization, p,
3509 FFMIN(authorization_size, at + 1 - p));
3510 p = at + 1; /* skip '@' */
3511 }
3512
3513 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3514 /* [host]:port */
3515 av_strlcpy(hostname, p + 1,
3516 FFMIN(hostname_size, brk - p));
3517 if (brk[1] == ':' && port_ptr)
3518 *port_ptr = atoi(brk + 2);
3519 } else if ((col = strchr(p, ':')) && col < ls) {
3520 av_strlcpy(hostname, p,
3521 FFMIN(col + 1 - p, hostname_size));
3522 if (port_ptr) *port_ptr = atoi(col + 1);
3523 } else
3524 av_strlcpy(hostname, p,
3525 FFMIN(ls + 1 - p, hostname_size));
3526 }
3527 }
3528
3529 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
3530 {
3531 int i;
3532 static const char hex_table_uc[16] = { '0', '1', '2', '3',
3533 '4', '5', '6', '7',
3534 '8', '9', 'A', 'B',
3535 'C', 'D', 'E', 'F' };
3536 static const char hex_table_lc[16] = { '0', '1',