Make sure mp1/mp2 get their frame_size set.
[libav.git] / libavformat / utils.c
1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "internal.h"
23 #include "libavcodec/opt.h"
24 #include "metadata.h"
25 #include "libavutil/avstring.h"
26 #include "riff.h"
27 #include <sys/time.h>
28 #include <time.h>
29 #include <strings.h>
30
31 #undef NDEBUG
32 #include <assert.h>
33
34 /**
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
37 */
38
39 unsigned avformat_version(void)
40 {
41 return LIBAVFORMAT_VERSION_INT;
42 }
43
44 const char *avformat_configuration(void)
45 {
46 return FFMPEG_CONFIGURATION;
47 }
48
49 const char *avformat_license(void)
50 {
51 #define LICENSE_PREFIX "libavformat license: "
52 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
53 }
54
55 /* fraction handling */
56
57 /**
58 * f = val + (num / den) + 0.5.
59 *
60 * 'num' is normalized so that 0 <= num < den.
61 *
62 * @param f fractional number
63 * @param val integer value
64 * @param num must be >= 0
65 * @param den must be >= 1
66 */
67 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
68 {
69 num += (den >> 1);
70 if (num >= den) {
71 val += num / den;
72 num = num % den;
73 }
74 f->val = val;
75 f->num = num;
76 f->den = den;
77 }
78
79 /**
80 * Fractional addition to f: f = f + (incr / f->den).
81 *
82 * @param f fractional number
83 * @param incr increment, can be positive or negative
84 */
85 static void av_frac_add(AVFrac *f, int64_t incr)
86 {
87 int64_t num, den;
88
89 num = f->num + incr;
90 den = f->den;
91 if (num < 0) {
92 f->val += num / den;
93 num = num % den;
94 if (num < 0) {
95 num += den;
96 f->val--;
97 }
98 } else if (num >= den) {
99 f->val += num / den;
100 num = num % den;
101 }
102 f->num = num;
103 }
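/*
 * Illustrative sketch (not part of the build): how the two AVFrac helpers
 * above track a timestamp with a fractional remainder.  Assuming samples
 * that arrive in increments of 1001 against a denominator of 3000:
 *
 *     AVFrac f;
 *     av_frac_init(&f, 0, 0, 3000);  // f.val = 0, f.num = 1500 (rounding bias), f.den = 3000
 *     av_frac_add(&f, 1001);         // f.val = 0, f.num = 2501
 *     av_frac_add(&f, 1001);         // f.val = 1, f.num =  502  (carry into val)
 *     av_frac_add(&f, 1001);         // f.val = 1, f.num = 1503
 *
 * f.val is the rounded integer timestamp; f.num/f.den carries the rounding
 * error forward so it never accumulates.
 */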
104
105 /** head of registered input format linked list */
106 AVInputFormat *first_iformat = NULL;
107 /** head of registered output format linked list */
108 AVOutputFormat *first_oformat = NULL;
109
110 AVInputFormat *av_iformat_next(AVInputFormat *f)
111 {
112 if(f) return f->next;
113 else return first_iformat;
114 }
115
116 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
117 {
118 if(f) return f->next;
119 else return first_oformat;
120 }
121
122 void av_register_input_format(AVInputFormat *format)
123 {
124 AVInputFormat **p;
125 p = &first_iformat;
126 while (*p != NULL) p = &(*p)->next;
127 *p = format;
128 format->next = NULL;
129 }
130
131 void av_register_output_format(AVOutputFormat *format)
132 {
133 AVOutputFormat **p;
134 p = &first_oformat;
135 while (*p != NULL) p = &(*p)->next;
136 *p = format;
137 format->next = NULL;
138 }
139
140 #if LIBAVFORMAT_VERSION_MAJOR < 53
141 int match_ext(const char *filename, const char *extensions)
142 {
143 return av_match_ext(filename, extensions);
144 }
145 #endif
146
147 int av_match_ext(const char *filename, const char *extensions)
148 {
149 const char *ext, *p;
150 char ext1[32], *q;
151
152 if(!filename)
153 return 0;
154
155 ext = strrchr(filename, '.');
156 if (ext) {
157 ext++;
158 p = extensions;
159 for(;;) {
160 q = ext1;
161 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
162 *q++ = *p++;
163 *q = '\0';
164 if (!strcasecmp(ext1, ext))
165 return 1;
166 if (*p == '\0')
167 break;
168 p++;
169 }
170 }
171 return 0;
172 }
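/*
 * Illustrative sketch (not part of the build): av_match_ext() compares the
 * part after the last '.' case-insensitively against a comma-separated list:
 *
 *     av_match_ext("movie.MKV", "mkv,webm");  // -> 1
 *     av_match_ext("movie.mkv", "mp4,mov");   // -> 0
 *     av_match_ext("noextension", "mkv");     // -> 0, no '.' in the name
 *
 * Extensions in the list longer than 31 characters are truncated by the
 * fixed-size ext1[] buffer before the comparison.
 */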
173
174 static int match_format(const char *name, const char *names)
175 {
176 const char *p;
177 int len, namelen;
178
179 if (!name || !names)
180 return 0;
181
182 namelen = strlen(name);
183 while ((p = strchr(names, ','))) {
184 len = FFMAX(p - names, namelen);
185 if (!strncasecmp(name, names, len))
186 return 1;
187 names = p+1;
188 }
189 return !strcasecmp(name, names);
190 }
191
192 #if LIBAVFORMAT_VERSION_MAJOR < 53
193 AVOutputFormat *guess_format(const char *short_name, const char *filename,
194 const char *mime_type)
195 {
196 return av_guess_format(short_name, filename, mime_type);
197 }
198 #endif
199
200 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
201 const char *mime_type)
202 {
203 AVOutputFormat *fmt, *fmt_found;
204 int score_max, score;
205
206 /* specific test for image sequences */
207 #if CONFIG_IMAGE2_MUXER
208 if (!short_name && filename &&
209 av_filename_number_test(filename) &&
210 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
211 return av_guess_format("image2", NULL, NULL);
212 }
213 #endif
214 /* Find the proper file type. */
215 fmt_found = NULL;
216 score_max = 0;
217 fmt = first_oformat;
218 while (fmt != NULL) {
219 score = 0;
220 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
221 score += 100;
222 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
223 score += 10;
224 if (filename && fmt->extensions &&
225 av_match_ext(filename, fmt->extensions)) {
226 score += 5;
227 }
228 if (score > score_max) {
229 score_max = score;
230 fmt_found = fmt;
231 }
232 fmt = fmt->next;
233 }
234 return fmt_found;
235 }
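/*
 * Illustrative sketch (not part of the build): the scoring above weighs an
 * exact short-name match (100) over a MIME-type match (10) over a filename
 * extension match (5), so the strongest hint wins.  Assuming the usual
 * muxers are registered:
 *
 *     AVOutputFormat *f;
 *     f = av_guess_format("matroska", NULL, NULL);    // by short name
 *     f = av_guess_format(NULL, "out.mp4", NULL);     // by extension
 *     f = av_guess_format(NULL, NULL, "audio/mpeg");  // by MIME type
 *
 * A NULL return means no registered muxer matched any of the hints.
 */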
236
237 #if LIBAVFORMAT_VERSION_MAJOR < 53
238 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
239 const char *mime_type)
240 {
241 AVOutputFormat *fmt = av_guess_format(short_name, filename, mime_type);
242
243 if (fmt) {
244 AVOutputFormat *stream_fmt;
245 char stream_format_name[64];
246
247 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
248 stream_fmt = av_guess_format(stream_format_name, NULL, NULL);
249
250 if (stream_fmt)
251 fmt = stream_fmt;
252 }
253
254 return fmt;
255 }
256 #endif
257
258 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
259 const char *filename, const char *mime_type, enum CodecType type){
260 if(type == CODEC_TYPE_VIDEO){
261 enum CodecID codec_id= CODEC_ID_NONE;
262
263 #if CONFIG_IMAGE2_MUXER
264 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
265 codec_id= av_guess_image2_codec(filename);
266 }
267 #endif
268 if(codec_id == CODEC_ID_NONE)
269 codec_id= fmt->video_codec;
270 return codec_id;
271 }else if(type == CODEC_TYPE_AUDIO)
272 return fmt->audio_codec;
273 else
274 return CODEC_ID_NONE;
275 }
276
277 AVInputFormat *av_find_input_format(const char *short_name)
278 {
279 AVInputFormat *fmt;
280 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
281 if (match_format(short_name, fmt->name))
282 return fmt;
283 }
284 return NULL;
285 }
286
287 /* memory handling */
288
289
290 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
291 {
292 int ret= av_new_packet(pkt, size);
293
294 if(ret<0)
295 return ret;
296
297 pkt->pos= url_ftell(s);
298
299 ret= get_buffer(s, pkt->data, size);
300 if(ret<=0)
301 av_free_packet(pkt);
302 else
303 av_shrink_packet(pkt, ret);
304
305 return ret;
306 }
307
308
309 int av_filename_number_test(const char *filename)
310 {
311 char buf[1024];
312 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
313 }
314
315 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
316 {
317 AVInputFormat *fmt1, *fmt;
318 int score;
319
320 fmt = NULL;
321 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
322 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
323 continue;
324 score = 0;
325 if (fmt1->read_probe) {
326 score = fmt1->read_probe(pd);
327 } else if (fmt1->extensions) {
328 if (av_match_ext(pd->filename, fmt1->extensions)) {
329 score = 50;
330 }
331 }
332 if (score > *score_max) {
333 *score_max = score;
334 fmt = fmt1;
335 }else if (score == *score_max)
336 fmt = NULL;
337 }
338 return fmt;
339 }
340
341 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
342 int score=0;
343 return av_probe_input_format2(pd, is_opened, &score);
344 }
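/*
 * Illustrative sketch (not part of the build): probing an already opened
 * input.  The caller fills an AVProbeData with a filename hint (may be "")
 * and a buffer padded with AVPROBE_PADDING_SIZE zero bytes, exactly as
 * av_open_input_file() does below:
 *
 *     unsigned char buf[4096 + AVPROBE_PADDING_SIZE];
 *     AVProbeData pd;
 *     AVInputFormat *fmt;
 *     memset(buf, 0, sizeof(buf));
 *     // ... read up to 4096 bytes of the stream into buf ...
 *     pd.filename = "input.ts";   // hypothetical name, only used as a hint
 *     pd.buf      = buf;
 *     pd.buf_size = 4096;
 *     fmt = av_probe_input_format(&pd, 1);
 *     // fmt is NULL if no demuxer scored above 0 or if two scored equally
 */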
345
346 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
347 {
348 AVInputFormat *fmt;
349 fmt = av_probe_input_format2(pd, 1, &score);
350
351 if (fmt) {
352 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
353 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
354 if (!strcmp(fmt->name, "mp3")) {
355 st->codec->codec_id = CODEC_ID_MP3;
356 st->codec->codec_type = CODEC_TYPE_AUDIO;
357 } else if (!strcmp(fmt->name, "ac3")) {
358 st->codec->codec_id = CODEC_ID_AC3;
359 st->codec->codec_type = CODEC_TYPE_AUDIO;
360 } else if (!strcmp(fmt->name, "eac3")) {
361 st->codec->codec_id = CODEC_ID_EAC3;
362 st->codec->codec_type = CODEC_TYPE_AUDIO;
363 } else if (!strcmp(fmt->name, "mpegvideo")) {
364 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
365 st->codec->codec_type = CODEC_TYPE_VIDEO;
366 } else if (!strcmp(fmt->name, "m4v")) {
367 st->codec->codec_id = CODEC_ID_MPEG4;
368 st->codec->codec_type = CODEC_TYPE_VIDEO;
369 } else if (!strcmp(fmt->name, "h264")) {
370 st->codec->codec_id = CODEC_ID_H264;
371 st->codec->codec_type = CODEC_TYPE_VIDEO;
372 } else if (!strcmp(fmt->name, "dts")) {
373 st->codec->codec_id = CODEC_ID_DTS;
374 st->codec->codec_type = CODEC_TYPE_AUDIO;
375 }
376 }
377 return !!fmt;
378 }
379
380 /************************************************************/
381 /* input media file */
382
383 /**
384 * Open a media file from an IO stream. 'fmt' must be specified.
385 */
386 int av_open_input_stream(AVFormatContext **ic_ptr,
387 ByteIOContext *pb, const char *filename,
388 AVInputFormat *fmt, AVFormatParameters *ap)
389 {
390 int err;
391 AVFormatContext *ic;
392 AVFormatParameters default_ap;
393
394 if(!ap){
395 ap=&default_ap;
396 memset(ap, 0, sizeof(default_ap));
397 }
398
399 if(!ap->prealloced_context)
400 ic = avformat_alloc_context();
401 else
402 ic = *ic_ptr;
403 if (!ic) {
404 err = AVERROR(ENOMEM);
405 goto fail;
406 }
407 ic->iformat = fmt;
408 ic->pb = pb;
409 ic->duration = AV_NOPTS_VALUE;
410 ic->start_time = AV_NOPTS_VALUE;
411 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
412
413 /* allocate private data */
414 if (fmt->priv_data_size > 0) {
415 ic->priv_data = av_mallocz(fmt->priv_data_size);
416 if (!ic->priv_data) {
417 err = AVERROR(ENOMEM);
418 goto fail;
419 }
420 } else {
421 ic->priv_data = NULL;
422 }
423
424 if (ic->iformat->read_header) {
425 err = ic->iformat->read_header(ic, ap);
426 if (err < 0)
427 goto fail;
428 }
429
430 if (pb && !ic->data_offset)
431 ic->data_offset = url_ftell(ic->pb);
432
433 #if LIBAVFORMAT_VERSION_MAJOR < 53
434 ff_metadata_demux_compat(ic);
435 #endif
436
437 ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
438
439 *ic_ptr = ic;
440 return 0;
441 fail:
442 if (ic) {
443 int i;
444 av_freep(&ic->priv_data);
445 for(i=0;i<ic->nb_streams;i++) {
446 AVStream *st = ic->streams[i];
447 if (st) {
448 av_free(st->priv_data);
449 av_free(st->codec->extradata);
450 }
451 av_free(st);
452 }
453 }
454 av_free(ic);
455 *ic_ptr = NULL;
456 return err;
457 }
458
459 /** size of probe buffer, for guessing file type from file contents */
460 #define PROBE_BUF_MIN 2048
461 #define PROBE_BUF_MAX (1<<20)
462
463 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
464 AVInputFormat *fmt,
465 int buf_size,
466 AVFormatParameters *ap)
467 {
468 int err, probe_size;
469 AVProbeData probe_data, *pd = &probe_data;
470 ByteIOContext *pb = NULL;
471 void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
472
473 pd->filename = "";
474 if (filename)
475 pd->filename = filename;
476 pd->buf = NULL;
477 pd->buf_size = 0;
478
479 if (!fmt) {
480 /* guess format if no file can be opened */
481 fmt = av_probe_input_format(pd, 0);
482 }
483
484 /* Do not open file if the format does not need it. XXX: specific
485 hack needed to handle RTSP/TCP */
486 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
487 /* if no file needed do not try to open one */
488 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
489 goto fail;
490 }
491 if (buf_size > 0) {
492 url_setbufsize(pb, buf_size);
493 }
494
495 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
496 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
497 /* read probe data */
498 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
499 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
500
501 if ((int)pd->buf_size < 0) {
502 err = pd->buf_size;
503 goto fail;
504 }
505
506 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
507 if (url_fseek(pb, 0, SEEK_SET) < 0) {
508 url_fclose(pb);
509 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
510 pb = NULL;
511 err = AVERROR(EIO);
512 goto fail;
513 }
514 }
515 /* guess file format */
516 fmt = av_probe_input_format2(pd, 1, &score);
517 if(fmt){
518 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
519 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
520 }else
521 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
522 }
523 }
524 av_freep(&pd->buf);
525 }
526
527 /* if still no format found, error */
528 if (!fmt) {
529 err = AVERROR_NOFMT;
530 goto fail;
531 }
532
533 /* check filename in case an image number is expected */
534 if (fmt->flags & AVFMT_NEEDNUMBER) {
535 if (!av_filename_number_test(filename)) {
536 err = AVERROR_NUMEXPECTED;
537 goto fail;
538 }
539 }
540 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
541 if (err)
542 goto fail;
543 return 0;
544 fail:
545 av_freep(&pd->buf);
546 if (pb)
547 url_fclose(pb);
548 if (ap && ap->prealloced_context)
549 av_free(*ic_ptr);
550 *ic_ptr = NULL;
551 return err;
552
553 }
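/*
 * Illustrative sketch (not part of the build): typical demuxing setup on top
 * of the entry points above, using the public API of this era
 * (av_register_all(), dump_format() and av_close_input_file() live in other
 * files and are assumed here):
 *
 *     AVFormatContext *ic = NULL;
 *     av_register_all();
 *     if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
 *         return -1;                      // probing or opening failed
 *     if (av_find_stream_info(ic) < 0) {  // fill codec parameters, durations
 *         av_close_input_file(ic);
 *         return -1;
 *     }
 *     dump_format(ic, 0, "input.mpg", 0);
 *     // ... read packets with av_read_frame(), then:
 *     av_close_input_file(ic);
 */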
554
555 /*******************************************************/
556
557 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
558 AVPacketList **plast_pktl){
559 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
560 if (!pktl)
561 return NULL;
562
563 if (*packet_buffer)
564 (*plast_pktl)->next = pktl;
565 else
566 *packet_buffer = pktl;
567
568 /* add the packet in the buffered packet list */
569 *plast_pktl = pktl;
570 pktl->pkt= *pkt;
571 return &pktl->pkt;
572 }
573
574 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
575 {
576 int ret, i;
577 AVStream *st;
578
579 for(;;){
580 AVPacketList *pktl = s->raw_packet_buffer;
581
582 if (pktl) {
583 *pkt = pktl->pkt;
584 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
585 !s->streams[pkt->stream_index]->probe_packets ||
586 s->raw_packet_buffer_remaining_size < pkt->size){
587 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
588 av_freep(&pd->buf);
589 pd->buf_size = 0;
590 s->raw_packet_buffer = pktl->next;
591 s->raw_packet_buffer_remaining_size += pkt->size;
592 av_free(pktl);
593 return 0;
594 }
595 }
596
597 av_init_packet(pkt);
598 ret= s->iformat->read_packet(s, pkt);
599 if (ret < 0) {
600 if (!pktl || ret == AVERROR(EAGAIN))
601 return ret;
602 for (i = 0; i < s->nb_streams; i++)
603 s->streams[i]->probe_packets = 0;
604 continue;
605 }
606 st= s->streams[pkt->stream_index];
607
608 switch(st->codec->codec_type){
609 case CODEC_TYPE_VIDEO:
610 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
611 break;
612 case CODEC_TYPE_AUDIO:
613 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
614 break;
615 case CODEC_TYPE_SUBTITLE:
616 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
617 break;
618 }
619
620 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
621 !st->probe_packets))
622 return ret;
623
624 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
625 s->raw_packet_buffer_remaining_size -= pkt->size;
626
627 if(st->codec->codec_id == CODEC_ID_PROBE){
628 AVProbeData *pd = &st->probe_data;
629 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
630 --st->probe_packets;
631
632 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
633 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
634 pd->buf_size += pkt->size;
635 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
636
637 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
638 set_codec_from_probe_data(s, st, pd, 1);
639 if(st->codec->codec_id != CODEC_ID_PROBE){
640 pd->buf_size=0;
641 av_freep(&pd->buf);
642 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
643 }
644 }
645 }
646 }
647 }
648
649 /**********************************************************/
650
651 /**
652 * Get the number of samples of an audio frame. Return -1 on error.
653 */
654 static int get_audio_frame_size(AVCodecContext *enc, int size)
655 {
656 int frame_size;
657
658 if(enc->codec_id == CODEC_ID_VORBIS)
659 return -1;
660
661 if (enc->frame_size <= 1) {
662 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
663
664 if (bits_per_sample) {
665 if (enc->channels == 0)
666 return -1;
667 frame_size = (size << 3) / (bits_per_sample * enc->channels);
668 } else {
669 /* used for example by ADPCM codecs */
670 if (enc->bit_rate == 0)
671 return -1;
672 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
673 }
674 } else {
675 frame_size = enc->frame_size;
676 }
677 return frame_size;
678 }
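/*
 * Worked example for the two fallback paths above (illustrative only):
 * - codecs with a fixed bits_per_sample (PCM family): a 4096-byte packet of
 *   16-bit stereo gives (4096*8) / (16*2) = 1024 samples;
 * - codecs without it (e.g. some ADPCM variants) fall back to the bit rate:
 *   a 512-byte packet at 88200 bit/s and 44100 Hz gives
 *   (512*8*44100) / 88200 = 2048 samples.
 * Codecs with a fixed frame_size (1152 samples for an MP2 frame, for
 * example) never reach these fallbacks, which is why the demuxer or parser
 * has to set frame_size for them.
 */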
679
680
681 /**
682 * Compute the frame duration as the fraction *pnum / *pden, in seconds; both are set to 0 if it is not available.
683 */
684 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
685 AVCodecParserContext *pc, AVPacket *pkt)
686 {
687 int frame_size;
688
689 *pnum = 0;
690 *pden = 0;
691 switch(st->codec->codec_type) {
692 case CODEC_TYPE_VIDEO:
693 if(st->time_base.num*1000LL > st->time_base.den){
694 *pnum = st->time_base.num;
695 *pden = st->time_base.den;
696 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
697 *pnum = st->codec->time_base.num;
698 *pden = st->codec->time_base.den;
699 if (pc && pc->repeat_pict) {
700 *pnum = (*pnum) * (1 + pc->repeat_pict);
701 }
702 }
703 break;
704 case CODEC_TYPE_AUDIO:
705 frame_size = get_audio_frame_size(st->codec, pkt->size);
706 if (frame_size < 0)
707 break;
708 *pnum = frame_size;
709 *pden = st->codec->sample_rate;
710 break;
711 default:
712 break;
713 }
714 }
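/*
 * Worked example (illustrative only): for a 25 fps stream (codec time_base
 * 1/25) carried in a container with stream time_base 1/90000,
 * compute_frame_duration() returns *pnum = 1, *pden = 25, and
 * compute_pkt_fields() below turns that into
 *
 *     pkt->duration = av_rescale(1, 1 * 90000, 25 * 1) = 3600
 *
 * ticks of the stream time base.  With repeat_pict = 1 from the parser the
 * numerator doubles and the duration becomes 7200 ticks.
 */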
715
716 static int is_intra_only(AVCodecContext *enc){
717 if(enc->codec_type == CODEC_TYPE_AUDIO){
718 return 1;
719 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
720 switch(enc->codec_id){
721 case CODEC_ID_MJPEG:
722 case CODEC_ID_MJPEGB:
723 case CODEC_ID_LJPEG:
724 case CODEC_ID_RAWVIDEO:
725 case CODEC_ID_DVVIDEO:
726 case CODEC_ID_HUFFYUV:
727 case CODEC_ID_FFVHUFF:
728 case CODEC_ID_ASV1:
729 case CODEC_ID_ASV2:
730 case CODEC_ID_VCR1:
731 case CODEC_ID_DNXHD:
732 case CODEC_ID_JPEG2000:
733 return 1;
734 default: break;
735 }
736 }
737 return 0;
738 }
739
740 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
741 int64_t dts, int64_t pts)
742 {
743 AVStream *st= s->streams[stream_index];
744 AVPacketList *pktl= s->packet_buffer;
745
746 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
747 return;
748
749 st->first_dts= dts - st->cur_dts;
750 st->cur_dts= dts;
751
752 for(; pktl; pktl= pktl->next){
753 if(pktl->pkt.stream_index != stream_index)
754 continue;
755 //FIXME think more about this check
756 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
757 pktl->pkt.pts += st->first_dts;
758
759 if(pktl->pkt.dts != AV_NOPTS_VALUE)
760 pktl->pkt.dts += st->first_dts;
761
762 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
763 st->start_time= pktl->pkt.pts;
764 }
765 if (st->start_time == AV_NOPTS_VALUE)
766 st->start_time = pts;
767 }
768
769 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
770 {
771 AVPacketList *pktl= s->packet_buffer;
772 int64_t cur_dts= 0;
773
774 if(st->first_dts != AV_NOPTS_VALUE){
775 cur_dts= st->first_dts;
776 for(; pktl; pktl= pktl->next){
777 if(pktl->pkt.stream_index == pkt->stream_index){
778 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
779 break;
780 cur_dts -= pkt->duration;
781 }
782 }
783 pktl= s->packet_buffer;
784 st->first_dts = cur_dts;
785 }else if(st->cur_dts)
786 return;
787
788 for(; pktl; pktl= pktl->next){
789 if(pktl->pkt.stream_index != pkt->stream_index)
790 continue;
791 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
792 && !pktl->pkt.duration){
793 pktl->pkt.dts= cur_dts;
794 if(!st->codec->has_b_frames)
795 pktl->pkt.pts= cur_dts;
796 cur_dts += pkt->duration;
797 pktl->pkt.duration= pkt->duration;
798 }else
799 break;
800 }
801 if(st->first_dts == AV_NOPTS_VALUE)
802 st->cur_dts= cur_dts;
803 }
804
805 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
806 AVCodecParserContext *pc, AVPacket *pkt)
807 {
808 int num, den, presentation_delayed, delay, i;
809 int64_t offset;
810
811 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
812 pkt->dts= AV_NOPTS_VALUE;
813
814 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
815 //FIXME Set low_delay = 0 when has_b_frames = 1
816 st->codec->has_b_frames = 1;
817
818 /* do we have a video B-frame ? */
819 delay= st->codec->has_b_frames;
820 presentation_delayed = 0;
821 /* XXX: need has_b_frame, but cannot get it if the codec is
822 not initialized */
823 if (delay &&
824 pc && pc->pict_type != FF_B_TYPE)
825 presentation_delayed = 1;
826
827 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
828 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
829 pkt->dts -= 1LL<<st->pts_wrap_bits;
830 }
831
832 // Some MPEG-2 in MPEG-PS lacks DTS (issue171 / input_file.mpg);
833 // we take the conservative approach and discard both.
834 // Note: if this misbehaves for an H.264 file, presentation_delayed is probably not set correctly.
835 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
836 av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
837 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
838 }
839
840 if (pkt->duration == 0) {
841 compute_frame_duration(&num, &den, st, pc, pkt);
842 if (den && num) {
843 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
844
845 if(pkt->duration != 0 && s->packet_buffer)
846 update_initial_durations(s, st, pkt);
847 }
848 }
849
850 /* correct timestamps with byte offset if demuxers only have timestamps
851 on packet boundaries */
852 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
853 /* this will estimate bitrate based on this frame's duration and size */
854 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
855 if(pkt->pts != AV_NOPTS_VALUE)
856 pkt->pts += offset;
857 if(pkt->dts != AV_NOPTS_VALUE)
858 pkt->dts += offset;
859 }
860
861 if (pc && pc->dts_sync_point >= 0) {
862 // we have synchronization info from the parser
863 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
864 if (den > 0) {
865 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
866 if (pkt->dts != AV_NOPTS_VALUE) {
867 // got DTS from the stream, update reference timestamp
868 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
869 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
870 } else if (st->reference_dts != AV_NOPTS_VALUE) {
871 // compute DTS based on reference timestamp
872 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
873 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
874 }
875 if (pc->dts_sync_point > 0)
876 st->reference_dts = pkt->dts; // new reference
877 }
878 }
879
880 /* This may be redundant, but it should not hurt. */
881 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
882 presentation_delayed = 1;
883
884 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
885 /* interpolate PTS and DTS if they are not present */
886 //We skip H264 currently because delay and has_b_frames are not reliably set
887 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
888 if (presentation_delayed) {
889 /* DTS = decompression timestamp */
890 /* PTS = presentation timestamp */
891 if (pkt->dts == AV_NOPTS_VALUE)
892 pkt->dts = st->last_IP_pts;
893 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
894 if (pkt->dts == AV_NOPTS_VALUE)
895 pkt->dts = st->cur_dts;
896
897 /* this is tricky: the dts must be incremented by the duration
898 of the frame we are displaying, i.e. the last I- or P-frame */
899 if (st->last_IP_duration == 0)
900 st->last_IP_duration = pkt->duration;
901 if(pkt->dts != AV_NOPTS_VALUE)
902 st->cur_dts = pkt->dts + st->last_IP_duration;
903 st->last_IP_duration = pkt->duration;
904 st->last_IP_pts= pkt->pts;
905 /* cannot compute PTS if not present (we can compute it only
906 by knowing the future) */
907 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
908 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
909 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
910 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
911 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
912 pkt->pts += pkt->duration;
913 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
914 }
915 }
916
917 /* presentation is not delayed : PTS and DTS are the same */
918 if(pkt->pts == AV_NOPTS_VALUE)
919 pkt->pts = pkt->dts;
920 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
921 if(pkt->pts == AV_NOPTS_VALUE)
922 pkt->pts = st->cur_dts;
923 pkt->dts = pkt->pts;
924 if(pkt->pts != AV_NOPTS_VALUE)
925 st->cur_dts = pkt->pts + pkt->duration;
926 }
927 }
928
929 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
930 st->pts_buffer[0]= pkt->pts;
931 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
932 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
933 if(pkt->dts == AV_NOPTS_VALUE)
934 pkt->dts= st->pts_buffer[0];
935 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
936 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
937 }
938 if(pkt->dts > st->cur_dts)
939 st->cur_dts = pkt->dts;
940 }
941
942 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
943
944 /* update flags */
945 if(is_intra_only(st->codec))
946 pkt->flags |= PKT_FLAG_KEY;
947 else if (pc) {
948 pkt->flags = 0;
949 /* keyframe computation */
950 if (pc->key_frame == 1)
951 pkt->flags |= PKT_FLAG_KEY;
952 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
953 pkt->flags |= PKT_FLAG_KEY;
954 }
955 if (pc)
956 pkt->convergence_duration = pc->convergence_duration;
957 }
958
959
960 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
961 {
962 AVStream *st;
963 int len, ret, i;
964
965 av_init_packet(pkt);
966
967 for(;;) {
968 /* select current input stream component */
969 st = s->cur_st;
970 if (st) {
971 if (!st->need_parsing || !st->parser) {
972 /* no parsing needed: we just output the packet as is */
973 /* raw data support */
974 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
975 compute_pkt_fields(s, st, NULL, pkt);
976 s->cur_st = NULL;
977 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
978 (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
979 ff_reduce_index(s, st->index);
980 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
981 }
982 break;
983 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
984 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
985 st->cur_ptr, st->cur_len,
986 st->cur_pkt.pts, st->cur_pkt.dts,
987 st->cur_pkt.pos);
988 st->cur_pkt.pts = AV_NOPTS_VALUE;
989 st->cur_pkt.dts = AV_NOPTS_VALUE;
990 /* increment read pointer */
991 st->cur_ptr += len;
992 st->cur_len -= len;
993
994 /* return packet if any */
995 if (pkt->size) {
996 got_packet:
997 pkt->duration = 0;
998 pkt->stream_index = st->index;
999 pkt->pts = st->parser->pts;
1000 pkt->dts = st->parser->dts;
1001 pkt->pos = st->parser->pos;
1002 pkt->destruct = NULL;
1003 compute_pkt_fields(s, st, st->parser, pkt);
1004
1005 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
1006 ff_reduce_index(s, st->index);
1007 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1008 0, 0, AVINDEX_KEYFRAME);
1009 }
1010
1011 break;
1012 }
1013 } else {
1014 /* free packet */
1015 av_free_packet(&st->cur_pkt);
1016 s->cur_st = NULL;
1017 }
1018 } else {
1019 AVPacket cur_pkt;
1020 /* read next packet */
1021 ret = av_read_packet(s, &cur_pkt);
1022 if (ret < 0) {
1023 if (ret == AVERROR(EAGAIN))
1024 return ret;
1025 /* return the last frames, if any */
1026 for(i = 0; i < s->nb_streams; i++) {
1027 st = s->streams[i];
1028 if (st->parser && st->need_parsing) {
1029 av_parser_parse2(st->parser, st->codec,
1030 &pkt->data, &pkt->size,
1031 NULL, 0,
1032 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
1033 AV_NOPTS_VALUE);
1034 if (pkt->size)
1035 goto got_packet;
1036 }
1037 }
1038 /* no more packets: really terminate parsing */
1039 return ret;
1040 }
1041 st = s->streams[cur_pkt.stream_index];
1042 st->cur_pkt= cur_pkt;
1043
1044 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1045 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1046 st->cur_pkt.pts < st->cur_pkt.dts){
1047 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1048 st->cur_pkt.stream_index,
1049 st->cur_pkt.pts,
1050 st->cur_pkt.dts,
1051 st->cur_pkt.size);
1052 // av_free_packet(&st->cur_pkt);
1053 // return -1;
1054 }
1055
1056 if(s->debug & FF_FDEBUG_TS)
1057 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1058 st->cur_pkt.stream_index,
1059 st->cur_pkt.pts,
1060 st->cur_pkt.dts,
1061 st->cur_pkt.size,
1062 st->cur_pkt.duration,
1063 st->cur_pkt.flags);
1064
1065 s->cur_st = st;
1066 st->cur_ptr = st->cur_pkt.data;
1067 st->cur_len = st->cur_pkt.size;
1068 if (st->need_parsing && !st->parser) {
1069 st->parser = av_parser_init(st->codec->codec_id);
1070 if (!st->parser) {
1071 /* no parser available: just output the raw packets */
1072 st->need_parsing = AVSTREAM_PARSE_NONE;
1073 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1074 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1075 }
1076 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1077 st->parser->next_frame_offset=
1078 st->parser->cur_offset= st->cur_pkt.pos;
1079 }
1080 }
1081 }
1082 }
1083 if(s->debug & FF_FDEBUG_TS)
1084 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1085 pkt->stream_index,
1086 pkt->pts,
1087 pkt->dts,
1088 pkt->size,
1089 pkt->duration,
1090 pkt->flags);
1091
1092 return 0;
1093 }
1094
1095 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1096 {
1097 AVPacketList *pktl;
1098 int eof=0;
1099 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1100
1101 for(;;){
1102 pktl = s->packet_buffer;
1103 if (pktl) {
1104 AVPacket *next_pkt= &pktl->pkt;
1105
1106 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1107 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1108 if( pktl->pkt.stream_index == next_pkt->stream_index
1109 && next_pkt->dts < pktl->pkt.dts
1110 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1111 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1112 next_pkt->pts= pktl->pkt.dts;
1113 }
1114 pktl= pktl->next;
1115 }
1116 pktl = s->packet_buffer;
1117 }
1118
1119 if( next_pkt->pts != AV_NOPTS_VALUE
1120 || next_pkt->dts == AV_NOPTS_VALUE
1121 || !genpts || eof){
1122 /* read packet from packet buffer, if there is data */
1123 *pkt = *next_pkt;
1124 s->packet_buffer = pktl->next;
1125 av_free(pktl);
1126 return 0;
1127 }
1128 }
1129 if(genpts){
1130 int ret= av_read_frame_internal(s, pkt);
1131 if(ret<0){
1132 if(pktl && ret != AVERROR(EAGAIN)){
1133 eof=1;
1134 continue;
1135 }else
1136 return ret;
1137 }
1138
1139 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1140 &s->packet_buffer_end)) < 0)
1141 return AVERROR(ENOMEM);
1142 }else{
1143 assert(!s->packet_buffer);
1144 return av_read_frame_internal(s, pkt);
1145 }
1146 }
1147 }
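/*
 * Illustrative sketch (not part of the build): the canonical read loop on
 * top of av_read_frame().  A returned packet is only valid until the next
 * call unless av_dup_packet() is used, and must be freed by the caller:
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         if (pkt.stream_index == video_index) {  // whichever stream was picked
 *             // feed pkt.data / pkt.size to the decoder here
 *         }
 *         av_free_packet(&pkt);
 *     }
 *     // a negative return value means end of file or a read error
 *
 * Setting AVFMT_FLAG_GENPTS in ic->flags before reading makes the function
 * buffer packets internally so that missing PTS values can be filled in.
 */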
1148
1149 /* XXX: get rid of the packet queue */
1150 static void flush_packet_queue(AVFormatContext *s)
1151 {
1152 AVPacketList *pktl;
1153
1154 for(;;) {
1155 pktl = s->packet_buffer;
1156 if (!pktl)
1157 break;
1158 s->packet_buffer = pktl->next;
1159 av_free_packet(&pktl->pkt);
1160 av_free(pktl);
1161 }
1162 while(s->raw_packet_buffer){
1163 pktl = s->raw_packet_buffer;
1164 s->raw_packet_buffer = pktl->next;
1165 av_free_packet(&pktl->pkt);
1166 av_free(pktl);
1167 }
1168 s->packet_buffer_end=
1169 s->raw_packet_buffer_end= NULL;
1170 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1171 }
1172
1173 /*******************************************************/
1174 /* seek support */
1175
1176 int av_find_default_stream_index(AVFormatContext *s)
1177 {
1178 int first_audio_index = -1;
1179 int i;
1180 AVStream *st;
1181
1182 if (s->nb_streams <= 0)
1183 return -1;
1184 for(i = 0; i < s->nb_streams; i++) {
1185 st = s->streams[i];
1186 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1187 return i;
1188 }
1189 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1190 first_audio_index = i;
1191 }
1192 return first_audio_index >= 0 ? first_audio_index : 0;
1193 }
1194
1195 /**
1196 * Flush the frame reader.
1197 */
1198 void av_read_frame_flush(AVFormatContext *s)
1199 {
1200 AVStream *st;
1201 int i, j;
1202
1203 flush_packet_queue(s);
1204
1205 s->cur_st = NULL;
1206
1207 /* for each stream, reset read state */
1208 for(i = 0; i < s->nb_streams; i++) {
1209 st = s->streams[i];
1210
1211 if (st->parser) {
1212 av_parser_close(st->parser);
1213 st->parser = NULL;
1214 av_free_packet(&st->cur_pkt);
1215 }
1216 st->last_IP_pts = AV_NOPTS_VALUE;
1217 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1218 st->reference_dts = AV_NOPTS_VALUE;
1219 /* fail safe */
1220 st->cur_ptr = NULL;
1221 st->cur_len = 0;
1222
1223 st->probe_packets = MAX_PROBE_PACKETS;
1224
1225 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1226 st->pts_buffer[j]= AV_NOPTS_VALUE;
1227 }
1228 }
1229
1230 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1231 int i;
1232
1233 for(i = 0; i < s->nb_streams; i++) {
1234 AVStream *st = s->streams[i];
1235
1236 st->cur_dts = av_rescale(timestamp,
1237 st->time_base.den * (int64_t)ref_st->time_base.num,
1238 st->time_base.num * (int64_t)ref_st->time_base.den);
1239 }
1240 }
1241
1242 void ff_reduce_index(AVFormatContext *s, int stream_index)
1243 {
1244 AVStream *st= s->streams[stream_index];
1245 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1246
1247 if((unsigned)st->nb_index_entries >= max_entries){
1248 int i;
1249 for(i=0; 2*i<st->nb_index_entries; i++)
1250 st->index_entries[i]= st->index_entries[2*i];
1251 st->nb_index_entries= i;
1252 }
1253 }
1254
1255 int av_add_index_entry(AVStream *st,
1256 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1257 {
1258 AVIndexEntry *entries, *ie;
1259 int index;
1260
1261 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1262 return -1;
1263
1264 entries = av_fast_realloc(st->index_entries,
1265 &st->index_entries_allocated_size,
1266 (st->nb_index_entries + 1) *
1267 sizeof(AVIndexEntry));
1268 if(!entries)
1269 return -1;
1270
1271 st->index_entries= entries;
1272
1273 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1274
1275 if(index<0){
1276 index= st->nb_index_entries++;
1277 ie= &entries[index];
1278 assert(index==0 || ie[-1].timestamp < timestamp);
1279 }else{
1280 ie= &entries[index];
1281 if(ie->timestamp != timestamp){
1282 if(ie->timestamp <= timestamp)
1283 return -1;
1284 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1285 st->nb_index_entries++;
1286 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1287 distance= ie->min_distance;
1288 }
1289
1290 ie->pos = pos;
1291 ie->timestamp = timestamp;
1292 ie->min_distance= distance;
1293 ie->size= size;
1294 ie->flags = flags;
1295
1296 return index;
1297 }
1298
1299 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1300 int flags)
1301 {
1302 AVIndexEntry *entries= st->index_entries;
1303 int nb_entries= st->nb_index_entries;
1304 int a, b, m;
1305 int64_t timestamp;
1306
1307 a = - 1;
1308 b = nb_entries;
1309
1310 while (b - a > 1) {
1311 m = (a + b) >> 1;
1312 timestamp = entries[m].timestamp;
1313 if(timestamp >= wanted_timestamp)
1314 b = m;
1315 if(timestamp <= wanted_timestamp)
1316 a = m;
1317 }
1318 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1319
1320 if(!(flags & AVSEEK_FLAG_ANY)){
1321 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1322 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1323 }
1324 }
1325
1326 if(m == nb_entries)
1327 return -1;
1328 return m;
1329 }
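/*
 * Illustrative sketch (not part of the build): how the two index helpers
 * above cooperate.  A demuxer records keyframe positions while reading and a
 * later seek looks them up; AVSEEK_FLAG_BACKWARD picks the last entry at or
 * before the target, the default picks the first one at or after it, and
 * AVSEEK_FLAG_ANY also accepts non-keyframe entries:
 *
 *     av_add_index_entry(st, 0x1000,   0, 0, 0, AVINDEX_KEYFRAME);
 *     av_add_index_entry(st, 0x8000, 900, 0, 0, AVINDEX_KEYFRAME);
 *
 *     av_index_search_timestamp(st, 500, AVSEEK_FLAG_BACKWARD);  // -> 0
 *     av_index_search_timestamp(st, 500, 0);                     // -> 1
 *     av_index_search_timestamp(st, 950, 0);                     // -> -1, past the last entry
 */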
1330
1331 #define DEBUG_SEEK
1332
1333 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1334 AVInputFormat *avif= s->iformat;
1335 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1336 int64_t ts_min, ts_max, ts;
1337 int index;
1338 int64_t ret;
1339 AVStream *st;
1340
1341 if (stream_index < 0)
1342 return -1;
1343
1344 #ifdef DEBUG_SEEK
1345 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1346 #endif
1347
1348 ts_max=
1349 ts_min= AV_NOPTS_VALUE;
1350 pos_limit= -1; //gcc falsely says it may be uninitialized
1351
1352 st= s->streams[stream_index];
1353 if(st->index_entries){
1354 AVIndexEntry *e;
1355
1356 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1357 index= FFMAX(index, 0);
1358 e= &st->index_entries[index];
1359
1360 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1361 pos_min= e->pos;
1362 ts_min= e->timestamp;
1363 #ifdef DEBUG_SEEK
1364 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1365 pos_min,ts_min);
1366 #endif
1367 }else{
1368 assert(index==0);
1369 }
1370
1371 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1372 assert(index < st->nb_index_entries);
1373 if(index >= 0){
1374 e= &st->index_entries[index];
1375 assert(e->timestamp >= target_ts);
1376 pos_max= e->pos;
1377 ts_max= e->timestamp;
1378 pos_limit= pos_max - e->min_distance;
1379 #ifdef DEBUG_SEEK
1380 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1381 pos_max,pos_limit, ts_max);
1382 #endif
1383 }
1384 }
1385
1386 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1387 if(pos<0)
1388 return -1;
1389
1390 /* do the seek */
1391 if ((ret = url_fseek(s->pb, pos, SEEK_SET)) < 0)
1392 return ret;
1393
1394 av_update_cur_dts(s, st, ts);
1395
1396 return 0;
1397 }
1398
1399 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1400 int64_t pos, ts;
1401 int64_t start_pos, filesize;
1402 int no_change;
1403
1404 #ifdef DEBUG_SEEK
1405 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1406 #endif
1407
1408 if(ts_min == AV_NOPTS_VALUE){
1409 pos_min = s->data_offset;
1410 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1411 if (ts_min == AV_NOPTS_VALUE)
1412 return -1;
1413 }
1414
1415 if(ts_max == AV_NOPTS_VALUE){
1416 int step= 1024;
1417 filesize = url_fsize(s->pb);
1418 pos_max = filesize - 1;
1419 do{
1420 pos_max -= step;
1421 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1422 step += step;
1423 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1424 if (ts_max == AV_NOPTS_VALUE)
1425 return -1;
1426
1427 for(;;){
1428 int64_t tmp_pos= pos_max + 1;
1429 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1430 if(tmp_ts == AV_NOPTS_VALUE)
1431 break;
1432 ts_max= tmp_ts;
1433 pos_max= tmp_pos;
1434 if(tmp_pos >= filesize)
1435 break;
1436 }
1437 pos_limit= pos_max;
1438 }
1439
1440 if(ts_min > ts_max){
1441 return -1;
1442 }else if(ts_min == ts_max){
1443 pos_limit= pos_min;
1444 }
1445
1446 no_change=0;
1447 while (pos_min < pos_limit) {
1448 #ifdef DEBUG_SEEK
1449 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1450 pos_min, pos_max,
1451 ts_min, ts_max);
1452 #endif
1453 assert(pos_limit <= pos_max);
1454
1455 if(no_change==0){
1456 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1457 // interpolate position (better than plain bisection)
1458 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1459 + pos_min - approximate_keyframe_distance;
1460 }else if(no_change==1){
1461 // bisection, if interpolation failed to change min or max pos last time
1462 pos = (pos_min + pos_limit)>>1;
1463 }else{
1464 /* linear search if bisection failed, can only happen if there
1465 are very few or no keyframes between min/max */
1466 pos=pos_min;
1467 }
1468 if(pos <= pos_min)
1469 pos= pos_min + 1;
1470 else if(pos > pos_limit)
1471 pos= pos_limit;
1472 start_pos= pos;
1473
1474 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1475 if(pos == pos_max)
1476 no_change++;
1477 else
1478 no_change=0;
1479 #ifdef DEBUG_SEEK
1480 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1481 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1482 start_pos, no_change);
1483 #endif
1484 if(ts == AV_NOPTS_VALUE){
1485 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1486 return -1;
1487 }
1488 assert(ts != AV_NOPTS_VALUE);
1489 if (target_ts <= ts) {
1490 pos_limit = start_pos - 1;
1491 pos_max = pos;
1492 ts_max = ts;
1493 }
1494 if (target_ts >= ts) {
1495 pos_min = pos;
1496 ts_min = ts;
1497 }
1498 }
1499
1500 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1501 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1502 #ifdef DEBUG_SEEK
1503 pos_min = pos;
1504 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1505 pos_min++;
1506 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1507 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1508 pos, ts_min, target_ts, ts_max);
1509 #endif
1510 *ts_ret= ts;
1511 return pos;
1512 }
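/*
 * Sketch of the search above (descriptive only): the loop keeps the
 * invariant ts_min = ts(pos_min) <= target_ts <= ts_max = ts(pos_max) and
 * first guesses the interpolated position
 *
 *     pos = pos_min + (target_ts - ts_min) * (pos_max - pos_min) / (ts_max - ts_min)
 *               - (pos_max - pos_limit)
 *
 * clamped to (pos_min, pos_limit].  When interpolation stops narrowing the
 * interval it degrades to bisection, and finally to a linear scan from
 * pos_min when there are few or no keyframes between the bounds.
 */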
1513
1514 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1515 int64_t pos_min, pos_max;
1516 #if 0
1517 AVStream *st;
1518
1519 if (stream_index < 0)
1520 return -1;
1521
1522 st= s->streams[stream_index];
1523 #endif
1524
1525 pos_min = s->data_offset;
1526 pos_max = url_fsize(s->pb) - 1;
1527
1528 if (pos < pos_min) pos= pos_min;
1529 else if(pos > pos_max) pos= pos_max;
1530
1531 url_fseek(s->pb, pos, SEEK_SET);
1532
1533 #if 0
1534 av_update_cur_dts(s, st, ts);
1535 #endif
1536 return 0;
1537 }
1538
1539 static int av_seek_frame_generic(AVFormatContext *s,
1540 int stream_index, int64_t timestamp, int flags)
1541 {
1542 int index;
1543 int64_t ret;
1544 AVStream *st;
1545 AVIndexEntry *ie;
1546
1547 st = s->streams[stream_index];
1548
1549 index = av_index_search_timestamp(st, timestamp, flags);
1550
1551 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1552 return -1;
1553
1554 if(index < 0 || index==st->nb_index_entries-1){
1555 int i;
1556 AVPacket pkt;
1557
1558 if(st->nb_index_entries){
1559 assert(st->index_entries);
1560 ie= &st->index_entries[st->nb_index_entries-1];
1561 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1562 return ret;
1563 av_update_cur_dts(s, st, ie->timestamp);
1564 }else{
1565 if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
1566 return ret;
1567 }
1568 for(i=0;; i++) {
1569 int ret;
1570 do{
1571 ret = av_read_frame(s, &pkt);
1572 }while(ret == AVERROR(EAGAIN));
1573 if(ret<0)
1574 break;
1575 av_free_packet(&pkt);
1576 if(stream_index == pkt.stream_index){
1577 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1578 break;
1579 }
1580 }
1581 index = av_index_search_timestamp(st, timestamp, flags);
1582 }
1583 if (index < 0)
1584 return -1;
1585
1586 av_read_frame_flush(s);
1587 if (s->iformat->read_seek){
1588 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1589 return 0;
1590 }
1591 ie = &st->index_entries[index];
1592 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1593 return ret;
1594 av_update_cur_dts(s, st, ie->timestamp);
1595
1596 return 0;
1597 }
1598
1599 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1600 {
1601 int ret;
1602 AVStream *st;
1603
1604 av_read_frame_flush(s);
1605
1606 if(flags & AVSEEK_FLAG_BYTE)
1607 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1608
1609 if(stream_index < 0){
1610 stream_index= av_find_default_stream_index(s);
1611 if(stream_index < 0)
1612 return -1;
1613
1614 st= s->streams[stream_index];
1615 /* timestamp for default must be expressed in AV_TIME_BASE units */
1616 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1617 }
1618
1619 /* first, we try the format specific seek */
1620 if (s->iformat->read_seek)
1621 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1622 else
1623 ret = -1;
1624 if (ret >= 0) {
1625 return 0;
1626 }
1627
1628 if(s->iformat->read_timestamp)
1629 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1630 else
1631 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1632 }
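/*
 * Illustrative sketch (not part of the build): seeking to the 10 second
 * mark.  With stream_index = -1 the timestamp is interpreted in AV_TIME_BASE
 * units and rescaled to the default stream as shown above; with a real
 * stream index it must already be in that stream's time_base:
 *
 *     // container-level seek, backwards to the previous keyframe
 *     av_seek_frame(ic, -1, 10 * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
 *
 *     // stream-level seek on a stream with time_base 1/90000
 *     av_seek_frame(ic, video_index, 10 * 90000LL, AVSEEK_FLAG_BACKWARD);
 *
 * (video_index stands for whichever stream the caller selected.)
 */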
1633
1634 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1635 {
1636 if(min_ts > ts || max_ts < ts)
1637 return -1;
1638
1639 av_read_frame_flush(s);
1640
1641 if (s->iformat->read_seek2)
1642 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1643
1644 if(s->iformat->read_timestamp){
1645 //try to seek via read_timestamp()
1646 }
1647
1648 //Fall back to the old API if the new one is not implemented but the old one is.
1649 //Note that the old API has somewhat different semantics.
1650 if(s->iformat->read_seek || 1)
1651 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1652
1653 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1654 }
1655
1656 /*******************************************************/
1657
1658 /**
1659 * Return TRUE if the file has an accurate duration in at least one stream.
1660 *
1661 * @return TRUE if at least one stream has a known duration.
1662 */
1663 static int av_has_duration(AVFormatContext *ic)
1664 {
1665 int i;
1666 AVStream *st;
1667
1668 for(i = 0;i < ic->nb_streams; i++) {
1669 st = ic->streams[i];
1670 if (st->duration != AV_NOPTS_VALUE)
1671 return 1;
1672 }
1673 return 0;
1674 }
1675
1676 /**
1677 * Estimate the global timings from the timings of the individual streams.
1678 *
1679 * Also computes the global bitrate if possible.
1680 */
1681 static void av_update_stream_timings(AVFormatContext *ic)
1682 {
1683 int64_t start_time, start_time1, end_time, end_time1;
1684 int64_t duration, duration1;
1685 int i;
1686 AVStream *st;
1687
1688 start_time = INT64_MAX;
1689 end_time = INT64_MIN;
1690 duration = INT64_MIN;
1691 for(i = 0;i < ic->nb_streams; i++) {
1692 st = ic->streams[i];
1693 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1694 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1695 if (start_time1 < start_time)
1696 start_time = start_time1;
1697 if (st->duration != AV_NOPTS_VALUE) {
1698 end_time1 = start_time1
1699 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1700 if (end_time1 > end_time)
1701 end_time = end_time1;
1702 }
1703 }
1704 if (st->duration != AV_NOPTS_VALUE) {
1705 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1706 if (duration1 > duration)
1707 duration = duration1;
1708 }
1709 }
1710 if (start_time != INT64_MAX) {
1711 ic->start_time = start_time;
1712 if (end_time != INT64_MIN) {
1713 if (end_time - start_time > duration)
1714 duration = end_time - start_time;
1715 }
1716 }
1717 if (duration != INT64_MIN) {
1718 ic->duration = duration;
1719 if (ic->file_size > 0) {
1720 /* compute the bitrate */
1721 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1722 (double)ic->duration;
1723 }
1724 }
1725 }
1726
1727 static void fill_all_stream_timings(AVFormatContext *ic)
1728 {
1729 int i;
1730 AVStream *st;
1731
1732 av_update_stream_timings(ic);
1733 for(i = 0;i < ic->nb_streams; i++) {
1734 st = ic->streams[i];
1735 if (st->start_time == AV_NOPTS_VALUE) {
1736 if(ic->start_time != AV_NOPTS_VALUE)
1737 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1738 if(ic->duration != AV_NOPTS_VALUE)
1739 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1740 }
1741 }
1742 }
1743
1744 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1745 {
1746 int64_t filesize, duration;
1747 int bit_rate, i;
1748 AVStream *st;
1749
1750 /* if bit_rate is already set, we believe it */
1751 if (ic->bit_rate == 0) {
1752 bit_rate = 0;
1753 for(i=0;i<ic->nb_streams;i++) {
1754 st = ic->streams[i];
1755 bit_rate += st->codec->bit_rate;
1756 }
1757 ic->bit_rate = bit_rate;
1758 }
1759
1760 /* if duration is already set, we believe it */
1761 if (ic->duration == AV_NOPTS_VALUE &&
1762 ic->bit_rate != 0 &&
1763 ic->file_size != 0) {
1764 filesize = ic->file_size;
1765 if (filesize > 0) {
1766 for(i = 0; i < ic->nb_streams; i++) {
1767 st = ic->streams[i];
1768 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1769 if (st->duration == AV_NOPTS_VALUE)
1770 st->duration = duration;
1771 }
1772 }
1773 }
1774 }
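/*
 * Worked example (illustrative only): a 10 MB file with a total bit rate of
 * 1 Mbit/s gives a duration of 8 * 10000000 / 1000000 = 80 seconds; for a
 * stream with time_base 1/90000 the av_rescale() above stores
 * 8 * 10000000 * 90000 / 1000000 = 7200000 ticks, i.e. the same 80 seconds.
 */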
1775
1776 #define DURATION_MAX_READ_SIZE 250000
1777 #define DURATION_MAX_RETRY 3
1778
1779 /* only usable for MPEG-PS streams */
1780 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1781 {
1782 AVPacket pkt1, *pkt = &pkt1;
1783 AVStream *st;
1784 int read_size, i, ret;
1785 int64_t end_time, start_time[MAX_STREAMS];
1786 int64_t filesize, offset, duration;
1787 int retry=0;
1788
1789 ic->cur_st = NULL;
1790
1791 /* flush packet queue */
1792 flush_packet_queue(ic);
1793
1794 for(i=0;i<ic->nb_streams;i++) {
1795 st = ic->streams[i];
1796 if(st->start_time != AV_NOPTS_VALUE){
1797 start_time[i]= st->start_time;
1798 }else if(st->first_dts != AV_NOPTS_VALUE){
1799 start_time[i]= st->first_dts;
1800 }else
1801 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");
1802
1803 if (st->parser) {
1804 av_parser_close(st->parser);
1805 st->parser= NULL;
1806 av_free_packet(&st->cur_pkt);
1807 }
1808 }
1809
1810 /* estimate the end time (duration) */
1811 /* XXX: may need to support wrapping */
1812 filesize = ic->file_size;
1813 end_time = AV_NOPTS_VALUE;
1814 do{
1815 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1816 if (offset < 0)
1817 offset = 0;
1818
1819 url_fseek(ic->pb, offset, SEEK_SET);
1820 read_size = 0;
1821 for(;;) {
1822 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1823 break;
1824
1825 do{
1826 ret = av_read_packet(ic, pkt);
1827 }while(ret == AVERROR(EAGAIN));
1828 if (ret != 0)
1829 break;
1830 read_size += pkt->size;
1831 st = ic->streams[pkt->stream_index];
1832 if (pkt->pts != AV_NOPTS_VALUE &&
1833 start_time[pkt->stream_index] != AV_NOPTS_VALUE) {
1834 end_time = pkt->pts;
1835 duration = end_time - start_time[pkt->stream_index];
1836 if (duration < 0)
1837 duration += 1LL<<st->pts_wrap_bits;
1838 if (duration > 0) {
1839 if (st->duration == AV_NOPTS_VALUE ||
1840 st->duration < duration)
1841 st->duration = duration;
1842 }
1843 }
1844 av_free_packet(pkt);
1845 }
1846 }while( end_time==AV_NOPTS_VALUE
1847 && filesize > (DURATION_MAX_READ_SIZE<<retry)
1848 && ++retry <= DURATION_MAX_RETRY);
1849
1850 fill_all_stream_timings(ic);
1851
1852 url_fseek(ic->pb, old_offset, SEEK_SET);
1853 for(i=0; i<ic->nb_streams; i++){
1854 st= ic->streams[i];
1855 st->cur_dts= st->first_dts;
1856 st->last_IP_pts = AV_NOPTS_VALUE;
1857 }
1858 }
1859
1860 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1861 {
1862 int64_t file_size;
1863
1864 /* get the file size, if possible */
1865 if (ic->iformat->flags & AVFMT_NOFILE) {
1866 file_size = 0;
1867 } else {
1868 file_size = url_fsize(ic->pb);
1869 if (file_size < 0)
1870 file_size = 0;
1871 }
1872 ic->file_size = file_size;
1873
1874 if ((!strcmp(ic->iformat->name, "mpeg") ||
1875 !strcmp(ic->iformat->name, "mpegts")) &&
1876 file_size && !url_is_streamed(ic->pb)) {
1877 /* get accurate estimate from the PTSes */
1878 av_estimate_timings_from_pts(ic, old_offset);
1879 } else if (av_has_duration(ic)) {
1880 /* at least one component has timings - we use them for all
1881 the components */
1882 fill_all_stream_timings(ic);
1883 } else {
1884 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
1885 /* less precise: use bitrate info */
1886 av_estimate_timings_from_bit_rate(ic);
1887 }
1888 av_update_stream_timings(ic);
1889
1890 #if 0
1891 {
1892 int i;
1893 AVStream *st;
1894 for(i = 0;i < ic->nb_streams; i++) {
1895 st = ic->streams[i];
1896 printf("%d: start_time: %0.3f duration: %0.3f\n",
1897 i, (double)st->start_time / AV_TIME_BASE,
1898 (double)st->duration / AV_TIME_BASE);
1899 }
1900 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1901 (double)ic->start_time / AV_TIME_BASE,
1902 (double)ic->duration / AV_TIME_BASE,
1903 ic->bit_rate / 1000);
1904 }
1905 #endif
1906 }
1907
1908 static int has_codec_parameters(AVCodecContext *enc)
1909 {
1910 int val;
1911 switch(enc->codec_type) {
1912 case CODEC_TYPE_AUDIO:
1913 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
1914 if(!enc->frame_size &&
1915 (enc->codec_id == CODEC_ID_VORBIS ||
1916 enc->codec_id == CODEC_ID_AAC ||
1917 enc->codec_id == CODEC_ID_MP1 ||
1918 enc->codec_id == CODEC_ID_MP2 ||
1919 enc->codec_id == CODEC_ID_MP3 ||
1920 enc->codec_id == CODEC_ID_SPEEX))
1921 return 0;
1922 break;
1923 case CODEC_TYPE_VIDEO:
1924 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1925 break;
1926 default:
1927 val = 1;
1928 break;
1929 }
1930 return enc->codec_id != CODEC_ID_NONE && val != 0;
1931 }
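/*
 * Note on the frame_size requirement above (the point of the "make sure
 * mp1/mp2 get their frame_size set" change): MPEG audio frames carry a fixed
 * number of samples -- 384 for Layer I, 1152 for Layer II and MPEG-1
 * Layer III -- but st->codec->frame_size only becomes known once a frame
 * header has been parsed or a frame decoded.  Until then
 * has_codec_parameters() returns 0 and av_find_stream_info() keeps feeding
 * packets to try_decode_frame() below.
 */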
1932
1933 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
1934 {
1935 int16_t *samples;
1936 AVCodec *codec;
1937 int got_picture, data_size, ret=0;
1938 AVFrame picture;
1939
1940 if(!st->codec->codec){
1941 codec = avcodec_find_decoder(st->codec->codec_id);
1942 if (!codec)
1943 return -1;
1944 ret = avcodec_open(st->codec, codec);
1945 if (ret < 0)
1946 return ret;
1947 }
1948
1949 if(!has_codec_parameters(st->codec)){
1950 switch(st->codec->codec_type) {
1951 case CODEC_TYPE_VIDEO:
1952 avcodec_get_frame_defaults(&picture);
1953 ret = avcodec_decode_video2(st->codec, &picture,
1954 &got_picture, avpkt);
1955 break;
1956 case CODEC_TYPE_AUDIO:
1957 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1958 samples = av_malloc(data_size);
1959 if (!samples)
1960 goto fail;
1961 ret = avcodec_decode_audio3(st->codec, samples,
1962 &data_size, avpkt);
1963 av_free(samples);
1964 break;
1965 default:
1966 break;
1967 }
1968 }
1970 return ret;
1971 }
1972
1973 unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id)
1974 {
1975 while (tags->id != CODEC_ID_NONE) {
1976 if (tags->id == id)
1977 return tags->tag;
1978 tags++;
1979 }
1980 return 0;
1981 }
1982
1983 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1984 {
1985 int i;
1986 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1987 if(tag == tags[i].tag)
1988 return tags[i].id;
1989 }
1990 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1991 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1992 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1993 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1994 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1995 return tags[i].id;
1996 }
1997 return CODEC_ID_NONE;
1998 }
1999
2000 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2001 {
2002 int i;
2003 for(i=0; tags && tags[i]; i++){
2004 int tag= ff_codec_get_tag(tags[i], id);
2005 if(tag) return tag;
2006 }
2007 return 0;
2008 }
2009
2010 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2011 {
2012 int i;
2013 for(i=0; tags && tags[i]; i++){
2014 enum CodecID id= ff_codec_get_id(tags[i], tag);
2015 if(id!=CODEC_ID_NONE) return id;
2016 }
2017 return CODEC_ID_NONE;
2018 }
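/*
* Example lookups (editor's sketch; assumes the output format's codec_tag
* tables contain the usual RIFF/AVI entries for MPEG-4 ASP):
*
* unsigned int tag = av_codec_get_tag(s->oformat->codec_tag, CODEC_ID_MPEG4);
* enum CodecID id = av_codec_get_id (s->oformat->codec_tag, MKTAG('X','V','I','D'));
*
* ff_codec_get_id() falls back to a case-insensitive comparison, so a
* lowercase "xvid" tag resolves to the same codec ID as "XVID".
*/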
2019
2020 static void compute_chapters_end(AVFormatContext *s)
2021 {
2022 unsigned int i;
2023
2024 for (i=0; i+1<s->nb_chapters; i++)
2025 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2026 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
2027 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
2028 s->chapters[i]->end = s->chapters[i+1]->start;
2029 }
2030
2031 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
2032 assert(s->start_time != AV_NOPTS_VALUE);
2033 assert(s->duration > 0);
2034 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
2035 AV_TIME_BASE_Q,
2036 s->chapters[i]->time_base);
2037 }
2038 }
2039
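/* av_find_stream_info() below scans a fixed set of candidate frame rates:
* index i (1 <= i < 60*12) corresponds to i/12 fps, and the last five entries
* cover the NTSC-style rates 24000/1001, 30000/1001, 60000/1001, 12000/1001
* and 15000/1001. get_std_framerate() returns that rate expressed in units of
* 1/(12*1001) fps, matching the framerate/(1001*12) scaling used in the
* duration-error loop. */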
2040 #define MAX_STD_TIMEBASES (60*12+5)
2041 static int get_std_framerate(int i){
2042 if(i<60*12) return i*1001;
2043 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2044 }
2045
2046 /*
2047 * Is the time base unreliable?
2048 * This is a heuristic to balance between quick acceptance of the values in
2049 * the headers vs. some extra checks.
2050 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2051 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2052 * And there are "variable" fps files this needs to detect as well.
2053 */
2054 static int tb_unreliable(AVCodecContext *c){
2055 if( c->time_base.den >= 101L*c->time_base.num
2056 || c->time_base.den < 5L*c->time_base.num
2057 /* || c->codec_tag == AV_RL32("DIVX")
2058 || c->codec_tag == AV_RL32("XVID")*/
2059 || c->codec_id == CODEC_ID_MPEG2VIDEO
2060 || c->codec_id == CODEC_ID_H264
2061 )
2062 return 1;
2063 return 0;
2064 }
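/* e.g. a codec time base of 1/1 or 1/2 (den < 5*num) or 1/1000
* (den >= 101*num) is treated as unreliable here, while 1/25 or 1001/30000
* is trusted unless the codec is MPEG-2 or H.264. */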
2065
2066 int av_find_stream_info(AVFormatContext *ic)
2067 {
2068 int i, count, ret, read_size, j;
2069 AVStream *st;
2070 AVPacket pkt1, *pkt;
2071 int64_t last_dts[MAX_STREAMS];
2072 int64_t duration_gcd[MAX_STREAMS]={0};
2073 int duration_count[MAX_STREAMS]={0};
2074 double (*duration_error)[MAX_STD_TIMEBASES];
2075 int64_t old_offset = url_ftell(ic->pb);
2076 int64_t codec_info_duration[MAX_STREAMS]={0};
2077 int codec_info_nb_frames[MAX_STREAMS]={0};
2078
2079 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2080 if (!duration_error) return AVERROR(ENOMEM);
2081
2082 for(i=0;i<ic->nb_streams;i++) {
2083 st = ic->streams[i];
2084 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2085 /* if(!st->time_base.num)
2086 st->time_base= */
2087 if(!st->codec->time_base.num)
2088 st->codec->time_base= st->time_base;
2089 }
2090 //only for the split stuff
2091 if (!st->parser) {
2092 st->parser = av_parser_init(st->codec->codec_id);
2093 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2094 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2095 }
2096 }
2097 assert(!st->codec->codec);
2098 //try to just open decoders, in case this is enough to get parameters
2099 if(!has_codec_parameters(st->codec)){
2100 AVCodec *codec = avcodec_find_decoder(st->codec->codec_id);
2101 if (codec)
2102 avcodec_open(st->codec, codec);
2103 }
2104 }
2105
2106 for(i=0;i<MAX_STREAMS;i++){
2107 last_dts[i]= AV_NOPTS_VALUE;
2108 }
2109
2110 count = 0;
2111 read_size = 0;
2112 for(;;) {
2113 if(url_interrupt_cb()){
2114 ret= AVERROR(EINTR);
2115 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2116 break;
2117 }
2118
2119 /* check if one codec still needs to be handled */
2120 for(i=0;i<ic->nb_streams;i++) {
2121 st = ic->streams[i];
2122 if (!has_codec_parameters(st->codec))
2123 break;
2124 /* variable fps and no guess at the real fps */
2125 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2126 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2127 break;
2128 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2129 break;
2130 if(st->first_dts == AV_NOPTS_VALUE)
2131 break;
2132 }
2133 if (i == ic->nb_streams) {
2134 /* NOTE: if the format has no header, then we need to read
2135 some packets to get most of the streams, so we cannot
2136 stop here */
2137 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2138 /* if we found the info for all the codecs, we can stop */
2139 ret = count;
2140 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2141 break;
2142 }
2143 }
2144 /* we did not get all the codec info, but we read too much data */
2145 if (read_size >= ic->probesize) {
2146 ret = count;
2147 av_log(ic, AV_LOG_WARNING, "Probe size limit of %d bytes reached\n", ic->probesize);
2148 break;
2149 }
2150
2151 /* NOTE: a new stream can be added here if the file has no header
2152 (AVFMTCTX_NOHEADER) */
2153 ret = av_read_frame_internal(ic, &pkt1);
2154 if(ret == AVERROR(EAGAIN))
2155 continue;
2156 if (ret < 0) {
2157 /* EOF or error */
2158 ret = -1; /* we could not get all the codec parameters before EOF */
2159 for(i=0;i<ic->nb_streams;i++) {
2160 st = ic->streams[i];
2161 if (!has_codec_parameters(st->codec)){
2162 char buf[256];
2163 avcodec_string(buf, sizeof(buf), st->codec, 0);
2164 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
2165 } else {
2166 ret = 0;
2167 }
2168 }
2169 break;
2170 }
2171
2172 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2173 if(av_dup_packet(pkt) < 0) {
2174 av_free(duration_error);
2175 return AVERROR(ENOMEM);
2176 }
2177
2178 read_size += pkt->size;
2179
2180 st = ic->streams[pkt->stream_index];
2181 if(codec_info_nb_frames[st->index]>1) {
2182 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){
2183 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2184 break;
2185 }
2186 codec_info_duration[st->index] += pkt->duration;
2187 }
2188 if (pkt->duration != 0)
2189 codec_info_nb_frames[st->index]++;
2190
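/* gather statistics about the dts deltas of this stream: the squared error
against each standard frame rate candidate and the GCD of the deltas are
used further down to derive r_frame_rate for streams whose time base is
unreliable */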
2191 {
2192 int index= pkt->stream_index;
2193 int64_t last= last_dts[index];
2194 int64_t duration= pkt->dts - last;
2195
2196 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2197 double dur= duration * av_q2d(st->time_base);
2198
2199 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2200 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2201 if(duration_count[index] < 2)
2202 memset(duration_error[index], 0, sizeof(*duration_error));
2203 for(i=1; i<MAX_STD_TIMEBASES; i++){
2204 int framerate= get_std_framerate(i);
2205 int ticks= lrintf(dur*framerate/(1001*12));
2206 double error= dur - ticks*1001*12/(double)framerate;
2207 duration_error[index][i] += error*error;
2208 }
2209 duration_count[index]++;
2210 // ignore the first 4 values, they might have some random jitter
2211 if (duration_count[index] > 3)
2212 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
2213 }
2214 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2215 last_dts[pkt->stream_index]= pkt->dts;
2216 }
2217 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2218 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2219 if(i){
2220 st->codec->extradata_size= i;
2221 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2222 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2223 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2224 }
2225 }
2226
2227 /* If we still have no information, try to open the codec and
2228 decompress the frame. We try to avoid that in most cases as
2229 it takes longer and uses more memory. For MPEG-4, we need to
2230 decompress for QuickTime. */
2231 if (!has_codec_parameters(st->codec))
2232 try_decode_frame(st, pkt);
2233
2234 count++;
2235 }
2236
2237 // close codecs which were opened in try_decode_frame()
2238 for(i=0;i<ic->nb_streams;i++) {
2239 st = ic->streams[i];
2240 if(st->codec->codec)
2241 avcodec_close(st->codec);
2242 }
2243 for(i=0;i<ic->nb_streams;i++) {
2244 st = ic->streams[i];
2245 if(codec_info_nb_frames[i]>2 && !st->avg_frame_rate.num)
2246 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2247 (codec_info_nb_frames[i]-2)*(int64_t)st->time_base.den,
2248 codec_info_duration[i] *(int64_t)st->time_base.num, 60000);
2249 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2250 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2251 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2252
2253 // the check for tb_unreliable() is not completely correct, since this is not about handling
2254 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2255 // ipmovie.c produces.
2256 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1 && !st->r_frame_rate.num)
2257 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2258 if(duration_count[i] && !st->r_frame_rate.num
2259 && tb_unreliable(st->codec) /*&&
2260 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2261 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2262 int num = 0;
2263 double best_error= 2*av_q2d(st->time_base);
2264 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2265
2266 for(j=1; j<MAX_STD_TIMEBASES; j++){
2267 double error= duration_error[i][j] * get_std_framerate(j);
2268 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2269 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2270 if(error < best_error){
2271 best_error= error;
2272 num = get_std_framerate(j);
2273 }
2274 }
2275 // do not increase frame rate by more than 1 % in order to match a standard rate.
2276 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2277 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2278 }
2279
2280 if (!st->r_frame_rate.num){
2281 if( st->codec->time_base.den * (int64_t)st->time_base.num
2282 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2283 st->r_frame_rate.num = st->codec->time_base.den;
2284 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2285 }else{
2286 st->r_frame_rate.num = st->time_base.den;
2287 st->r_frame_rate.den = st->time_base.num;
2288 }
2289 }
2290 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2291 if(!st->codec->bits_per_coded_sample)
2292 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2293 }
2294 }
2295
2296 av_estimate_timings(ic, old_offset);
2297
2298 compute_chapters_end(ic);
2299
2300 #if 0
2301 /* correct DTS for B-frame streams with no timestamps */
2302 for(i=0;i<ic->nb_streams;i++) {
2303 st = ic->streams[i];
2304 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2305 if(b-frames){
2306 ppktl = &ic->packet_buffer;
2307 while(ppkt1){
2308 if(ppkt1->stream_index != i)
2309 continue;
2310 if(ppkt1->pkt->dts < 0)
2311 break;
2312 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2313 break;
2314 ppkt1->pkt->dts -= delta;
2315 ppkt1= ppkt1->next;
2316 }
2317 if(ppkt1)
2318 continue;
2319 st->cur_dts -= delta;
2320 }
2321 }
2322 }
2323 #endif
2324
2325 av_free(duration_error);
2326
2327 return ret;
2328 }
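/*
* Typical use from an application (editor's sketch, not part of this file;
* the file name is an arbitrary example and error handling is trimmed):
*
* AVFormatContext *ic = NULL;
* AVPacket pkt;
*
* if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
* return -1;
* if (av_find_stream_info(ic) < 0) {
* av_close_input_file(ic);
* return -1;
* }
* dump_format(ic, 0, "input.mpg", 0);
* while (av_read_frame(ic, &pkt) >= 0) {
* // examine pkt.stream_index, pkt.data, pkt.size here
* av_free_packet(&pkt);
* }
* av_close_input_file(ic);
*/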
2329
2330 /*******************************************************/
2331
2332 int av_read_play(AVFormatContext *s)
2333 {
2334 if (s->iformat->read_play)
2335 return s->iformat->read_play(s);
2336 if (s->pb)
2337 return av_url_read_fpause(s->pb, 0);
2338 return AVERROR(ENOSYS);
2339 }
2340
2341 int av_read_pause(AVFormatContext *s)
2342 {
2343 if (s->iformat->read_pause)
2344 return s->iformat->read_pause(s);
2345 if (s->pb)
2346 return av_url_read_fpause(s->pb, 1);
2347 return AVERROR(ENOSYS);
2348 }
2349
2350 void av_close_input_stream(AVFormatContext *s)
2351 {
2352 int i;
2353 AVStream *st;
2354
2355 if (s->iformat->read_close)
2356 s->iformat->read_close(s);
2357 for(i=0;i<s->nb_streams;i++) {
2358 /* free all data in a stream component */
2359 st = s->streams[i];
2360 if (st->parser) {
2361 av_parser_close(st->parser);
2362 av_free_packet(&st->cur_pkt);
2363 }
2364 av_metadata_free(&st->metadata);
2365 av_free(st->index_entries);
2366 av_free(st->codec->extradata);
2367 av_free(st->codec);
2368 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2369 av_free(st->filename);
2370 #endif
2371 av_free(st->priv_data);
2372 av_free(st);
2373 }
2374 for(i=s->nb_programs-1; i>=0; i--) {
2375 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2376 av_freep(&s->programs[i]->provider_name);
2377 av_freep(&s->programs[i]->name);
2378 #endif
2379 av_metadata_free(&s->programs[i]->metadata);
2380 av_freep(&s->programs[i]->stream_index);
2381 av_freep(&s->programs[i]);
2382 }
2383 av_freep(&s->programs);
2384 flush_packet_queue(s);
2385 av_freep(&s->priv_data);
2386 while(s->nb_chapters--) {
2387 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2388 av_free(s->chapters[s->nb_chapters]->title);
2389 #endif
2390 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2391 av_free(s->chapters[s->nb_chapters]);
2392 }
2393 av_freep(&s->chapters);
2394 av_metadata_free(&s->metadata);
2395 av_free(s);
2396 }
2397
2398 void av_close_input_file(AVFormatContext *s)
2399 {
2400 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2401 av_close_input_stream(s);
2402 if (pb)
2403 url_fclose(pb);
2404 }
2405
2406 AVStream *av_new_stream(AVFormatContext *s, int id)
2407 {
2408 AVStream *st;
2409 int i;
2410
2411 if (s->nb_streams >= MAX_STREAMS)
2412 return NULL;
2413
2414 st = av_mallocz(sizeof(AVStream));
2415 if (!st)
2416 return NULL;
2417
2418 st->codec= avcodec_alloc_context();
2419 if (s->iformat) {
2420 /* no default bitrate if decoding */
2421 st->codec->bit_rate = 0;
2422 }
2423 st->index = s->nb_streams;
2424 st->id = id;
2425 st->start_time = AV_NOPTS_VALUE;
2426 st->duration = AV_NOPTS_VALUE;
2427 /* we set the current DTS to 0 so that formats without any timestamps
2428 but with durations get some timestamps; formats with some unknown
2429 timestamps have their first few packets buffered and the
2430 timestamps corrected before they are returned to the user */
2431 st->cur_dts = 0;
2432 st->first_dts = AV_NOPTS_VALUE;
2433 st->probe_packets = MAX_PROBE_PACKETS;
2434
2435 /* default pts setting is MPEG-like */
2436 av_set_pts_info(st, 33, 1, 90000);
2437 st->last_IP_pts = AV_NOPTS_VALUE;
2438 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2439 st->pts_buffer[i]= AV_NOPTS_VALUE;
2440 st->reference_dts = AV_NOPTS_VALUE;
2441
2442 st->sample_aspect_ratio = (AVRational){0,1};
2443
2444 s->streams[s->nb_streams++] = st;
2445 return st;
2446 }
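/*
* Sketch of how a demuxer's read_header() typically uses av_new_stream()
* (editor's example; the codec parameters below are placeholders):
*
* AVStream *st = av_new_stream(s, 0);
* if (!st)
* return AVERROR(ENOMEM);
* st->codec->codec_type = CODEC_TYPE_AUDIO;
* st->codec->codec_id = CODEC_ID_PCM_S16LE;
* st->codec->channels = 2;
* st->codec->sample_rate = 44100;
* av_set_pts_info(st, 64, 1, st->codec->sample_rate);
*/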
2447
2448 AVProgram *av_new_program(AVFormatContext *ac, int id)
2449 {
2450 AVProgram *program=NULL;
2451 int i;
2452
2453 #ifdef DEBUG_SI
2454 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2455 #endif
2456
2457 for(i=0; i<ac->nb_programs; i++)
2458 if(ac->programs[i]->id == id)
2459 program = ac->programs[i];
2460
2461 if(!program){
2462 program = av_mallocz(sizeof(AVProgram));
2463 if (!program)
2464 return NULL;
2465 dynarray_add(&ac->programs, &ac->nb_programs, program);
2466 program->discard = AVDISCARD_NONE;
2467 }
2468 program->id = id;
2469
2470 return program;
2471 }
2472
2473 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2474 {
2475 AVChapter *chapter = NULL;
2476 int i;
2477
2478 for(i=0; i<s->nb_chapters; i++)
2479 if(s->chapters[i]->id == id)
2480 chapter = s->chapters[i];
2481
2482 if(!chapter){
2483 chapter= av_mallocz(sizeof(AVChapter));
2484 if(!chapter)
2485 return NULL;
2486 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2487 }
2488 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2489 av_free(chapter->title);
2490 #endif
2491 av_metadata_set(&chapter->metadata, "title", title);
2492 chapter->id = id;
2493 chapter->time_base= time_base;
2494 chapter->start = start;
2495 chapter->end = end;
2496
2497 return chapter;
2498 }
2499
2500 /************************************************************/
2501 /* output media file */
2502
2503 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2504 {
2505 int ret;
2506
2507 if (s->oformat->priv_data_size > 0) {
2508 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2509 if (!s->priv_data)
2510 return AVERROR(ENOMEM);
2511 } else
2512 s->priv_data = NULL;
2513
2514 if (s->oformat->set_parameters) {
2515 ret = s->oformat->set_parameters(s, ap);
2516 if (ret < 0)
2517 return ret;
2518 }
2519 return 0;
2520 }
2521
2522 int av_write_header(AVFormatContext *s)
2523 {
2524 int ret, i;
2525 AVStream *st;
2526
2527 // some sanity checks
2528 if (s->nb_streams == 0) {
2529 av_log(s, AV_LOG_ERROR, "no streams\n");
2530 return -1;
2531 }
2532
2533 for(i=0;i<s->nb_streams;i++) {
2534 st = s->streams[i];
2535
2536 switch (st->codec->codec_type) {
2537 case CODEC_TYPE_AUDIO:
2538 if(st->codec->sample_rate<=0){
2539 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2540 return -1;
2541 }
2542 if(!st->codec->block_align)
2543 st->codec->block_align = st->codec->channels *
2544 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2545 break;
2546 case CODEC_TYPE_VIDEO:
2547 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2548 av_log(s, AV_LOG_ERROR, "time base not set\n");
2549 return -1;
2550 }
2551 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2552 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2553 return -1;
2554 }
2555 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2556 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2557 return -1;
2558 }
2559 break;
2560 }
2561
2562 if(s->oformat->codec_tag){
2563 if(st->codec->codec_tag){
2564 //FIXME
2565 //check that tag + id is in the table
2566 //if neither is in the table -> OK
2567 //if tag is in the table with another id -> FAIL
2568 //if id is in the table with another tag -> FAIL unless strict < ?
2569 }else
2570 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2571 }
2572
2573 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2574 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2575 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2576 }
2577
2578 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2579 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2580 if (!s->priv_data)
2581 return AVERROR(ENOMEM);
2582 }
2583
2584 #if LIBAVFORMAT_VERSION_MAJOR < 53
2585 ff_metadata_mux_compat(s);
2586 #endif
2587
2588 /* set muxer identification string */
2589 if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2590 AVMetadata *m;
2591 AVMetadataTag *t;
2592
2593 if (!(m = av_mallocz(sizeof(AVMetadata))))
2594 return AVERROR(ENOMEM);
2595 av_metadata_set2(&m, "encoder", LIBAVFORMAT_IDENT, 0);
2596 metadata_conv(&m, s->oformat->metadata_conv, NULL);
2597 if ((t = av_metadata_get(m, "", NULL, AV_METADATA_IGNORE_SUFFIX)))
2598 av_metadata_set2(&s->metadata, t->key, t->value, 0);
2599 av_metadata_free(&m);
2600 }
2601
2602 if(s->oformat->write_header){
2603 ret = s->oformat->write_header(s);
2604 if (ret < 0)
2605 return ret;
2606 }
2607
2608 /* init PTS generation */
2609 for(i=0;i<s->nb_streams;i++) {
2610 int64_t den = AV_NOPTS_VALUE;
2611 st = s->streams[i];
2612
2613 switch (st->codec->codec_type) {
2614 case CODEC_TYPE_AUDIO:
2615 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2616 break;
2617 case CODEC_TYPE_VIDEO:
2618 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2619 break;
2620 default:
2621 break;
2622 }
2623 if (den != AV_NOPTS_VALUE) {
2624 if (den <= 0)
2625 return AVERROR_INVALIDDATA;
2626 av_frac_init(&st->pts, 0, 0, den);
2627 }
2628 }
2629 return 0;
2630 }
2631
2632 //FIXME merge with compute_pkt_fields
2633 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
2634 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2635 int num, den, frame_size, i;
2636
2637 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2638
2639 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2640 return -1;*/
2641
2642 /* duration field */
2643 if (pkt->duration == 0) {
2644 compute_frame_duration(&num, &den, st, NULL, pkt);
2645 if (den && num) {
2646 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2647 }
2648 }
2649
2650 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2651 pkt->pts= pkt->dts;
2652
2653 //XXX/FIXME this is a temporary hack until all encoders output pts
2654 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2655 pkt->dts=
2656 // pkt->pts= st->cur_dts;
2657 pkt->pts= st->pts.val;
2658 }
2659
2660 //calculate dts from pts
2661 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2662 st->pts_buffer[0]= pkt->pts;
2663 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2664 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2665 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2666 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2667
2668 pkt->dts= st->pts_buffer[0];
2669 }
2670
2671 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2672 av_log(s, AV_LOG_ERROR,
2673 "st:%d error, non monotone timestamps %"PRId64" >= %"PRId64"\n",
2674 st->index, st->cur_dts, pkt->dts);
2675 return -1;
2676 }
2677 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2678 av_log(s, AV_LOG_ERROR, "st:%d error, pts < dts\n", st->index);
2679 return -1;
2680 }
2681
2682 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2683 st->cur_dts= pkt->dts;
2684 st->pts.val= pkt->dts;
2685
2686 /* update pts */
2687 switch (st->codec->codec_type) {
2688 case CODEC_TYPE_AUDIO:
2689 frame_size = get_audio_frame_size(st->codec, pkt->size);
2690
2691 /* HACK/FIXME: we skip the initial zero-sized packets as they most
2692 likely correspond to the encoder delay, but it would be better if we
2693 had the real timestamps from the encoder */
2694 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2695 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2696 }
2697 break;
2698 case CODEC_TYPE_VIDEO:
2699 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2700 break;
2701 default:
2702 break;
2703 }
2704 return 0;
2705 }
2706
2707 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2708 {
2709 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
2710
2711 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2712 return ret;
2713
2714 ret= s->oformat->write_packet(s, pkt);
2715 if(!ret)
2716 ret= url_ferror(s->pb);
2717 return ret;
2718 }
2719
2720 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2721 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2722 {
2723 AVPacketList **next_point, *this_pktl;
2724
2725 this_pktl = av_mallocz(sizeof(AVPacketList));
2726 this_pktl->pkt= *pkt;
2727 pkt->destruct= NULL; // do not free the original but only the copy
2728 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-allocated memory
2729
2730 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
2731 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
2732 }else
2733 next_point = &s->packet_buffer;
2734
2735 if(*next_point){
2736 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
2737 while(!compare(s, &(*next_point)->pkt, pkt)){
2738 next_point= &(*next_point)->next;
2739 }
2740 goto next_non_null;
2741 }else{
2742 next_point = &(s->packet_buffer_end->next);
2743 }
2744 }
2745 assert(!*next_point);
2746
2747 s->packet_buffer_end= this_pktl;
2748 next_non_null:
2749
2750 this_pktl->next= *next_point;
2751
2752 s->streams[pkt->stream_index]->last_in_packet_buffer=
2753 *next_point= this_pktl;
2754 }
2755
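/* Returns 1 if 'pkt' should be muxed before 'next': pkt's dts is rescaled
into next's stream time base (rounding down, so equal timestamps keep the
packet that was buffered first in front) and compared against next's dts. */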
2756 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2757 {
2758 AVStream *st = s->streams[ pkt ->stream_index];
2759 AVStream *st2= s->streams[ next->stream_index];
2760 int64_t a= st2->time_base.num * (int64_t)st ->time_base.den;
2761 int64_t b= st ->time_base.num * (int64_t)st2->time_base.den;
2762 return av_rescale_rnd(pkt->dts, b, a, AV_ROUND_DOWN) < next->dts;
2763 }
2764
2765 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2766 AVPacketList *pktl;
2767 int stream_count=0;
2768 int i;
2769
2770 if(pkt){
2771 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
2772 }
2773
2774 for(i=0; i < s->nb_streams; i++)
2775 stream_count+= !!s->streams[i]->last_in_packet_buffer;
2776
2777 if(stream_count && (s->nb_streams == stream_count || flush)){
2778 pktl= s->packet_buffer;
2779 *out= pktl->pkt;
2780
2781 s->packet_buffer= pktl->next;
2782 if(!s->packet_buffer)
2783 s->packet_buffer_end= NULL;
2784
2785 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
2786 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
2787 av_freep(&pktl);
2788 return 1;
2789 }else{
2790 av_init_packet(out);
2791 return 0;
2792 }
2793 }
2794
2795 /**
2796 * Interleaves an AVPacket correctly so it can be muxed.
2797 * @param out the interleaved packet will be output here
2798 * @param in the input packet
2799 * @param flush 1 if no further packets are available as input and all
2800 * remaining packets should be output
2801 * @return 1 if a packet was output, 0 if no packet could be output,
2802 * < 0 if an error occurred
2803 */
2804 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2805 if(s->oformat->interleave_packet)
2806 return s->oformat->interleave_packet(s, out, in, flush);
2807 else
2808 return av_interleave_packet_per_dts(s, out, in, flush);
2809 }
2810
2811 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2812 AVStream *st= s->streams[ pkt->stream_index];
2813
2814 //FIXME/XXX/HACK drop zero sized packets
2815 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2816 return 0;
2817
2818 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2819 if(compute_pkt_fields2(s, st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2820 return -1;
2821
2822 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2823 return -1;
2824
2825 for(;;){
2826 AVPacket opkt;
2827 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2828 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2829 return ret;
2830
2831 ret= s->oformat->write_packet(s, &opkt);
2832
2833 av_free_packet(&opkt);
2834 pkt= NULL;
2835
2836 if(ret<0)
2837 return ret;
2838 if(url_ferror(s->pb))
2839 return url_ferror(s->pb);
2840 }
2841 }
2842
2843 int av_write_trailer(AVFormatContext *s)
2844 {
2845 int ret, i;
2846
2847 for(;;){
2848 AVPacket pkt;
2849 ret= av_interleave_packet(s, &pkt, NULL, 1);
2850 if(ret<0) //FIXME cleanup needed for ret<0 ?
2851 goto fail;
2852 if(!ret)
2853 break;
2854
2855 ret= s->oformat->write_packet(s, &pkt);
2856
2857 av_free_packet(&pkt);
2858
2859 if(ret<0)
2860 goto fail;
2861 if(url_ferror(s->pb))
2862 goto fail;
2863 }
2864
2865 if(s->oformat->write_trailer)
2866 ret = s->oformat->write_trailer(s);
2867 fail:
2868 if(ret == 0)
2869 ret=url_ferror(s->pb);
2870 for(i=0;i<s->nb_streams;i++)
2871 av_freep(&s->streams[i]->priv_data);
2872 av_freep(&s->priv_data);
2873 return ret;
2874 }
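/*
* Typical muxing sequence from an application (editor's sketch, not part of
* this file; the output name and stream setup are placeholders and error
* handling is trimmed):
*
* AVFormatContext *s = avformat_alloc_context();
* AVStream *st = av_new_stream(s, 0);
*
* s->oformat = av_guess_format(NULL, "output.avi", NULL);
* // ... fill st->codec with the encoder parameters ...
* av_set_parameters(s, NULL);
* url_fopen(&s->pb, "output.avi", URL_WRONLY);
* av_write_header(s);
* // for each encoded packet: av_interleaved_write_frame(s, &pkt);
* av_write_trailer(s);
* url_fclose(s->pb);
*/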
2875
2876 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2877 {
2878 int i, j;
2879 AVProgram *program=NULL;
2880 void *tmp;
2881
2882 if (idx >= ac->nb_streams) {
2883 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2884 return;
2885 }
2886
2887 for(i=0; i<ac->nb_programs; i++){
2888 if(ac->programs[i]->id != progid)
2889 continue;
2890 program = ac->programs[i];
2891 for(j=0; j<program->nb_stream_indexes; j++)
2892 if(program->stream_index[j] == idx)
2893 return;
2894
2895 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2896 if(!tmp)
2897 return;
2898 program->stream_index = tmp;
2899 program->stream_index[program->nb_stream_indexes++] = idx;
2900 return;
2901 }
2902 }
2903
2904 static void print_fps(double d, const char *postfix){
2905 uint64_t v= lrintf(d*100);
2906 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2907 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2908 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
2909 }
2910
2911 static void dump_metadata(void *ctx, AVMetadata *m, const char *indent)
2912 {
2913 if(m && !(m->count == 1 && av_metadata_get(m, "language", NULL, 0))){
2914 AVMetadataTag *tag=NULL;
2915
2916 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
2917 while((tag=av_metadata_get(m, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
2918 if(strcmp("language", tag->key))
2919 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
2920 }
2921 }
2922 }
2923
2924 /* "user interface" functions */
2925 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2926 {
2927 char buf[256];
2928 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2929 AVStream *st = ic->streams[i];
2930 int g = av_gcd(st->time_base.num, st->time_base.den);
2931 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
2932 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2933 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2934 /* the PID is an important piece of information, so we display it */
2935 /* XXX: add a generic system */
2936 if (flags & AVFMT_SHOW_IDS)
2937 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2938 if (lang)
2939 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
2940 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2941 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2942 if (st->sample_aspect_ratio.num && // default
2943 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2944 AVRational display_aspect_ratio;
2945 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2946 st->codec->width*st->sample_aspect_ratio.num,
2947 st->codec->height*st->sample_aspect_ratio.den,
2948 1024*1024);
2949 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2950 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2951 display_aspect_ratio.num, display_aspect_ratio.den);
2952 }
2953 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2954 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
2955 print_fps(av_q2d(st->avg_frame_rate), "fps");
2956 if(st->r_frame_rate.den && st->r_frame_rate.num)
2957 print_fps(av_q2d(st->r_frame_rate), "tbr");
2958 if(st->time_base.den && st->time_base.num)
2959 print_fps(1/av_q2d(st->time_base), "tbn");
2960 if(st->codec->time_base.den && st->codec->time_base.num)
2961 print_fps(1/av_q2d(st->codec->time_base), "tbc");
2962 }
2963 av_log(NULL, AV_LOG_INFO, "\n");
2964 dump_metadata(NULL, st->metadata, " ");
2965 }
2966
2967 void dump_format(AVFormatContext *ic,
2968 int index,
2969 const char *url,
2970 int is_output)
2971 {
2972 int i;
2973 uint8_t *printed = av_mallocz(ic->nb_streams);
2974 if (ic->nb_streams && !printed)
2975 return;
2976
2977 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2978 is_output ? "Output" : "Input",
2979 index,
2980 is_output ? ic->oformat->name : ic->iformat->name,
2981 is_output ? "to" : "from", url);
2982 dump_metadata(NULL, ic->metadata, " ");
2983 if (!is_output) {
2984 av_log(NULL, AV_LOG_INFO, " Duration: ");
2985 if (ic->duration != AV_NOPTS_VALUE) {
2986 int hours, mins, secs, us;
2987 secs = ic->duration / AV_TIME_BASE;
2988 us = ic->duration % AV_TIME_BASE;
2989 mins = secs / 60;
2990 secs %= 60;
2991 hours = mins / 60;
2992 mins %= 60;
2993 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2994 (100 * us) / AV_TIME_BASE);
2995 } else {
2996 av_log(NULL, AV_LOG_INFO, "N/A");
2997 }
2998 if (ic->start_time != AV_NOPTS_VALUE) {
2999 int secs, us;
3000 av_log(NULL, AV_LOG_INFO, ", start: ");
3001 secs = ic->start_time / AV_TIME_BASE;
3002 us = ic->start_time % AV_TIME_BASE;
3003 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3004 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3005 }
3006 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3007 if (ic->bit_rate) {
3008 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3009 } else {
3010 av_log(NULL, AV_LOG_INFO, "N/A");
3011 }
3012 av_log(NULL, AV_LOG_INFO, "\n");
3013 }
3014 if(ic->nb_programs) {
3015 int j, k, total = 0;
3016 for(j=0; j<ic->nb_programs; j++) {
3017 AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
3018 "name", NULL, 0);
3019 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3020 name ? name->value : "");
3021 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3022 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3023 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3024 printed[ic->programs[j]->stream_index[k]] = 1;
3025 }
3026 total += ic->programs[j]->nb_stream_indexes;
3027 }
3028 if (total < ic->nb_streams)
3029 av_log(NULL, AV_LOG_INFO, " No Program\n");
3030 }
3031 for(i=0;i<ic->nb_streams;i++)
3032 if (!printed[i])
3033 dump_stream_format(ic, i, index, is_output);
3034
3035 av_free(printed);
3036 }
3037
3038 #if LIBAVFORMAT_VERSION_MAJOR < 53
3039 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
3040 {
3041 return av_parse_video_frame_size(width_ptr, height_ptr, str);
3042 }
3043
3044 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
3045 {
3046 AVRational frame_rate;
3047 int ret = av_parse_video_frame_rate(&frame_rate, arg);
3048 *frame_rate_num= frame_rate.num;
3049 *frame_rate_den= frame_rate.den;
3050 return ret;
3051 }
3052 #endif
3053
3054 int64_t av_gettime(void)
3055 {
3056 struct timeval tv;
3057 gettimeofday(&tv,NULL);
3058 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
3059 }
3060
3061 int64_t parse_date(const char *datestr, int duration)
3062 {
3063 const char *p;
3064 int64_t t;
3065 struct tm dt;
3066 int i;
3067 static const char * const date_fmt[] = {
3068 "%Y-%m-%d",
3069 "%Y%m%d",
3070 };
3071 static const char * const time_fmt[] = {
3072 "%H:%M:%S",
3073 "%H%M%S",
3074 };
3075 const char *q;
3076 int is_utc, len;
3077 char lastch;
3078 int negative = 0;
3079
3080 #undef time
3081 time_t now = time(0);
3082
3083 len = strlen(datestr);
3084 if (len > 0)
3085 lastch = datestr[len - 1];
3086 else
3087 lastch = '\0';
3088 is_utc = (lastch == 'z' || lastch == 'Z');
3089
3090 memset(&dt, 0, sizeof(dt));
3091
3092 p = datestr;
3093 q = NULL;
3094 if (!duration) {
3095 if (!strncasecmp(datestr, "now", len))
3096 return (int64_t) now * 1000000;
3097
3098 /* parse the year-month-day part */
3099 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
3100 q = small_strptime(p, date_fmt[i], &dt);
3101 if (q) {
3102 break;
3103 }
3104 }
3105