1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "internal.h"
23 #include "libavcodec/opt.h"
24 #include "metadata.h"
25 #include "libavutil/avstring.h"
26 #include "riff.h"
27 #include <sys/time.h>
28 #include <time.h>
29 #include <strings.h>
30
31 #undef NDEBUG
32 #include <assert.h>
33
34 /**
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
37 */
38
39 unsigned avformat_version(void)
40 {
41 return LIBAVFORMAT_VERSION_INT;
42 }
43
44 /* fraction handling */
45
46 /**
47 * f = val + (num / den) + 0.5.
48 *
49 * 'num' is normalized so that 0 <= num < den.
50 *
51 * @param f fractional number
52 * @param val integer value
53 * @param num must be >= 0
54 * @param den must be >= 1
55 */
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
57 {
58 num += (den >> 1);
59 if (num >= den) {
60 val += num / den;
61 num = num % den;
62 }
63 f->val = val;
64 f->num = num;
65 f->den = den;
66 }
67
68 /**
69 * Fractional addition to f: f = f + (incr / f->den).
70 *
71 * @param f fractional number
72 * @param incr increment, can be positive or negative
73 */
74 static void av_frac_add(AVFrac *f, int64_t incr)
75 {
76 int64_t num, den;
77
78 num = f->num + incr;
79 den = f->den;
80 if (num < 0) {
81 f->val += num / den;
82 num = num % den;
83 if (num < 0) {
84 num += den;
85 f->val--;
86 }
87 } else if (num >= den) {
88 f->val += num / den;
89 num = num % den;
90 }
91 f->num = num;
92 }
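/* Illustrative note: this AVFrac pair lets the muxing code accumulate timestamps
 * in exact rational steps. For example, with f->den = 3, calling av_frac_add(&f, 1)
 * three times advances f->val by exactly 1, whereas repeatedly adding an
 * integer-rounded 1/3 would drift over time. */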
93
94 /** head of registered input format linked list */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
97 AVOutputFormat *first_oformat = NULL;
98
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
100 {
101 if(f) return f->next;
102 else return first_iformat;
103 }
104
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
106 {
107 if(f) return f->next;
108 else return first_oformat;
109 }
110
111 void av_register_input_format(AVInputFormat *format)
112 {
113 AVInputFormat **p;
114 p = &first_iformat;
115 while (*p != NULL) p = &(*p)->next;
116 *p = format;
117 format->next = NULL;
118 }
119
120 void av_register_output_format(AVOutputFormat *format)
121 {
122 AVOutputFormat **p;
123 p = &first_oformat;
124 while (*p != NULL) p = &(*p)->next;
125 *p = format;
126 format->next = NULL;
127 }
128
129 int match_ext(const char *filename, const char *extensions)
130 {
131 const char *ext, *p;
132 char ext1[32], *q;
133
134 if(!filename)
135 return 0;
136
137 ext = strrchr(filename, '.');
138 if (ext) {
139 ext++;
140 p = extensions;
141 for(;;) {
142 q = ext1;
143 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
144 *q++ = *p++;
145 *q = '\0';
146 if (!strcasecmp(ext1, ext))
147 return 1;
148 if (*p == '\0')
149 break;
150 p++;
151 }
152 }
153 return 0;
154 }
155
156 static int match_format(const char *name, const char *names)
157 {
158 const char *p;
159 int len, namelen;
160
161 if (!name || !names)
162 return 0;
163
164 namelen = strlen(name);
165 while ((p = strchr(names, ','))) {
166 len = FFMAX(p - names, namelen);
167 if (!strncasecmp(name, names, len))
168 return 1;
169 names = p+1;
170 }
171 return !strcasecmp(name, names);
172 }
173
174 AVOutputFormat *guess_format(const char *short_name, const char *filename,
175 const char *mime_type)
176 {
177 AVOutputFormat *fmt, *fmt_found;
178 int score_max, score;
179
180 /* specific test for image sequences */
181 #if CONFIG_IMAGE2_MUXER
182 if (!short_name && filename &&
183 av_filename_number_test(filename) &&
184 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
185 return guess_format("image2", NULL, NULL);
186 }
187 #endif
188 /* Find the proper file type. */
189 fmt_found = NULL;
190 score_max = 0;
191 fmt = first_oformat;
192 while (fmt != NULL) {
193 score = 0;
194 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
195 score += 100;
196 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
197 score += 10;
198 if (filename && fmt->extensions &&
199 match_ext(filename, fmt->extensions)) {
200 score += 5;
201 }
202 if (score > score_max) {
203 score_max = score;
204 fmt_found = fmt;
205 }
206 fmt = fmt->next;
207 }
208 return fmt_found;
209 }
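/* Example (illustrative, assuming the AVI muxer is compiled in):
 * guess_format(NULL, "out.avi", NULL) selects the AVI muxer through the
 * extension match (+5), while guess_format("avi", NULL, NULL) matches on the
 * short name (+100); the highest-scoring registered muxer wins. */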
210
211 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
212 const char *mime_type)
213 {
214 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
215
216 if (fmt) {
217 AVOutputFormat *stream_fmt;
218 char stream_format_name[64];
219
220 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
221 stream_fmt = guess_format(stream_format_name, NULL, NULL);
222
223 if (stream_fmt)
224 fmt = stream_fmt;
225 }
226
227 return fmt;
228 }
229
230 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
231 const char *filename, const char *mime_type, enum CodecType type){
232 if(type == CODEC_TYPE_VIDEO){
233 enum CodecID codec_id= CODEC_ID_NONE;
234
235 #if CONFIG_IMAGE2_MUXER
236 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
237 codec_id= av_guess_image2_codec(filename);
238 }
239 #endif
240 if(codec_id == CODEC_ID_NONE)
241 codec_id= fmt->video_codec;
242 return codec_id;
243 }else if(type == CODEC_TYPE_AUDIO)
244 return fmt->audio_codec;
245 else
246 return CODEC_ID_NONE;
247 }
248
249 AVInputFormat *av_find_input_format(const char *short_name)
250 {
251 AVInputFormat *fmt;
252 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
253 if (match_format(short_name, fmt->name))
254 return fmt;
255 }
256 return NULL;
257 }
258
259 /* memory handling */
260
261
262 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
263 {
264 int ret= av_new_packet(pkt, size);
265
266 if(ret<0)
267 return ret;
268
269 pkt->pos= url_ftell(s);
270
271 ret= get_buffer(s, pkt->data, size);
272 if(ret<=0)
273 av_free_packet(pkt);
274 else
275 av_shrink_packet(pkt, ret);
276
277 return ret;
278 }
279
280
281 int av_filename_number_test(const char *filename)
282 {
283 char buf[1024];
284 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
285 }
286
287 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
288 {
289 AVInputFormat *fmt1, *fmt;
290 int score;
291
292 fmt = NULL;
293 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
294 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
295 continue;
296 score = 0;
297 if (fmt1->read_probe) {
298 score = fmt1->read_probe(pd);
299 } else if (fmt1->extensions) {
300 if (match_ext(pd->filename, fmt1->extensions)) {
301 score = 50;
302 }
303 }
304 if (score > *score_max) {
305 *score_max = score;
306 fmt = fmt1;
307 }else if (score == *score_max)
308 fmt = NULL;
309 }
310 return fmt;
311 }
312
313 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
314 int score=0;
315 return av_probe_input_format2(pd, is_opened, &score);
316 }
317
318 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
319 {
320 AVInputFormat *fmt;
321 fmt = av_probe_input_format2(pd, 1, &score);
322
323 if (fmt) {
324 if (!strcmp(fmt->name, "mp3")) {
325 st->codec->codec_id = CODEC_ID_MP3;
326 st->codec->codec_type = CODEC_TYPE_AUDIO;
327 } else if (!strcmp(fmt->name, "ac3")) {
328 st->codec->codec_id = CODEC_ID_AC3;
329 st->codec->codec_type = CODEC_TYPE_AUDIO;
330 } else if (!strcmp(fmt->name, "mpegvideo")) {
331 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
332 st->codec->codec_type = CODEC_TYPE_VIDEO;
333 } else if (!strcmp(fmt->name, "m4v")) {
334 st->codec->codec_id = CODEC_ID_MPEG4;
335 st->codec->codec_type = CODEC_TYPE_VIDEO;
336 } else if (!strcmp(fmt->name, "h264")) {
337 st->codec->codec_id = CODEC_ID_H264;
338 st->codec->codec_type = CODEC_TYPE_VIDEO;
339 } else if (!strcmp(fmt->name, "dts")) {
340 st->codec->codec_id = CODEC_ID_DTS;
341 st->codec->codec_type = CODEC_TYPE_AUDIO;
342 }
343 }
344 return !!fmt;
345 }
346
347 /************************************************************/
348 /* input media file */
349
350 /**
351 * Open a media file from an IO stream. 'fmt' must be specified.
352 */
353 int av_open_input_stream(AVFormatContext **ic_ptr,
354 ByteIOContext *pb, const char *filename,
355 AVInputFormat *fmt, AVFormatParameters *ap)
356 {
357 int err;
358 AVFormatContext *ic;
359 AVFormatParameters default_ap;
360
361 if(!ap){
362 ap=&default_ap;
363 memset(ap, 0, sizeof(default_ap));
364 }
365
366 if(!ap->prealloced_context)
367 ic = avformat_alloc_context();
368 else
369 ic = *ic_ptr;
370 if (!ic) {
371 err = AVERROR(ENOMEM);
372 goto fail;
373 }
374 ic->iformat = fmt;
375 ic->pb = pb;
376 ic->duration = AV_NOPTS_VALUE;
377 ic->start_time = AV_NOPTS_VALUE;
378 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
379
380 /* allocate private data */
381 if (fmt->priv_data_size > 0) {
382 ic->priv_data = av_mallocz(fmt->priv_data_size);
383 if (!ic->priv_data) {
384 err = AVERROR(ENOMEM);
385 goto fail;
386 }
387 } else {
388 ic->priv_data = NULL;
389 }
390
391 if (ic->iformat->read_header) {
392 err = ic->iformat->read_header(ic, ap);
393 if (err < 0)
394 goto fail;
395 }
396
397 if (pb && !ic->data_offset)
398 ic->data_offset = url_ftell(ic->pb);
399
400 #if LIBAVFORMAT_VERSION_MAJOR < 53
401 ff_metadata_demux_compat(ic);
402 #endif
403
404 ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
405
406 *ic_ptr = ic;
407 return 0;
408 fail:
409 if (ic) {
410 int i;
411 av_freep(&ic->priv_data);
412 for(i=0;i<ic->nb_streams;i++) {
413 AVStream *st = ic->streams[i];
414 if (st) {
415 av_free(st->priv_data);
416 av_free(st->codec->extradata);
417 }
418 av_free(st);
419 }
420 }
421 av_free(ic);
422 *ic_ptr = NULL;
423 return err;
424 }
425
426 /** size of probe buffer, for guessing file type from file contents */
427 #define PROBE_BUF_MIN 2048
428 #define PROBE_BUF_MAX (1<<20)
429
430 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
431 AVInputFormat *fmt,
432 int buf_size,
433 AVFormatParameters *ap)
434 {
435 int err, probe_size;
436 AVProbeData probe_data, *pd = &probe_data;
437 ByteIOContext *pb = NULL;
438
439 pd->filename = "";
440 if (filename)
441 pd->filename = filename;
442 pd->buf = NULL;
443 pd->buf_size = 0;
444
445 if (!fmt) {
446 /* guess format if no file can be opened */
447 fmt = av_probe_input_format(pd, 0);
448 }
449
450 /* Do not open file if the format does not need it. XXX: specific
451 hack needed to handle RTSP/TCP */
452 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
453 /* if no file needed do not try to open one */
454 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
455 goto fail;
456 }
457 if (buf_size > 0) {
458 url_setbufsize(pb, buf_size);
459 }
460
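        /* probe with growing buffers: start at PROBE_BUF_MIN bytes and double up to
           PROBE_BUF_MAX until a demuxer scores high enough (AVPROBE_SCORE_MAX/4 for
           the partial reads, any nonzero score on the final, full-sized attempt) */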
461 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
462 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
463 /* read probe data */
464 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
465 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
466 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
467 if (url_fseek(pb, 0, SEEK_SET) < 0) {
468 url_fclose(pb);
469 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
470 pb = NULL;
471 err = AVERROR(EIO);
472 goto fail;
473 }
474 }
475 /* guess file format */
476 fmt = av_probe_input_format2(pd, 1, &score);
477 }
478 av_freep(&pd->buf);
479 }
480
481 /* if still no format found, error */
482 if (!fmt) {
483 err = AVERROR_NOFMT;
484 goto fail;
485 }
486
487 /* check filename in case an image number is expected */
488 if (fmt->flags & AVFMT_NEEDNUMBER) {
489 if (!av_filename_number_test(filename)) {
490 err = AVERROR_NUMEXPECTED;
491 goto fail;
492 }
493 }
494 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
495 if (err)
496 goto fail;
497 return 0;
498 fail:
499 av_freep(&pd->buf);
500 if (pb)
501 url_fclose(pb);
502 av_freep(ic_ptr);
503 return err;
504
505 }
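#if 0
/* Minimal usage sketch (illustrative only; assumes av_register_all() was called
 * and omits most error handling): open a file, probe the streams, then pull
 * packets until EOF. */
static void example_demux(const char *filename)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;

    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return;
    if (av_find_stream_info(ic) < 0) {
        av_close_input_file(ic);
        return;
    }
    while (av_read_frame(ic, &pkt) >= 0) {
        /* consume pkt.data / pkt.size for stream pkt.stream_index here */
        av_free_packet(&pkt);
    }
    av_close_input_file(ic);
}
#endif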
506
507 /*******************************************************/
508
509 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
510 AVPacketList **plast_pktl){
511 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
512 if (!pktl)
513 return NULL;
514
515 if (*packet_buffer)
516 (*plast_pktl)->next = pktl;
517 else
518 *packet_buffer = pktl;
519
520 /* add the packet to the buffered packet list */
521 *plast_pktl = pktl;
522 pktl->pkt= *pkt;
523 return &pktl->pkt;
524 }
525
526 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
527 {
528 int ret, i;
529 AVStream *st;
530
531 for(;;){
532 AVPacketList *pktl = s->raw_packet_buffer;
533
534 if (pktl) {
535 *pkt = pktl->pkt;
536 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
537 !s->streams[pkt->stream_index]->probe_packets ||
538 s->raw_packet_buffer_remaining_size < pkt->size){
539 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
540 av_freep(&pd->buf);
541 pd->buf_size = 0;
542 s->raw_packet_buffer = pktl->next;
543 s->raw_packet_buffer_remaining_size += pkt->size;
544 av_free(pktl);
545 return 0;
546 }
547 }
548
549 av_init_packet(pkt);
550 ret= s->iformat->read_packet(s, pkt);
551 if (ret < 0) {
552 if (!pktl || ret == AVERROR(EAGAIN))
553 return ret;
554 for (i = 0; i < s->nb_streams; i++)
555 s->streams[i]->probe_packets = 0;
556 continue;
557 }
558 st= s->streams[pkt->stream_index];
559
560 switch(st->codec->codec_type){
561 case CODEC_TYPE_VIDEO:
562 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
563 break;
564 case CODEC_TYPE_AUDIO:
565 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
566 break;
567 case CODEC_TYPE_SUBTITLE:
568 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
569 break;
570 }
571
572 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
573 !st->probe_packets))
574 return ret;
575
576 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
577 s->raw_packet_buffer_remaining_size -= pkt->size;
578
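        /* for streams whose codec is still unknown (CODEC_ID_PROBE), keep a copy of
           the payload in st->probe_data and retry content-based codec detection each
           time the accumulated size crosses a power of two */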
579 if(st->codec->codec_id == CODEC_ID_PROBE){
580 AVProbeData *pd = &st->probe_data;
581
582 --st->probe_packets;
583
584 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
585 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
586 pd->buf_size += pkt->size;
587 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
588
589 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
590 set_codec_from_probe_data(st, pd, 1);
591 if(st->codec->codec_id != CODEC_ID_PROBE){
592 pd->buf_size=0;
593 av_freep(&pd->buf);
594 }
595 }
596 }
597 }
598 }
599
600 /**********************************************************/
601
602 /**
603 * Get the number of samples of an audio frame. Return -1 on error.
604 */
605 static int get_audio_frame_size(AVCodecContext *enc, int size)
606 {
607 int frame_size;
608
609 if(enc->codec_id == CODEC_ID_VORBIS)
610 return -1;
611
612 if (enc->frame_size <= 1) {
613 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
614
615 if (bits_per_sample) {
616 if (enc->channels == 0)
617 return -1;
618 frame_size = (size << 3) / (bits_per_sample * enc->channels);
619 } else {
620 /* used for example by ADPCM codecs */
621 if (enc->bit_rate == 0)
622 return -1;
623 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
624 }
625 } else {
626 frame_size = enc->frame_size;
627 }
628 return frame_size;
629 }
630
631
632 /**
633 * Compute the frame duration in seconds as the fraction *pnum / *pden. Both are set to 0 if the duration is not available.
634 */
635 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
636 AVCodecParserContext *pc, AVPacket *pkt)
637 {
638 int frame_size;
639
640 *pnum = 0;
641 *pden = 0;
642 switch(st->codec->codec_type) {
643 case CODEC_TYPE_VIDEO:
644 if(st->time_base.num*1000LL > st->time_base.den){
645 *pnum = st->time_base.num;
646 *pden = st->time_base.den;
647 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
648 *pnum = st->codec->time_base.num;
649 *pden = st->codec->time_base.den;
650 if (pc && pc->repeat_pict) {
651 *pnum = (*pnum) * (1 + pc->repeat_pict);
652 }
653 }
654 break;
655 case CODEC_TYPE_AUDIO:
656 frame_size = get_audio_frame_size(st->codec, pkt->size);
657 if (frame_size < 0)
658 break;
659 *pnum = frame_size;
660 *pden = st->codec->sample_rate;
661 break;
662 default:
663 break;
664 }
665 }
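/* Illustrative example: for 25 fps video with a 1/25 time base this yields
 * *pnum = 1, *pden = 25 (1/25 s per frame); for a 1152-sample audio frame at
 * 44100 Hz it yields *pnum = 1152, *pden = 44100. */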
666
667 static int is_intra_only(AVCodecContext *enc){
668 if(enc->codec_type == CODEC_TYPE_AUDIO){
669 return 1;
670 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
671 switch(enc->codec_id){
672 case CODEC_ID_MJPEG:
673 case CODEC_ID_MJPEGB:
674 case CODEC_ID_LJPEG:
675 case CODEC_ID_RAWVIDEO:
676 case CODEC_ID_DVVIDEO:
677 case CODEC_ID_HUFFYUV:
678 case CODEC_ID_FFVHUFF:
679 case CODEC_ID_ASV1:
680 case CODEC_ID_ASV2:
681 case CODEC_ID_VCR1:
682 case CODEC_ID_DNXHD:
683 case CODEC_ID_JPEG2000:
684 return 1;
685 default: break;
686 }
687 }
688 return 0;
689 }
690
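/* Once the first valid DTS of a stream is seen, record it and apply the
 * resulting offset to the timestamps of packets already buffered for that
 * stream, deriving the stream's start_time along the way. */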
691 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
692 int64_t dts, int64_t pts)
693 {
694 AVStream *st= s->streams[stream_index];
695 AVPacketList *pktl= s->packet_buffer;
696
697 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
698 return;
699
700 st->first_dts= dts - st->cur_dts;
701 st->cur_dts= dts;
702
703 for(; pktl; pktl= pktl->next){
704 if(pktl->pkt.stream_index != stream_index)
705 continue;
706 //FIXME think more about this check
707 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
708 pktl->pkt.pts += st->first_dts;
709
710 if(pktl->pkt.dts != AV_NOPTS_VALUE)
711 pktl->pkt.dts += st->first_dts;
712
713 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
714 st->start_time= pktl->pkt.pts;
715 }
716 if (st->start_time == AV_NOPTS_VALUE)
717 st->start_time = pts;
718 }
719
720 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
721 {
722 AVPacketList *pktl= s->packet_buffer;
723 int64_t cur_dts= 0;
724
725 if(st->first_dts != AV_NOPTS_VALUE){
726 cur_dts= st->first_dts;
727 for(; pktl; pktl= pktl->next){
728 if(pktl->pkt.stream_index == pkt->stream_index){
729 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
730 break;
731 cur_dts -= pkt->duration;
732 }
733 }
734 pktl= s->packet_buffer;
735 st->first_dts = cur_dts;
736 }else if(st->cur_dts)
737 return;
738
739 for(; pktl; pktl= pktl->next){
740 if(pktl->pkt.stream_index != pkt->stream_index)
741 continue;
742 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
743 && !pktl->pkt.duration){
744 pktl->pkt.dts= cur_dts;
745 if(!st->codec->has_b_frames)
746 pktl->pkt.pts= cur_dts;
747 cur_dts += pkt->duration;
748 pktl->pkt.duration= pkt->duration;
749 }else
750 break;
751 }
752 if(st->first_dts == AV_NOPTS_VALUE)
753 st->cur_dts= cur_dts;
754 }
755
756 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
757 AVCodecParserContext *pc, AVPacket *pkt)
758 {
759 int num, den, presentation_delayed, delay, i;
760 int64_t offset;
761
762 if (pc && pc->pict_type == FF_B_TYPE)
763 st->codec->has_b_frames = 1;
764
765 /* do we have a video B-frame? */
766 delay= st->codec->has_b_frames;
767 presentation_delayed = 0;
768 /* XXX: need has_b_frame, but cannot get it if the codec is
769 not initialized */
770 if (delay &&
771 pc && pc->pict_type != FF_B_TYPE)
772 presentation_delayed = 1;
773
774 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
775 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
776 pkt->dts -= 1LL<<st->pts_wrap_bits;
777 }
778
779 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
780 // we take the conservative approach and discard both
781 // Note: if this is misbehaving for an H.264 file then presentation_delayed is possibly not set correctly.
782 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
783 av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
784 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
785 }
786
787 if (pkt->duration == 0) {
788 compute_frame_duration(&num, &den, st, pc, pkt);
789 if (den && num) {
790 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
791
792 if(pkt->duration != 0 && s->packet_buffer)
793 update_initial_durations(s, st, pkt);
794 }
795 }
796
797 /* correct timestamps with byte offset if demuxers only have timestamps
798 on packet boundaries */
799 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
800 /* this will estimate bitrate based on this frame's duration and size */
801 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
802 if(pkt->pts != AV_NOPTS_VALUE)
803 pkt->pts += offset;
804 if(pkt->dts != AV_NOPTS_VALUE)
805 pkt->dts += offset;
806 }
807
808 if (pc && pc->dts_sync_point >= 0) {
809 // we have synchronization info from the parser
810 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
811 if (den > 0) {
812 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
813 if (pkt->dts != AV_NOPTS_VALUE) {
814 // got DTS from the stream, update reference timestamp
815 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
816 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
817 } else if (st->reference_dts != AV_NOPTS_VALUE) {
818 // compute DTS based on reference timestamp
819 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
820 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
821 }
822 if (pc->dts_sync_point > 0)
823 st->reference_dts = pkt->dts; // new reference
824 }
825 }
826
827 /* This may be redundant, but it should not hurt. */
828 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
829 presentation_delayed = 1;
830
831 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
832 /* interpolate PTS and DTS if they are not present */
833 //We skip H264 currently because delay and has_b_frames are not reliably set
834 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
835 if (presentation_delayed) {
836 /* DTS = decompression timestamp */
837 /* PTS = presentation timestamp */
838 if (pkt->dts == AV_NOPTS_VALUE)
839 pkt->dts = st->last_IP_pts;
840 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
841 if (pkt->dts == AV_NOPTS_VALUE)
842 pkt->dts = st->cur_dts;
843
844 /* this is tricky: the dts must be incremented by the duration
845 of the frame we are displaying, i.e. the last I- or P-frame */
846 if (st->last_IP_duration == 0)
847 st->last_IP_duration = pkt->duration;
848 if(pkt->dts != AV_NOPTS_VALUE)
849 st->cur_dts = pkt->dts + st->last_IP_duration;
850 st->last_IP_duration = pkt->duration;
851 st->last_IP_pts= pkt->pts;
852 /* cannot compute PTS if not present (we can compute it only
853 by knowing the future) */
854 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
855 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
856 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
857 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
858 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
859 pkt->pts += pkt->duration;
860 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
861 }
862 }
863
864 /* presentation is not delayed: PTS and DTS are the same */
865 if(pkt->pts == AV_NOPTS_VALUE)
866 pkt->pts = pkt->dts;
867 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
868 if(pkt->pts == AV_NOPTS_VALUE)
869 pkt->pts = st->cur_dts;
870 pkt->dts = pkt->pts;
871 if(pkt->pts != AV_NOPTS_VALUE)
872 st->cur_dts = pkt->pts + pkt->duration;
873 }
874 }
875
876 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
877 st->pts_buffer[0]= pkt->pts;
878 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
879 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
880 if(pkt->dts == AV_NOPTS_VALUE)
881 pkt->dts= st->pts_buffer[0];
882 if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
883 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
884 }
885 if(pkt->dts > st->cur_dts)
886 st->cur_dts = pkt->dts;
887 }
888
889 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
890
891 /* update flags */
892 if(is_intra_only(st->codec))
893 pkt->flags |= PKT_FLAG_KEY;
894 else if (pc) {
895 pkt->flags = 0;
896 /* keyframe computation */
897 if (pc->key_frame == 1)
898 pkt->flags |= PKT_FLAG_KEY;
899 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
900 pkt->flags |= PKT_FLAG_KEY;
901 }
902 if (pc)
903 pkt->convergence_duration = pc->convergence_duration;
904 }
905
906
907 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
908 {
909 AVStream *st;
910 int len, ret, i;
911
912 av_init_packet(pkt);
913
914 for(;;) {
915 /* select current input stream component */
916 st = s->cur_st;
917 if (st) {
918 if (!st->need_parsing || !st->parser) {
919 /* no parsing needed: we just output the packet as is */
920 /* raw data support */
921 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
922 compute_pkt_fields(s, st, NULL, pkt);
923 s->cur_st = NULL;
924 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
925 (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
926 ff_reduce_index(s, st->index);
927 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
928 }
929 break;
930 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
931 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
932 st->cur_ptr, st->cur_len,
933 st->cur_pkt.pts, st->cur_pkt.dts,
934 st->cur_pkt.pos);
935 st->cur_pkt.pts = AV_NOPTS_VALUE;
936 st->cur_pkt.dts = AV_NOPTS_VALUE;
937 /* increment read pointer */
938 st->cur_ptr += len;
939 st->cur_len -= len;
940
941 /* return packet if any */
942 if (pkt->size) {
943 got_packet:
944 pkt->duration = 0;
945 pkt->stream_index = st->index;
946 pkt->pts = st->parser->pts;
947 pkt->dts = st->parser->dts;
948 pkt->pos = st->parser->pos;
949 pkt->destruct = NULL;
950 compute_pkt_fields(s, st, st->parser, pkt);
951
952 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
953 ff_reduce_index(s, st->index);
954 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
955 0, 0, AVINDEX_KEYFRAME);
956 }
957
958 break;
959 }
960 } else {
961 /* free packet */
962 av_free_packet(&st->cur_pkt);
963 s->cur_st = NULL;
964 }
965 } else {
966 AVPacket cur_pkt;
967 /* read next packet */
968 ret = av_read_packet(s, &cur_pkt);
969 if (ret < 0) {
970 if (ret == AVERROR(EAGAIN))
971 return ret;
972 /* return the last frames, if any */
973 for(i = 0; i < s->nb_streams; i++) {
974 st = s->streams[i];
975 if (st->parser && st->need_parsing) {
976 av_parser_parse2(st->parser, st->codec,
977 &pkt->data, &pkt->size,
978 NULL, 0,
979 AV_NOPTS_VALUE, AV_NOPTS_VALUE,
980 AV_NOPTS_VALUE);
981 if (pkt->size)
982 goto got_packet;
983 }
984 }
985 /* no more packets: really terminate parsing */
986 return ret;
987 }
988 st = s->streams[cur_pkt.stream_index];
989 st->cur_pkt= cur_pkt;
990
991 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
992 st->cur_pkt.dts != AV_NOPTS_VALUE &&
993 st->cur_pkt.pts < st->cur_pkt.dts){
994 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
995 st->cur_pkt.stream_index,
996 st->cur_pkt.pts,
997 st->cur_pkt.dts,
998 st->cur_pkt.size);
999 // av_free_packet(&st->cur_pkt);
1000 // return -1;
1001 }
1002
1003 if(s->debug & FF_FDEBUG_TS)
1004 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1005 st->cur_pkt.stream_index,
1006 st->cur_pkt.pts,
1007 st->cur_pkt.dts,
1008 st->cur_pkt.size,
1009 st->cur_pkt.flags);
1010
1011 s->cur_st = st;
1012 st->cur_ptr = st->cur_pkt.data;
1013 st->cur_len = st->cur_pkt.size;
1014 if (st->need_parsing && !st->parser) {
1015 st->parser = av_parser_init(st->codec->codec_id);
1016 if (!st->parser) {
1017 /* no parser available: just output the raw packets */
1018 st->need_parsing = AVSTREAM_PARSE_NONE;
1019 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1020 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1021 }
1022 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1023 st->parser->next_frame_offset=
1024 st->parser->cur_offset= st->cur_pkt.pos;
1025 }
1026 }
1027 }
1028 }
1029 if(s->debug & FF_FDEBUG_TS)
1030 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1031 pkt->stream_index,
1032 pkt->pts,
1033 pkt->dts,
1034 pkt->size,
1035 pkt->flags);
1036
1037 return 0;
1038 }
1039
1040 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1041 {
1042 AVPacketList *pktl;
1043 int eof=0;
1044 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1045
1046 for(;;){
1047 pktl = s->packet_buffer;
1048 if (pktl) {
1049 AVPacket *next_pkt= &pktl->pkt;
1050
1051 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1052 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1053 if( pktl->pkt.stream_index == next_pkt->stream_index
1054 && next_pkt->dts < pktl->pkt.dts
1055 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1056 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1057 next_pkt->pts= pktl->pkt.dts;
1058 }
1059 pktl= pktl->next;
1060 }
1061 pktl = s->packet_buffer;
1062 }
1063
1064 if( next_pkt->pts != AV_NOPTS_VALUE
1065 || next_pkt->dts == AV_NOPTS_VALUE
1066 || !genpts || eof){
1067 /* read packet from packet buffer, if there is data */
1068 *pkt = *next_pkt;
1069 s->packet_buffer = pktl->next;
1070 av_free(pktl);
1071 return 0;
1072 }
1073 }
1074 if(genpts){
1075 int ret= av_read_frame_internal(s, pkt);
1076 if(ret<0){
1077 if(pktl && ret != AVERROR(EAGAIN)){
1078 eof=1;
1079 continue;
1080 }else
1081 return ret;
1082 }
1083
1084 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1085 &s->packet_buffer_end)) < 0)
1086 return AVERROR(ENOMEM);
1087 }else{
1088 assert(!s->packet_buffer);
1089 return av_read_frame_internal(s, pkt);
1090 }
1091 }
1092 }
1093
1094 /* XXX: empty the packet queue */
1095 static void flush_packet_queue(AVFormatContext *s)
1096 {
1097 AVPacketList *pktl;
1098
1099 for(;;) {
1100 pktl = s->packet_buffer;
1101 if (!pktl)
1102 break;
1103 s->packet_buffer = pktl->next;
1104 av_free_packet(&pktl->pkt);
1105 av_free(pktl);
1106 }
1107 while(s->raw_packet_buffer){
1108 pktl = s->raw_packet_buffer;
1109 s->raw_packet_buffer = pktl->next;
1110 av_free_packet(&pktl->pkt);
1111 av_free(pktl);
1112 }
1113 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1114 }
1115
1116 /*******************************************************/
1117 /* seek support */
1118
1119 int av_find_default_stream_index(AVFormatContext *s)
1120 {
1121 int first_audio_index = -1;
1122 int i;
1123 AVStream *st;
1124
1125 if (s->nb_streams <= 0)
1126 return -1;
1127 for(i = 0; i < s->nb_streams; i++) {
1128 st = s->streams[i];
1129 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1130 return i;
1131 }
1132 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1133 first_audio_index = i;
1134 }
1135 return first_audio_index >= 0 ? first_audio_index : 0;
1136 }
1137
1138 /**
1139 * Flush the frame reader.
1140 */
1141 static void av_read_frame_flush(AVFormatContext *s)
1142 {
1143 AVStream *st;
1144 int i;
1145
1146 flush_packet_queue(s);
1147
1148 s->cur_st = NULL;
1149
1150 /* for each stream, reset read state */
1151 for(i = 0; i < s->nb_streams; i++) {
1152 st = s->streams[i];
1153
1154 if (st->parser) {
1155 av_parser_close(st->parser);
1156 st->parser = NULL;
1157 av_free_packet(&st->cur_pkt);
1158 }
1159 st->last_IP_pts = AV_NOPTS_VALUE;
1160 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1161 st->reference_dts = AV_NOPTS_VALUE;
1162 /* fail safe */
1163 st->cur_ptr = NULL;
1164 st->cur_len = 0;
1165
1166 st->probe_packets = MAX_PROBE_PACKETS;
1167 }
1168 }
1169
1170 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1171 int i;
1172
1173 for(i = 0; i < s->nb_streams; i++) {
1174 AVStream *st = s->streams[i];
1175
1176 st->cur_dts = av_rescale(timestamp,
1177 st->time_base.den * (int64_t)ref_st->time_base.num,
1178 st->time_base.num * (int64_t)ref_st->time_base.den);
1179 }
1180 }
1181
1182 void ff_reduce_index(AVFormatContext *s, int stream_index)
1183 {
1184 AVStream *st= s->streams[stream_index];
1185 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1186
1187 if((unsigned)st->nb_index_entries >= max_entries){
1188 int i;
1189 for(i=0; 2*i<st->nb_index_entries; i++)
1190 st->index_entries[i]= st->index_entries[2*i];
1191 st->nb_index_entries= i;
1192 }
1193 }
1194
1195 int av_add_index_entry(AVStream *st,
1196 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1197 {
1198 AVIndexEntry *entries, *ie;
1199 int index;
1200
1201 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1202 return -1;
1203
1204 entries = av_fast_realloc(st->index_entries,
1205 &st->index_entries_allocated_size,
1206 (st->nb_index_entries + 1) *
1207 sizeof(AVIndexEntry));
1208 if(!entries)
1209 return -1;
1210
1211 st->index_entries= entries;
1212
1213 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1214
1215 if(index<0){
1216 index= st->nb_index_entries++;
1217 ie= &entries[index];
1218 assert(index==0 || ie[-1].timestamp < timestamp);
1219 }else{
1220 ie= &entries[index];
1221 if(ie->timestamp != timestamp){
1222 if(ie->timestamp <= timestamp)
1223 return -1;
1224 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1225 st->nb_index_entries++;
1226 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1227 distance= ie->min_distance;
1228 }
1229
1230 ie->pos = pos;
1231 ie->timestamp = timestamp;
1232 ie->min_distance= distance;
1233 ie->size= size;
1234 ie->flags = flags;
1235
1236 return index;
1237 }
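/* Typical use (illustrative): demuxers and the generic index code register each
 * keyframe as
 *     av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
 * so that later seeks can binary-search the sorted entries, see
 * av_index_search_timestamp() below. */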
1238
1239 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1240 int flags)
1241 {
1242 AVIndexEntry *entries= st->index_entries;
1243 int nb_entries= st->nb_index_entries;
1244 int a, b, m;
1245 int64_t timestamp;
1246
1247 a = - 1;
1248 b = nb_entries;
1249
1250 while (b - a > 1) {
1251 m = (a + b) >> 1;
1252 timestamp = entries[m].timestamp;
1253 if(timestamp >= wanted_timestamp)
1254 b = m;
1255 if(timestamp <= wanted_timestamp)
1256 a = m;
1257 }
1258 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1259
1260 if(!(flags & AVSEEK_FLAG_ANY)){
1261 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1262 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1263 }
1264 }
1265
1266 if(m == nb_entries)
1267 return -1;
1268 return m;
1269 }
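/* Example (illustrative): with index entries at timestamps {0, 40, 80} and
 * wanted_timestamp = 50, AVSEEK_FLAG_BACKWARD returns the entry at 40 while the
 * default forward direction returns the entry at 80; AVSEEK_FLAG_ANY also
 * accepts non-keyframe entries. */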
1270
1271 #define DEBUG_SEEK
1272
1273 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1274 AVInputFormat *avif= s->iformat;
1275 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1276 int64_t ts_min, ts_max, ts;
1277 int index;
1278 AVStream *st;
1279
1280 if (stream_index < 0)
1281 return -1;
1282
1283 #ifdef DEBUG_SEEK
1284 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1285 #endif
1286
1287 ts_max=
1288 ts_min= AV_NOPTS_VALUE;
1289 pos_limit= -1; //gcc falsely says it may be uninitialized
1290
1291 st= s->streams[stream_index];
1292 if(st->index_entries){
1293 AVIndexEntry *e;
1294
1295 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1296 index= FFMAX(index, 0);
1297 e= &st->index_entries[index];
1298
1299 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1300 pos_min= e->pos;
1301 ts_min= e->timestamp;
1302 #ifdef DEBUG_SEEK
1303 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1304 pos_min,ts_min);
1305 #endif
1306 }else{
1307 assert(index==0);
1308 }
1309
1310 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1311 assert(index < st->nb_index_entries);
1312 if(index >= 0){
1313 e= &st->index_entries[index];
1314 assert(e->timestamp >= target_ts);
1315 pos_max= e->pos;
1316 ts_max= e->timestamp;
1317 pos_limit= pos_max - e->min_distance;
1318 #ifdef DEBUG_SEEK
1319 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1320 pos_max,pos_limit, ts_max);
1321 #endif
1322 }
1323 }
1324
1325 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1326 if(pos<0)
1327 return -1;
1328
1329 /* do the seek */
1330 url_fseek(s->pb, pos, SEEK_SET);
1331
1332 av_update_cur_dts(s, st, ts);
1333
1334 return 0;
1335 }
1336
1337 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1338 int64_t pos, ts;
1339 int64_t start_pos, filesize;
1340 int no_change;
1341
1342 #ifdef DEBUG_SEEK
1343 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1344 #endif
1345
1346 if(ts_min == AV_NOPTS_VALUE){
1347 pos_min = s->data_offset;
1348 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1349 if (ts_min == AV_NOPTS_VALUE)
1350 return -1;
1351 }
1352
1353 if(ts_max == AV_NOPTS_VALUE){
1354 int step= 1024;
1355 filesize = url_fsize(s->pb);
1356 pos_max = filesize - 1;
1357 do{
1358 pos_max -= step;
1359 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1360 step += step;
1361 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1362 if (ts_max == AV_NOPTS_VALUE)
1363 return -1;
1364
1365 for(;;){
1366 int64_t tmp_pos= pos_max + 1;
1367 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1368 if(tmp_ts == AV_NOPTS_VALUE)
1369 break;
1370 ts_max= tmp_ts;
1371 pos_max= tmp_pos;
1372 if(tmp_pos >= filesize)
1373 break;
1374 }
1375 pos_limit= pos_max;
1376 }
1377
1378 if(ts_min > ts_max){
1379 return -1;
1380 }else if(ts_min == ts_max){
1381 pos_limit= pos_min;
1382 }
1383
1384 no_change=0;
1385 while (pos_min < pos_limit) {
1386 #ifdef DEBUG_SEEK
1387 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1388 pos_min, pos_max,
1389 ts_min, ts_max);
1390 #endif
1391 assert(pos_limit <= pos_max);
1392
1393 if(no_change==0){
1394 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1395 // interpolate position (better than plain bisection)
1396 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1397 + pos_min - approximate_keyframe_distance;
1398 }else if(no_change==1){
1399 // bisection, if interpolation failed to change min or max pos last time
1400 pos = (pos_min + pos_limit)>>1;
1401 }else{
1402 /* linear search if bisection failed, can only happen if there
1403 are very few or no keyframes between min/max */
1404 pos=pos_min;
1405 }
1406 if(pos <= pos_min)
1407 pos= pos_min + 1;
1408 else if(pos > pos_limit)
1409 pos= pos_limit;
1410 start_pos= pos;
1411
1412 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1413 if(pos == pos_max)
1414 no_change++;
1415 else
1416 no_change=0;
1417 #ifdef DEBUG_SEEK
1418 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1419 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1420 start_pos, no_change);
1421 #endif
1422 if(ts == AV_NOPTS_VALUE){
1423 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1424 return -1;
1425 }
1426 assert(ts != AV_NOPTS_VALUE);
1427 if (target_ts <= ts) {
1428 pos_limit = start_pos - 1;
1429 pos_max = pos;
1430 ts_max = ts;
1431 }
1432 if (target_ts >= ts) {
1433 pos_min = pos;
1434 ts_min = ts;
1435 }
1436 }
1437
1438 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1439 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1440 #ifdef DEBUG_SEEK
1441 pos_min = pos;
1442 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1443 pos_min++;
1444 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1445 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1446 pos, ts_min, target_ts, ts_max);
1447 #endif
1448 *ts_ret= ts;
1449 return pos;
1450 }
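/* Search strategy note: the next position is first estimated by linear
 * interpolation between (pos_min, ts_min) and (pos_max, ts_max); if an
 * iteration fails to narrow the interval the code falls back to bisection, and
 * after a second failure to a linear scan from pos_min, which only happens when
 * there are very few or no keyframes between min and max. */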
1451
1452 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1453 int64_t pos_min, pos_max;
1454 #if 0
1455 AVStream *st;
1456
1457 if (stream_index < 0)
1458 return -1;
1459
1460 st= s->streams[stream_index];
1461 #endif
1462
1463 pos_min = s->data_offset;
1464 pos_max = url_fsize(s->pb) - 1;
1465
1466 if (pos < pos_min) pos= pos_min;
1467 else if(pos > pos_max) pos= pos_max;
1468
1469 url_fseek(s->pb, pos, SEEK_SET);
1470
1471 #if 0
1472 av_update_cur_dts(s, st, ts);
1473 #endif
1474 return 0;
1475 }
1476
1477 static int av_seek_frame_generic(AVFormatContext *s,
1478 int stream_index, int64_t timestamp, int flags)
1479 {
1480 int index, ret;
1481 AVStream *st;
1482 AVIndexEntry *ie;
1483
1484 st = s->streams[stream_index];
1485
1486 index = av_index_search_timestamp(st, timestamp, flags);
1487
1488 if(index < 0 || index==st->nb_index_entries-1){
1489 int i;
1490 AVPacket pkt;
1491
1492 if(st->nb_index_entries){
1493 assert(st->index_entries);
1494 ie= &st->index_entries[st->nb_index_entries-1];
1495 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1496 return ret;
1497 av_update_cur_dts(s, st, ie->timestamp);
1498 }else{
1499 if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
1500 return ret;
1501 }
1502 for(i=0;; i++) {
1503 int ret;
1504 do{
1505 ret = av_read_frame(s, &pkt);
1506 }while(ret == AVERROR(EAGAIN));
1507 if(ret<0)
1508 break;
1509 av_free_packet(&pkt);
1510 if(stream_index == pkt.stream_index){
1511 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1512 break;
1513 }
1514 }
1515 index = av_index_search_timestamp(st, timestamp, flags);
1516 }
1517 if (index < 0)
1518 return -1;
1519
1520 av_read_frame_flush(s);
1521 if (s->iformat->read_seek){
1522 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1523 return 0;
1524 }
1525 ie = &st->index_entries[index];
1526 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1527 return ret;
1528 av_update_cur_dts(s, st, ie->timestamp);
1529
1530 return 0;
1531 }
1532
1533 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1534 {
1535 int ret;
1536 AVStream *st;
1537
1538 av_read_frame_flush(s);
1539
1540 if(flags & AVSEEK_FLAG_BYTE)
1541 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1542
1543 if(stream_index < 0){
1544 stream_index= av_find_default_stream_index(s);
1545 if(stream_index < 0)
1546 return -1;
1547
1548 st= s->streams[stream_index];
1549 /* timestamp for default must be expressed in AV_TIME_BASE units */
1550 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1551 }
1552
1553 /* first, we try the format specific seek */
1554 if (s->iformat->read_seek)
1555 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1556 else
1557 ret = -1;
1558 if (ret >= 0) {
1559 return 0;
1560 }
1561
1562 if(s->iformat->read_timestamp)
1563 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1564 else
1565 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1566 }
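#if 0
/* Illustrative sketch: seek to the keyframe at or before the 10 second mark.
 * 'ic' stands for an already-opened AVFormatContext. With stream_index = -1 the
 * timestamp is given in AV_TIME_BASE units and rescaled internally to the
 * default stream's time base. */
if (av_seek_frame(ic, -1, 10 * AV_TIME_BASE, AVSEEK_FLAG_BACKWARD) < 0)
    av_log(ic, AV_LOG_ERROR, "seek to 10s failed\n");
#endif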
1567
1568 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1569 {
1570 if(min_ts > ts || max_ts < ts)
1571 return -1;
1572
1573 av_read_frame_flush(s);
1574
1575 if (s->iformat->read_seek2)
1576 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1577
1578 if(s->iformat->read_timestamp){
1579 //try to seek via read_timestamp()
1580 }
1581
1582 //Fall back to the old API if the new one is not implemented but the old one is.
1583 //Note that the old API has somewhat different semantics.
1584 if(s->iformat->read_seek || 1)
1585 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1586
1587 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1588 }
1589
1590 /*******************************************************/
1591
1592 /**
1593 * Return TRUE if any stream in the file has an accurate duration.
1594 *
1595 * @return TRUE if the stream has accurate duration for at least one component.
1596 */
1597 static int av_has_duration(AVFormatContext *ic)
1598 {
1599 int i;
1600 AVStream *st;
1601
1602 for(i = 0;i < ic->nb_streams; i++) {
1603 st = ic->streams[i];
1604 if (st->duration != AV_NOPTS_VALUE)
1605 return 1;
1606 }
1607 return 0;
1608 }
1609
1610 /**
1611 * Estimate the file timings from those of the component streams.
1612 *
1613 * Also computes the global bitrate if possible.
1614 */
1615 static void av_update_stream_timings(AVFormatContext *ic)
1616 {
1617 int64_t start_time, start_time1, end_time, end_time1;
1618 int64_t duration, duration1;
1619 int i;
1620 AVStream *st;
1621
1622 start_time = INT64_MAX;
1623 end_time = INT64_MIN;
1624 duration = INT64_MIN;
1625 for(i = 0;i < ic->nb_streams; i++) {
1626 st = ic->streams[i];
1627 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1628 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1629 if (start_time1 < start_time)
1630 start_time = start_time1;
1631 if (st->duration != AV_NOPTS_VALUE) {
1632 end_time1 = start_time1
1633 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1634 if (end_time1 > end_time)
1635 end_time = end_time1;
1636 }
1637 }
1638 if (st->duration != AV_NOPTS_VALUE) {
1639 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1640 if (duration1 > duration)
1641 duration = duration1;
1642 }
1643 }
1644 if (start_time != INT64_MAX) {
1645 ic->start_time = start_time;
1646 if (end_time != INT64_MIN) {
1647 if (end_time - start_time > duration)
1648 duration = end_time - start_time;
1649 }
1650 }
1651 if (duration != INT64_MIN) {
1652 ic->duration = duration;
1653 if (ic->file_size > 0) {
1654 /* compute the bitrate */
1655 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1656 (double)ic->duration;
1657 }
1658 }
1659 }
1660
1661 static void fill_all_stream_timings(AVFormatContext *ic)
1662 {
1663 int i;
1664 AVStream *st;
1665
1666 av_update_stream_timings(ic);
1667 for(i = 0;i < ic->nb_streams; i++) {
1668 st = ic->streams[i];
1669 if (st->start_time == AV_NOPTS_VALUE) {
1670 if(ic->start_time != AV_NOPTS_VALUE)
1671 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1672 if(ic->duration != AV_NOPTS_VALUE)
1673 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1674 }
1675 }
1676 }
1677
1678 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1679 {
1680 int64_t filesize, duration;
1681 int bit_rate, i;
1682 AVStream *st;
1683
1684 /* if bit_rate is already set, we believe it */
1685 if (ic->bit_rate == 0) {
1686 bit_rate = 0;
1687 for(i=0;i<ic->nb_streams;i++) {
1688 st = ic->streams[i];
1689 bit_rate += st->codec->bit_rate;
1690 }
1691 ic->bit_rate = bit_rate;
1692 }
1693
1694 /* if duration is already set, we believe it */
1695 if (ic->duration == AV_NOPTS_VALUE &&
1696 ic->bit_rate != 0 &&
1697 ic->file_size != 0) {
1698 filesize = ic->file_size;
1699 if (filesize > 0) {
1700 for(i = 0; i < ic->nb_streams; i++) {
1701 st = ic->streams[i];
1702 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1703 if (st->duration == AV_NOPTS_VALUE)
1704 st->duration = duration;
1705 }
1706 }
1707 }
1708 }
1709
1710 #define DURATION_MAX_READ_SIZE 250000
1711
1712 /* only usable for MPEG-PS streams */
1713 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1714 {
1715 AVPacket pkt1, *pkt = &pkt1;
1716 AVStream *st;
1717 int read_size, i, ret;
1718 int64_t end_time;
1719 int64_t filesize, offset, duration;
1720
1721 ic->cur_st = NULL;
1722
1723 /* flush packet queue */
1724 flush_packet_queue(ic);
1725
1726 for(i=0;i<ic->nb_streams;i++) {
1727 st = ic->streams[i];
1728 if (st->parser) {
1729 av_parser_close(st->parser);
1730 st->parser= NULL;
1731 av_free_packet(&st->cur_pkt);
1732 }
1733 }
1734
1735 /* we read the first packets to get the first PTS (not fully
1736 accurate, but it is enough for now) */
1737 url_fseek(ic->pb, 0, SEEK_SET);
1738 read_size = 0;
1739 for(;;) {
1740 if (read_size >= DURATION_MAX_READ_SIZE)
1741 break;
1742 /* if all info is available, we can stop */
1743 for(i = 0;i < ic->nb_streams; i++) {
1744 st = ic->streams[i];
1745 if (st->start_time == AV_NOPTS_VALUE)
1746 break;
1747 }
1748 if (i == ic->nb_streams)
1749 break;
1750
1751 do{
1752 ret = av_read_packet(ic, pkt);
1753 }while(ret == AVERROR(EAGAIN));
1754 if (ret != 0)
1755 break;
1756 read_size += pkt->size;
1757 st = ic->streams[pkt->stream_index];
1758 if (pkt->pts != AV_NOPTS_VALUE) {
1759 if (st->start_time == AV_NOPTS_VALUE)
1760 st->start_time = pkt->pts;
1761 }
1762 av_free_packet(pkt);
1763 }
1764
1765 /* estimate the end time (duration) */
1766 /* XXX: may need to support wrapping */
1767 filesize = ic->file_size;
1768 offset = filesize - DURATION_MAX_READ_SIZE;
1769 if (offset < 0)
1770 offset = 0;
1771
1772 url_fseek(ic->pb, offset, SEEK_SET);
1773 read_size = 0;
1774 for(;;) {
1775 if (read_size >= DURATION_MAX_READ_SIZE)
1776 break;
1777
1778 do{
1779 ret = av_read_packet(ic, pkt);
1780 }while(ret == AVERROR(EAGAIN));
1781 if (ret != 0)
1782 break;
1783 read_size += pkt->size;
1784 st = ic->streams[pkt->stream_index];
1785 if (pkt->pts != AV_NOPTS_VALUE &&
1786 st->start_time != AV_NOPTS_VALUE) {
1787 end_time = pkt->pts;
1788 duration = end_time - st->start_time;
1789 if (duration > 0) {
1790 if (st->duration == AV_NOPTS_VALUE ||
1791 st->duration < duration)
1792 st->duration = duration;
1793 }
1794 }
1795 av_free_packet(pkt);
1796 }
1797
1798 fill_all_stream_timings(ic);
1799
1800 url_fseek(ic->pb, old_offset, SEEK_SET);
1801 for(i=0; i<ic->nb_streams; i++){
1802 st= ic->streams[i];
1803 st->cur_dts= st->first_dts;
1804 st->last_IP_pts = AV_NOPTS_VALUE;
1805 }
1806 }
1807
1808 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1809 {
1810 int64_t file_size;
1811
1812 /* get the file size, if possible */
1813 if (ic->iformat->flags & AVFMT_NOFILE) {
1814 file_size = 0;
1815 } else {
1816 file_size = url_fsize(ic->pb);
1817 if (file_size < 0)
1818 file_size = 0;
1819 }
1820 ic->file_size = file_size;
1821
1822 if ((!strcmp(ic->iformat->name, "mpeg") ||
1823 !strcmp(ic->iformat->name, "mpegts")) &&
1824 file_size && !url_is_streamed(ic->pb)) {
1825 /* get accurate estimate from the PTSes */
1826 av_estimate_timings_from_pts(ic, old_offset);
1827 } else if (av_has_duration(ic)) {
1828 /* at least one component has timings - we use them for all
1829 the components */
1830 fill_all_stream_timings(ic);
1831 } else {
1832 /* less precise: use bitrate info */
1833 av_estimate_timings_from_bit_rate(ic);
1834 }
1835 av_update_stream_timings(ic);
1836
1837 #if 0
1838 {
1839 int i;
1840 AVStream *st;
1841 for(i = 0;i < ic->nb_streams; i++) {
1842 st = ic->streams[i];
1843 printf("%d: start_time: %0.3f duration: %0.3f\n",
1844 i, (double)st->start_time / AV_TIME_BASE,
1845 (double)st->duration / AV_TIME_BASE);
1846 }
1847 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1848 (double)ic->start_time / AV_TIME_BASE,
1849 (double)ic->duration / AV_TIME_BASE,
1850 ic->bit_rate / 1000);
1851 }
1852 #endif
1853 }
1854
1855 static int has_codec_parameters(AVCodecContext *enc)
1856 {
1857 int val;
1858 switch(enc->codec_type) {
1859 case CODEC_TYPE_AUDIO:
1860 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
1861 if(!enc->frame_size &&
1862 (enc->codec_id == CODEC_ID_VORBIS ||
1863 enc->codec_id == CODEC_ID_AAC))
1864 return 0;
1865 break;
1866 case CODEC_TYPE_VIDEO:
1867 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1868 break;
1869 default:
1870 val = 1;
1871 break;
1872 }
1873 return enc->codec_id != CODEC_ID_NONE && val != 0;
1874 }
1875
1876 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
1877 {
1878 int16_t *samples;
1879 AVCodec *codec;
1880 int got_picture, data_size, ret=0;
1881 AVFrame picture;
1882
1883 if(!st->codec->codec){
1884 codec = avcodec_find_decoder(st->codec->codec_id);
1885 if (!codec)
1886 return -1;
1887 ret = avcodec_open(st->codec, codec);
1888 if (ret < 0)
1889 return ret;
1890 }
1891
1892 if(!has_codec_parameters(st->codec)){
1893 switch(st->codec->codec_type) {
1894 case CODEC_TYPE_VIDEO:
1895 avcodec_get_frame_defaults(&picture);
1896 ret = avcodec_decode_video2(st->codec, &picture,
1897 &got_picture, avpkt);
1898 break;
1899 case CODEC_TYPE_AUDIO:
1900 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1901 samples = av_malloc(data_size);
1902 if (!samples)
1903 goto fail;
1904 ret = avcodec_decode_audio3(st->codec, samples,
1905 &data_size, avpkt);
1906 av_free(samples);
1907 break;
1908 default:
1909 break;
1910 }
1911 }
1912 fail:
1913 return ret;
1914 }
1915
1916 unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id)
1917 {
1918 while (tags->id != CODEC_ID_NONE) {
1919 if (tags->id == id)
1920 return tags->tag;
1921 tags++;
1922 }
1923 return 0;
1924 }
1925
1926 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1927 {
1928 int i;
1929 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1930 if(tag == tags[i].tag)
1931 return tags[i].id;
1932 }
1933 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1934 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1935 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1936 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1937 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1938 return tags[i].id;
1939 }
1940 return CODEC_ID_NONE;
1941 }
1942
1943 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1944 {
1945 int i;
1946 for(i=0; tags && tags[i]; i++){
1947 int tag= ff_codec_get_tag(tags[i], id);
1948 if(tag) return tag;
1949 }
1950 return 0;
1951 }
1952
1953 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1954 {
1955 int i;
1956 for(i=0; tags && tags[i]; i++){
1957 enum CodecID id= ff_codec_get_id(tags[i], tag);
1958 if(id!=CODEC_ID_NONE) return id;
1959 }
1960 return CODEC_ID_NONE;
1961 }
1962
1963 static void compute_chapters_end(AVFormatContext *s)
1964 {
1965 unsigned int i;
1966
1967 for (i=0; i+1<s->nb_chapters; i++)
1968 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1969 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1970 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1971 s->chapters[i]->end = s->chapters[i+1]->start;
1972 }
1973
1974 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1975 assert(s->start_time != AV_NOPTS_VALUE);
1976 assert(s->duration > 0);
1977 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1978 AV_TIME_BASE_Q,
1979 s->chapters[i]->time_base);
1980 }
1981 }
1982
1983 #define MAX_STD_TIMEBASES (60*12+5)
1984 static int get_std_framerate(int i){
1985 if(i<60*12) return i*1001;
1986 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
1987 }
1988
1989 /*
1990 * Is the time base unreliable?
1991 * This is a heuristic to balance between quick acceptance of the values in
1992 * the headers vs. some extra checks.
1993 * Old DivX and Xvid often have nonsense timebases like 1 fps or 2 fps.
1994 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1995 * And there are "variable" fps files this needs to detect as well.
1996 */
1997 static int tb_unreliable(AVCodecContext *c){
1998 if( c->time_base.den >= 101L*c->time_base.num
1999 || c->time_base.den < 5L*c->time_base.num
2000 /* || c->codec_tag == AV_RL32("DIVX")
2001 || c->codec_tag == AV_RL32("XVID")*/
2002 || c->codec_id == CODEC_ID_MPEG2VIDEO
2003 || c->codec_id == CODEC_ID_H264
2004 )
2005 return 1;
2006 return 0;
2007 }
2008
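/* Probe the input: read packets (buffering them in ic->packet_buffer so they
 * are not lost for the caller) until every stream has usable codec parameters,
 * a first DTS and a plausible frame rate guess, or until probesize or
 * max_analyze_duration is exceeded. Some streams may additionally be decoded
 * via try_decode_frame() to obtain the missing parameters. */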
2009 int av_find_stream_info(AVFormatContext *ic)
2010 {
2011 int i, count, ret, read_size, j;
2012 AVStream *st;
2013 AVPacket pkt1, *pkt;
2014 int64_t last_dts[MAX_STREAMS];
2015 int64_t duration_gcd[MAX_STREAMS]={0};
2016 int duration_count[MAX_STREAMS]={0};
2017 double (*duration_error)[MAX_STD_TIMEBASES];
2018 int64_t old_offset = url_ftell(ic->pb);
2019 int64_t codec_info_duration[MAX_STREAMS]={0};
2020 int codec_info_nb_frames[MAX_STREAMS]={0};
2021
2022 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2023 if (!duration_error) return AVERROR(ENOMEM);
2024
2025 for(i=0;i<ic->nb_streams;i++) {
2026 st = ic->streams[i];
2027 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2028 /* if(!st->time_base.num)
2029 st->time_base= */
2030 if(!st->codec->time_base.num)
2031 st->codec->time_base= st->time_base;
2032 }
2033         // the parser is needed here only for the extradata split code below
2034 if (!st->parser) {
2035 st->parser = av_parser_init(st->codec->codec_id);
2036 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2037 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2038 }
2039 }
2040 }
2041
2042 for(i=0;i<MAX_STREAMS;i++){
2043 last_dts[i]= AV_NOPTS_VALUE;
2044 }
2045
2046 count = 0;
2047 read_size = 0;
2048 for(;;) {
2049 if(url_interrupt_cb()){
2050 ret= AVERROR(EINTR);
2051 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2052 break;
2053 }
2054
2055 /* check if one codec still needs to be handled */
2056 for(i=0;i<ic->nb_streams;i++) {
2057 st = ic->streams[i];
2058 if (!has_codec_parameters(st->codec))
2059 break;
2060 /* variable fps and no guess at the real fps */
2061 if( tb_unreliable(st->codec)
2062 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2063 break;
2064 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2065 break;
2066 if(st->first_dts == AV_NOPTS_VALUE)
2067 break;
2068 }
2069 if (i == ic->nb_streams) {
2070 /* NOTE: if the format has no header, then we need to read
2071 some packets to get most of the streams, so we cannot
2072 stop here */
2073 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2074 /* if we found the info for all the codecs, we can stop */
2075 ret = count;
2076 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2077 break;
2078 }
2079 }
2080 /* we did not get all the codec info, but we read too much data */
2081 if (read_size >= ic->probesize) {
2082 ret = count;
2083             av_log(ic, AV_LOG_DEBUG, "probesize of %d bytes reached\n", ic->probesize);
2084 break;
2085 }
2086
2087         /* NOTE: a new stream can be added here if the file has no header
2088            (AVFMTCTX_NOHEADER) */
2089 ret = av_read_frame_internal(ic, &pkt1);
2090 if(ret == AVERROR(EAGAIN))
2091 continue;
2092 if (ret < 0) {
2093 /* EOF or error */
2094 ret = -1; /* we could not have all the codec parameters before EOF */
2095 for(i=0;i<ic->nb_streams;i++) {
2096 st = ic->streams[i];
2097 if (!has_codec_parameters(st->codec)){
2098 char buf[256];
2099 avcodec_string(buf, sizeof(buf), st->codec, 0);
2100 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
2101 } else {
2102 ret = 0;
2103 }
2104 }
2105 break;
2106 }
2107
2108 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2109 if(av_dup_packet(pkt) < 0) {
2110 av_free(duration_error);
2111 return AVERROR(ENOMEM);
2112 }
2113
2114 read_size += pkt->size;
2115
2116 st = ic->streams[pkt->stream_index];
2117 if(codec_info_nb_frames[st->index]>1) {
2118 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){
2119 av_log(ic, AV_LOG_DEBUG, "max_analyze_duration reached\n");
2120 break;
2121 }
2122 codec_info_duration[st->index] += pkt->duration;
2123 }
2124 if (pkt->duration != 0)
2125 codec_info_nb_frames[st->index]++;
2126
2127 {
2128 int index= pkt->stream_index;
2129 int64_t last= last_dts[index];
2130 int64_t duration= pkt->dts - last;
2131
2132 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2133 double dur= duration * av_q2d(st->time_base);
2134
2135 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2136 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2137 if(duration_count[index] < 2)
2138 memset(duration_error[index], 0, sizeof(*duration_error));
2139 for(i=1; i<MAX_STD_TIMEBASES; i++){
2140 int framerate= get_std_framerate(i);
2141 int ticks= lrintf(dur*framerate/(1001*12));
2142 double error= dur - ticks*1001*12/(double)framerate;
2143 duration_error[index][i] += error*error;
2144 }
2145 duration_count[index]++;
2146 // ignore the first 4 values, they might have some random jitter
2147 if (duration_count[index] > 3)
2148 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
2149 }
2150 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2151 last_dts[pkt->stream_index]= pkt->dts;
2152 }
2153 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2154 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2155             if(i){
2156                 st->codec->extradata_size= i;
2157                 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                     if (!st->codec->extradata) {
                         av_free(duration_error);
                         return AVERROR(ENOMEM);
                     }
2158                 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2159                 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2160 }
2161 }
2162
2163         /* If we still have no information, we try to open the codec and
2164            decode one frame. We try to avoid that in most cases as it
2165            takes longer and uses more memory. For MPEG-4, we need to
2166            decompress for QuickTime. */
2167 if (!has_codec_parameters(st->codec) /*&&
2168 (st->codec->codec_id == CODEC_ID_FLV1 ||
2169 st->codec->codec_id == CODEC_ID_H264 ||
2170 st->codec->codec_id == CODEC_ID_H263 ||
2171 st->codec->codec_id == CODEC_ID_H261 ||
2172 st->codec->codec_id == CODEC_ID_VORBIS ||
2173 st->codec->codec_id == CODEC_ID_MJPEG ||
2174 st->codec->codec_id == CODEC_ID_PNG ||
2175 st->codec->codec_id == CODEC_ID_PAM ||
2176 st->codec->codec_id == CODEC_ID_PGM ||
2177 st->codec->codec_id == CODEC_ID_PGMYUV ||
2178 st->codec->codec_id == CODEC_ID_PBM ||
2179 st->codec->codec_id == CODEC_ID_PPM ||
2180 st->codec->codec_id == CODEC_ID_SHORTEN ||
2181 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2182 try_decode_frame(st, pkt);
2183
2184 count++;
2185 }
2186
2187 // close codecs which were opened in try_decode_frame()
2188 for(i=0;i<ic->nb_streams;i++) {
2189 st = ic->streams[i];
2190 if(st->codec->codec)
2191 avcodec_close(st->codec);
2192 }
2193 for(i=0;i<ic->nb_streams;i++) {
2194 st = ic->streams[i];
2195 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2196 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2197 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2198
2199 // the check for tb_unreliable() is not completely correct, since this is not about handling
2200             // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2201 // ipmovie.c produces.
2202 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
2203 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2204 if(duration_count[i]
2205 && tb_unreliable(st->codec) /*&&
2206 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2207 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2208 int num = 0;
2209 double best_error= 2*av_q2d(st->time_base);
2210 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2211
2212 for(j=1; j<MAX_STD_TIMEBASES; j++){
2213 double error= duration_error[i][j] * get_std_framerate(j);
2214 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2215 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2216 if(error < best_error){
2217 best_error= error;
2218 num = get_std_framerate(j);
2219 }
2220 }
2221 // do not increase frame rate by more than 1 % in order to match a standard rate.
2222 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2223 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2224 }
2225
2226 if (!st->r_frame_rate.num){
2227 if( st->codec->time_base.den * (int64_t)st->time_base.num
2228 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2229 st->r_frame_rate.num = st->codec->time_base.den;
2230 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2231 }else{
2232 st->r_frame_rate.num = st->time_base.den;
2233 st->r_frame_rate.den = st->time_base.num;
2234 }
2235 }
2236 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2237 if(!st->codec->bits_per_coded_sample)
2238 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2239 }
2240 }
2241
2242 av_estimate_timings(ic, old_offset);
2243
2244 compute_chapters_end(ic);
2245
2246 #if 0
2247 /* correct DTS for B-frame streams with no timestamps */
2248 for(i=0;i<ic->nb_streams;i++) {
2249 st = ic->streams[i];
2250 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2251 if(b-frames){
2252 ppktl = &ic->packet_buffer;
2253 while(ppkt1){
2254 if(ppkt1->stream_index != i)
2255 continue;
2256 if(ppkt1->pkt->dts < 0)
2257 break;
2258 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2259 break;
2260 ppkt1->pkt->dts -= delta;
2261 ppkt1= ppkt1->next;
2262 }
2263 if(ppkt1)
2264 continue;
2265 st->cur_dts -= delta;
2266 }
2267 }
2268 }
2269 #endif
2270
2271 av_free(duration_error);
2272
2273 return ret;
2274 }
2275
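/* A minimal demuxing sketch using the calls above and below (error handling
 * omitted, "input.avi" is only an example name):
 *
 *     av_register_all();
 *     AVFormatContext *ic;
 *     if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) < 0)
 *         return -1;
 *     if (av_find_stream_info(ic) < 0)
 *         return -1;
 *     dump_format(ic, 0, "input.avi", 0);
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0)
 *         av_free_packet(&pkt);
 *     av_close_input_file(ic);
 */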
2276 /*******************************************************/
2277
2278 int av_read_play(AVFormatContext *s)
2279 {
2280 if (s->iformat->read_play)
2281 return s->iformat->read_play(s);
2282 if (s->pb)
2283 return av_url_read_fpause(s->pb, 0);
2284 return AVERROR(ENOSYS);
2285 }
2286
2287 int av_read_pause(AVFormatContext *s)
2288 {
2289 if (s->iformat->read_pause)
2290 return s->iformat->read_pause(s);
2291 if (s->pb)
2292 return av_url_read_fpause(s->pb, 1);
2293 return AVERROR(ENOSYS);
2294 }
2295
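/* Free everything attached to the demuxing context: per-stream data (parser,
 * codec context, extradata, index entries, metadata), programs, chapters, the
 * buffered packet queue and finally the context itself. The underlying
 * ByteIOContext is not closed here; use av_close_input_file() for that. */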
2296 void av_close_input_stream(AVFormatContext *s)
2297 {
2298 int i;
2299 AVStream *st;
2300
2301 if (s->iformat->read_close)
2302 s->iformat->read_close(s);
2303 for(i=0;i<s->nb_streams;i++) {
2304 /* free all data in a stream component */
2305 st = s->streams[i];
2306 if (st->parser) {
2307 av_parser_close(st->parser);
2308 av_free_packet(&st->cur_pkt);
2309 }
2310 av_metadata_free(&st->metadata);
2311 av_free(st->index_entries);
2312 av_free(st->codec->extradata);
2313 av_free(st->codec);
2314 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2315 av_free(st->filename);
2316 #endif
2317 av_free(st->priv_data);
2318 av_free(st);
2319 }
2320 for(i=s->nb_programs-1; i>=0; i--) {
2321 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2322 av_freep(&s->programs[i]->provider_name);
2323 av_freep(&s->programs[i]->name);
2324 #endif
2325 av_metadata_free(&s->programs[i]->metadata);
2326 av_freep(&s->programs[i]->stream_index);
2327 av_freep(&s->programs[i]);
2328 }
2329 av_freep(&s->programs);
2330 flush_packet_queue(s);
2331 av_freep(&s->priv_data);
2332 while(s->nb_chapters--) {
2333 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2334 av_free(s->chapters[s->nb_chapters]->title);
2335 #endif
2336 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2337 av_free(s->chapters[s->nb_chapters]);
2338 }
2339 av_freep(&s->chapters);
2340 av_metadata_free(&s->metadata);
2341 av_free(s);
2342 }
2343
2344 void av_close_input_file(AVFormatContext *s)
2345 {
2346 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2347 av_close_input_stream(s);
2348 if (pb)
2349 url_fclose(pb);
2350 }
2351
2352 AVStream *av_new_stream(AVFormatContext *s, int id)
2353 {
2354 AVStream *st;
2355 int i;
2356
2357 if (s->nb_streams >= MAX_STREAMS)
2358 return NULL;
2359
2360 st = av_mallocz(sizeof(AVStream));
2361 if (!st)
2362 return NULL;
2363
2364 st->codec= avcodec_alloc_context();
2365 if (s->iformat) {
2366 /* no default bitrate if decoding */
2367 st->codec->bit_rate = 0;
2368 }
2369 st->index = s->nb_streams;
2370 st->id = id;
2371 st->start_time = AV_NOPTS_VALUE;
2372 st->duration = AV_NOPTS_VALUE;
2373     /* We set the current DTS to 0 so that formats without any timestamps
2374        but with durations still get some timestamps; formats with some unknown
2375        timestamps have their first few packets buffered and the
2376        timestamps corrected before they are returned to the user. */
2377 st->cur_dts = 0;
2378 st->first_dts = AV_NOPTS_VALUE;
2379 st->probe_packets = MAX_PROBE_PACKETS;
2380
2381 /* default pts setting is MPEG-like */
2382 av_set_pts_info(st, 33, 1, 90000);
2383 st->last_IP_pts = AV_NOPTS_VALUE;
2384 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2385 st->pts_buffer[i]= AV_NOPTS_VALUE;
2386 st->reference_dts = AV_NOPTS_VALUE;
2387
2388 st->sample_aspect_ratio = (AVRational){0,1};
2389
2390 s->streams[s->nb_streams++] = st;
2391 return st;
2392 }
2393
2394 AVProgram *av_new_program(AVFormatContext *ac, int id)
2395 {
2396 AVProgram *program=NULL;
2397 int i;
2398
2399 #ifdef DEBUG_SI
2400 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2401 #endif
2402
2403 for(i=0; i<ac->nb_programs; i++)
2404 if(ac->programs[i]->id == id)
2405 program = ac->programs[i];
2406
2407 if(!program){
2408 program = av_mallocz(sizeof(AVProgram));
2409 if (!program)
2410 return NULL;
2411 dynarray_add(&ac->programs, &ac->nb_programs, program);
2412 program->discard = AVDISCARD_NONE;
2413 }
2414 program->id = id;
2415
2416 return program;
2417 }
2418
2419 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2420 {
2421 AVChapter *chapter = NULL;
2422 int i;
2423
2424 for(i=0; i<s->nb_chapters; i++)
2425 if(s->chapters[i]->id == id)
2426 chapter = s->chapters[i];
2427
2428 if(!chapter){
2429 chapter= av_mallocz(sizeof(AVChapter));
2430 if(!chapter)
2431 return NULL;
2432 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2433 }
2434 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2435 av_free(chapter->title);
2436 #endif
2437 av_metadata_set(&chapter->metadata, "title", title);
2438 chapter->id = id;
2439 chapter->time_base= time_base;
2440 chapter->start = start;
2441 chapter->end = end;
2442
2443 return chapter;
2444 }
2445
2446 /************************************************************/
2447 /* output media file */
2448
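/* A minimal muxing sketch using the functions in this section (a sketch only:
 * error handling and codec/stream setup are omitted, "output.avi" is just an
 * example name):
 *
 *     AVFormatContext *oc = avformat_alloc_context();
 *     oc->oformat = guess_format(NULL, "output.avi", NULL);
 *     url_fopen(&oc->pb, "output.avi", URL_WRONLY);
 *     AVStream *st = av_new_stream(oc, 0);
 *     // ... fill in st->codec parameters here ...
 *     av_set_parameters(oc, NULL);
 *     av_write_header(oc);
 *     // for every encoded packet:
 *     //     av_interleaved_write_frame(oc, &pkt);
 *     av_write_trailer(oc);
 *     url_fclose(oc->pb);
 */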
2449 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2450 {
2451 int ret;
2452
2453 if (s->oformat->priv_data_size > 0) {
2454 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2455 if (!s->priv_data)
2456 return AVERROR(ENOMEM);
2457 } else
2458 s->priv_data = NULL;
2459
2460 if (s->oformat->set_parameters) {
2461 ret = s->oformat->set_parameters(s, ap);
2462 if (ret < 0)
2463 return ret;
2464 }
2465 return 0;
2466 }
2467
2468 int av_write_header(AVFormatContext *s)
2469 {
2470 int ret, i;
2471 AVStream *st;
2472
2473 // some sanity checks
2474 for(i=0;i<s->nb_streams;i++) {
2475 st = s->streams[i];
2476
2477 switch (st->codec->codec_type) {
2478 case CODEC_TYPE_AUDIO:
2479 if(st->codec->sample_rate<=0){
2480 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2481 return -1;
2482 }
2483 if(!st->codec->block_align)
2484 st->codec->block_align = st->codec->channels *
2485 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2486 break;
2487 case CODEC_TYPE_VIDEO:
2488 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2489 av_log(s, AV_LOG_ERROR, "time base not set\n");
2490 return -1;
2491 }
2492 if(st->codec->width<=0 || st->codec->height<=0){
2493 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2494 return -1;
2495 }
2496 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2497 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2498 return -1;
2499 }
2500 break;
2501 }
2502
2503 if(s->oformat->codec_tag){
2504 if(st->codec->codec_tag){
2505 //FIXME
2506 //check that tag + id is in the table
2507 //if neither is in the table -> OK
2508 //if tag is in the table with another id -> FAIL
2509 //if id is in the table with another tag -> FAIL unless strict < ?
2510 }else
2511 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2512 }
2513
2514 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2515 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2516 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
2517 }
2518
2519 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2520 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2521 if (!s->priv_data)
2522 return AVERROR(ENOMEM);
2523 }
2524
2525 #if LIBAVFORMAT_VERSION_MAJOR < 53
2526 ff_metadata_mux_compat(s);
2527 #endif
2528
2529 if(s->oformat->write_header){
2530 ret = s->oformat->write_header(s);
2531 if (ret < 0)
2532 return ret;
2533 }
2534
2535 /* init PTS generation */
2536 for(i=0;i<s->nb_streams;i++) {
2537 int64_t den = AV_NOPTS_VALUE;
2538 st = s->streams[i];
2539
2540 switch (st->codec->codec_type) {
2541 case CODEC_TYPE_AUDIO:
2542 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2543 break;
2544 case CODEC_TYPE_VIDEO:
2545 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2546 break;
2547 default:
2548 break;
2549 }
2550 if (den != AV_NOPTS_VALUE) {
2551 if (den <= 0)
2552 return AVERROR_INVALIDDATA;
2553 av_frac_init(&st->pts, 0, 0, den);
2554 }
2555 }
2556 return 0;
2557 }
2558
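/* Fill in the packet fields needed for muxing: compute a duration from the
 * frame rate/sample rate if none is set, derive dts from pts through the
 * reorder buffer, verify that timestamps are monotonically increasing and
 * update the per-stream pts counter. */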
2559 //FIXME merge with compute_pkt_fields
2560 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2561 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2562 int num, den, frame_size, i;
2563
2564 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2565
2566 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2567 return -1;*/
2568
2569 /* duration field */
2570 if (pkt->duration == 0) {
2571 compute_frame_duration(&num, &den, st, NULL, pkt);
2572 if (den && num) {
2573 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
2574 }
2575 }
2576
2577 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2578 pkt->pts= pkt->dts;
2579
2580 //XXX/FIXME this is a temporary hack until all encoders output pts
2581 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2582 pkt->dts=
2583 // pkt->pts= st->cur_dts;
2584 pkt->pts= st->pts.val;
2585 }
2586
2587 //calculate dts from pts
2588 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2589 st->pts_buffer[0]= pkt->pts;
2590 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2591 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2592 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2593 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2594
2595 pkt->dts= st->pts_buffer[0];
2596 }
2597
2598 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2599         av_log(st->codec, AV_LOG_ERROR, "error, non-monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2600 return -1;
2601 }
2602 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2603 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2604 return -1;
2605 }
2606
2607 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2608 st->cur_dts= pkt->dts;
2609 st->pts.val= pkt->dts;
2610
2611 /* update pts */
2612 switch (st->codec->codec_type) {
2613 case CODEC_TYPE_AUDIO:
2614 frame_size = get_audio_frame_size(st->codec, pkt->size);
2615
2616 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2617 likely equal to the encoder delay, but it would be better if we
2618 had the real timestamps from the encoder */
2619 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2620 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2621 }
2622 break;
2623 case CODEC_TYPE_VIDEO:
2624 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2625 break;
2626 default:
2627 break;
2628 }
2629 return 0;
2630 }
2631
2632 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2633 {
2634 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2635
2636 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2637 return ret;
2638
2639 ret= s->oformat->write_packet(s, pkt);
2640 if(!ret)
2641 ret= url_ferror(s->pb);
2642 return ret;
2643 }
2644
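/* Add a packet to the muxer's interleaving queue, keeping the queue ordered
 * according to the supplied comparator (DTS order for the default
 * interleaver). Ownership of the packet data passes to the queued copy; the
 * caller's packet is left with its destruct callback cleared. */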
2645 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2646 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2647 {
2648 AVPacketList **next_point, *this_pktl;
2649
2650     this_pktl = av_mallocz(sizeof(AVPacketList));
         if (!this_pktl)
             return; // out of memory: better to drop the packet than to crash on a NULL dereference
2651     this_pktl->pkt= *pkt;
2652 pkt->destruct= NULL; // do not free original but only the copy
2653 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
2654
2655 next_point = &s->packet_buffer;
2656 while(*next_point){
2657 if(compare(s, &(*next_point)->pkt, pkt))
2658 break;
2659 next_point= &(*next_point)->next;
2660 }
2661 this_pktl->next= *next_point;
2662 *next_point= this_pktl;
2663 }
2664
2665 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2666 {
2667 AVStream *st = s->streams[ pkt ->stream_index];
2668 AVStream *st2= s->streams[ next->stream_index];
2669 int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
2670 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2671
2672 if (pkt->dts == AV_NOPTS_VALUE)
2673 return 0;
2674
2675 return next->dts * left > pkt->dts * right; //FIXME this can overflow
2676 }
2677
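/* Default interleaving by DTS: a packet is only returned once at least one
 * packet is buffered for every stream (so output can proceed in global DTS
 * order), or unconditionally when flushing at the end of muxing. */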
2678 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2679 AVPacketList *pktl;
2680 int stream_count=0;
2681 int streams[MAX_STREAMS];
2682
2683 if(pkt){
2684 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
2685 }
2686
2687 memset(streams, 0, sizeof(streams));
2688 pktl= s->packet_buffer;
2689 while(pktl){
2690 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2691 if(streams[ pktl->pkt.stream_index ] == 0)
2692 stream_count++;
2693 streams[ pktl->pkt.stream_index ]++;
2694 pktl= pktl->next;
2695 }
2696
2697 if(stream_count && (s->nb_streams == stream_count || flush)){
2698 pktl= s->packet_buffer;
2699 *out= pktl->pkt;
2700
2701 s->packet_buffer= pktl->next;
2702 av_freep(&pktl);
2703 return 1;
2704 }else{
2705 av_init_packet(out);
2706 return 0;
2707 }
2708 }
2709
2710 /**
2711 * Interleaves an AVPacket correctly so it can be muxed.
2712 * @param out the interleaved packet will be output here
2713 * @param in the input packet
2714 * @param flush 1 if no further packets are available as input and all
2715 * remaining packets should be output
2716 * @return 1 if a packet was output, 0 if no packet could be output,
2717 * < 0 if an error occurred
2718 */
2719 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2720 if(s->oformat->interleave_packet)
2721 return s->oformat->interleave_packet(s, out, in, flush);
2722 else
2723 return av_interleave_packet_per_dts(s, out, in, flush);
2724 }
2725
2726 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2727 AVStream *st= s->streams[ pkt->stream_index];
2728
2729 //FIXME/XXX/HACK drop zero sized packets
2730 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2731 return 0;
2732
2733 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2734 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2735 return -1;
2736
2737 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2738 return -1;
2739
2740 for(;;){
2741 AVPacket opkt;
2742 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2743 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2744 return ret;
2745
2746 ret= s->oformat->write_packet(s, &opkt);
2747
2748 av_free_packet(&opkt);
2749 pkt= NULL;
2750
2751 if(ret<0)
2752 return ret;
2753 if(url_ferror(s->pb))
2754 return url_ferror(s->pb);
2755 }
2756 }
2757
2758 int av_write_trailer(AVFormatContext *s)
2759 {
2760 int ret, i;
2761
2762 for(;;){
2763 AVPacket pkt;
2764 ret= av_interleave_packet(s, &pkt, NULL, 1);
2765 if(ret<0) //FIXME cleanup needed for ret<0 ?
2766 goto fail;
2767 if(!ret)
2768 break;
2769
2770 ret= s->oformat->write_packet(s, &pkt);
2771
2772 av_free_packet(&pkt);
2773
2774 if(ret<0)
2775 goto fail;
2776 if(url_ferror(s->pb))
2777 goto fail;
2778 }
2779
2780 if(s->oformat->write_trailer)
2781 ret = s->oformat->write_trailer(s);
2782 fail:
2783 if(ret == 0)
2784 ret=url_ferror(s->pb);
2785 for(i=0;i<s->nb_streams;i++)
2786 av_freep(&s->streams[i]->priv_data);
2787 av_freep(&s->priv_data);
2788 return ret;
2789 }
2790
2791 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2792 {
2793 int i, j;
2794 AVProgram *program=NULL;
2795 void *tmp;
2796
2797 for(i=0; i<ac->nb_programs; i++){
2798 if(ac->programs[i]->id != progid)
2799 continue;
2800 program = ac->programs[i];
2801 for(j=0; j<program->nb_stream_indexes; j++)
2802 if(program->stream_index[j] == idx)
2803 return;
2804
2805 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2806 if(!tmp)
2807 return;
2808 program->stream_index = tmp;
2809 program->stream_index[program->nb_stream_indexes++] = idx;
2810 return;
2811 }
2812 }
2813
2814 static void print_fps(double d, const char *postfix){
2815 uint64_t v= lrintf(d*100);
2816 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2817 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2818 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
2819 }
2820
2821 /* "user interface" functions */
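/* In the stream dump below, "tbr" is the guessed frame rate (r_frame_rate),
 * "tbn" the inverse of the container time base and "tbc" the inverse of the
 * codec time base. */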
2822 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2823 {
2824 char buf[256];
2825 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2826 AVStream *st = ic->streams[i];
2827 int g = av_gcd(st->time_base.num, st->time_base.den);
2828 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
2829 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2830 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2831     /* the pid is important information, so we display it */
2832 /* XXX: add a generic system */
2833 if (flags & AVFMT_SHOW_IDS)
2834 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2835 if (lang)
2836 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
2837 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2838 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2839 if (st->sample_aspect_ratio.num && // default
2840 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2841 AVRational display_aspect_ratio;
2842 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2843 st->codec->width*st->sample_aspect_ratio.num,
2844 st->codec->height*st->sample_aspect_ratio.den,
2845 1024*1024);
2846 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2847 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2848 display_aspect_ratio.num, display_aspect_ratio.den);
2849 }
2850 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2851 if(st->r_frame_rate.den && st->r_frame_rate.num)
2852 print_fps(av_q2d(st->r_frame_rate), "tbr");
2853 if(st->time_base.den && st->time_base.num)
2854 print_fps(1/av_q2d(st->time_base), "tbn");
2855 if(st->codec->time_base.den && st->codec->time_base.num)
2856 print_fps(1/av_q2d(st->codec->time_base), "tbc");
2857 }
2858 av_log(NULL, AV_LOG_INFO, "\n");
2859 }
2860
2861 void dump_format(AVFormatContext *ic,
2862 int index,
2863 const char *url,
2864 int is_output)
2865 {
2866 int i;
2867
2868 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2869 is_output ? "Output" : "Input",
2870 index,
2871 is_output ? ic->oformat->name : ic->iformat->name,
2872 is_output ? "to" : "from", url);
2873 if (!is_output) {
2874 av_log(NULL, AV_LOG_INFO, " Duration: ");
2875 if (ic->duration != AV_NOPTS_VALUE) {
2876 int hours, mins, secs, us;
2877 secs = ic->duration / AV_TIME_BASE;
2878 us = ic->duration % AV_TIME_BASE;
2879 mins = secs / 60;
2880 secs %= 60;
2881 hours = mins / 60;
2882 mins %= 60;
2883 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2884 (100 * us) / AV_TIME_BASE);
2885 } else {
2886 av_log(NULL, AV_LOG_INFO, "N/A");
2887 }
2888 if (ic->start_time != AV_NOPTS_VALUE) {
2889 int secs, us;
2890 av_log(NULL, AV_LOG_INFO, ", start: ");
2891 secs = ic->start_time / AV_TIME_BASE;
2892 us = ic->start_time % AV_TIME_BASE;
2893 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2894 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2895 }
2896 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2897 if (ic->bit_rate) {
2898 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2899 } else {
2900 av_log(NULL, AV_LOG_INFO, "N/A");
2901 }
2902 av_log(NULL, AV_LOG_INFO, "\n");
2903 }
2904 if(ic->nb_programs) {
2905 int j, k;
2906 for(j=0; j<ic->nb_programs; j++) {
2907 AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
2908 "name", NULL, 0);
2909 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2910 name ? name->value : "");
2911 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2912 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2913 }
2914 } else
2915 for(i=0;i<ic->nb_streams;i++)
2916 dump_stream_format(ic, i, index, is_output);
2917 }
2918
2919 #if LIBAVFORMAT_VERSION_MAJOR < 53
2920 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2921 {
2922 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2923 }
2924
2925 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2926 {
2927 AVRational frame_rate;
2928 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2929 *frame_rate_num= frame_rate.num;
2930 *frame_rate_den= frame_rate.den;
2931 return ret;
2932 }
2933 #endif
2934
2935 int64_t av_gettime(void)
2936 {
2937 struct timeval tv;
2938 gettimeofday(&tv,NULL);
2939 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2940 }
2941
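/* Accepted syntaxes: the literal "now"; a date/time of the form
 * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{HH:MM:SS|HHMMSS}[.m...], optionally followed
 * by 'Z' or 'z' to mark it as UTC; or, if 'duration' is non-zero, a duration
 * of the form [-]HH:MM:SS[.m...] or [-]S+[.m...]. Returns INT64_MIN on a
 * parse failure. */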
2942 int64_t parse_date(const char *datestr, int duration)
2943 {
2944 const char *p;
2945 int64_t t;
2946 struct tm dt;
2947 int i;
2948 static const char * const date_fmt[] = {
2949 "%Y-%m-%d",
2950 "%Y%m%d",
2951 };
2952 static const char * const time_fmt[] = {
2953 "%H:%M:%S",
2954 "%H%M%S",
2955 };
2956 const char *q;
2957 int is_utc, len;
2958 char lastch;
2959 int negative = 0;
2960
2961 #undef time
2962 time_t now = time(0);
2963
2964 len = strlen(datestr);
2965 if (len > 0)
2966 lastch = datestr[len - 1];
2967 else
2968 lastch = '\0';
2969 is_utc = (lastch == 'z' || lastch == 'Z');
2970
2971 memset(&dt, 0, sizeof(dt));
2972
2973 p = datestr;
2974 q = NULL;
2975 if (!duration) {
2976 if (!strncasecmp(datestr, "now", len))
2977 return (int64_t) now * 1000000;
2978
2979 /* parse the year-month-day part */
2980 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2981 q = small_strptime(p, date_fmt[i], &dt);
2982 if (q) {
2983 break;
2984 }
2985 }
2986
2987 /* if the year-month-day part is missing, then take the
2988 * current year-month-day time */
2989 if (!q) {
2990 if (is_utc) {
2991 dt = *gmtime(&now);
2992 } else {
2993 dt = *localtime(&now);
2994 }
2995 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2996 } else {
2997 p = q;
2998 }
2999
3000 if (*p == 'T' || *p == 't' || *p == ' ')
3001 p++;
3002
3003 /* parse the hour-minute-second part */
3004 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
3005 q = small_strptime(p, time_fmt[i], &dt);
3006 if (q) {
3007 break;
3008 }
3009 }
3010 } else {
3011 /* parse datestr as a duration */
3012 if (p[0] == '-') {
3013 negative = 1;
3014 ++p;
3015 }
3016 /* parse datestr as HH:MM:SS */
3017 q = small_strptime(p, time_fmt[0], &dt);
3018 if (!q) {
3019 /* parse datestr as S+ */
3020 dt.tm_sec = strtol(p, (char **)&q, 10);
3021 if (q == p)
3022 /* the parsing didn't succeed */
3023 return INT64_MIN;
3024 dt.tm_min = 0;
3025 dt.tm_hour = 0;
3026 }
3027 }
3028
3029 /* Now we have all the fields that we can get */
3030 if (!q) {
3031 return INT64_MIN;
3032 }
3033
3034 if (duration) {
3035 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
3036 } else {
3037 dt.tm_isdst = -1; /* unknown */
3038 if (is_utc) {
3039 t = mktimegm(&dt);
3040 } else {
3041 t = mktime(&dt);
3042 }
3043 }
3044
3045 t *= 1000000;
3046
3047 /* parse the .m... part */
3048 if (*q == '.') {
3049 int val, n;
3050 q++;
3051 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
3052 if (!isdigit(*q))
3053 break;
3054 val += n * (*q - '0');
3055 }
3056 t += val;
3057 }
3058 return negative ? -t : t;
3059 }
3060
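/* Search a URL-style options string of the form "?tag1=val1&tag2=val2&..."
 * for tag1; on a match its value is copied into arg (with '+' decoded as a
 * space) and 1 is returned, otherwise 0. */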
3061 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
3062 {
3063 const char *p;
3064 char tag[128], *q;
3065
3066 p = info;
3067 if (*p == '?')
3068 p++;
3069 for(;;) {
3070 q = tag;
3071 while (*p != '\0' && *p != '=' && *p != '&') {
3072 if ((q - tag) < sizeof(tag) - 1)
3073 *q++ = *p;
3074 p++;
3075 }
3076 *q = '\0';
3077 q = arg;
3078 if (*p == '=') {
3079 p++;
3080 while (*p != '&' && *p != '\0') {
3081 if ((q - arg) < arg_size - 1) {
3082 if (*p == '+')
3083 *q++ = ' ';
3084 else
3085 *q++ = *p;
3086 }
3087 p++;
3088 }
3089 *q = '\0';
3090 }
3091 if (!strcmp(tag, tag1))
3092 return 1;
3093 if (*p != '&')
3094 break;
3095 p++;
3096 }
3097 return 0;
3098 }
3099
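/* Substitute 'number' for a printf-style "%d" (or "%0Nd") sequence in 'path',
 * e.g. "img%03d.jpg" with number 7 gives "img007.jpg". Patterns containing
 * more than one such sequence are rejected. */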
3100 int av_get_frame_filename(char *buf, int buf_size,
3101 const char *path, int number)
3102 {
3103 const char *p;
3104 char *q, buf1[20], c;
3105 int nd, len, percentd_found;
3106
3107 q = buf;
3108 p = path;
3109 percentd_found = 0;
3110 for(;;) {
3111 c = *p++;
3112 if (c == '\0')
3113 break;
3114 if (c == '%') {
3115 do {
3116 nd = 0;
3117 while (isdigit(*p)) {
3118 nd = nd * 10 + *p++ - '0';
3119 }
3120 c = *p++;
3121 } while (isdigit(c));
3122
3123 switch(c) {
3124 case '%':
3125 goto addchar;
3126 case 'd':
3127 if (percentd_found)
3128 goto fail;
3129 percentd_found = 1;
3130 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3131 len = strlen(buf1);
3132 if ((q - buf + len) > buf_size - 1)
3133 goto fail;
3134 memcpy(q, buf1, len