Only free '*ic_ptr' when a caller has pre-allocated a context and passed it in
[libav.git] / libavformat / utils.c
1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "internal.h"
23 #include "libavcodec/opt.h"
24 #include "metadata.h"
25 #include "libavutil/avstring.h"
26 #include "riff.h"
27 #include <sys/time.h>
28 #include <time.h>
29 #include <strings.h>
30
31 #undef NDEBUG
32 #include <assert.h>
33
34 /**
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
37 */
38
/**
 * Return the LIBAVFORMAT_VERSION_INT this library was built with, so a
 * caller can verify the runtime library against its compile-time headers.
 */
unsigned avformat_version(void)
{
    return LIBAVFORMAT_VERSION_INT;
}
43
44 /* fraction handling */
45
46 /**
47 * f = val + (num / den) + 0.5.
48 *
49 * 'num' is normalized so that it is such as 0 <= num < den.
50 *
51 * @param f fractional number
52 * @param val integer value
53 * @param num must be >= 0
54 * @param den must be >= 1
55 */
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
57 {
58 num += (den >> 1);
59 if (num >= den) {
60 val += num / den;
61 num = num % den;
62 }
63 f->val = val;
64 f->num = num;
65 f->den = den;
66 }
67
68 /**
69 * Fractional addition to f: f = f + (incr / f->den).
70 *
71 * @param f fractional number
72 * @param incr increment, can be positive or negative
73 */
74 static void av_frac_add(AVFrac *f, int64_t incr)
75 {
76 int64_t num, den;
77
78 num = f->num + incr;
79 den = f->den;
80 if (num < 0) {
81 f->val += num / den;
82 num = num % den;
83 if (num < 0) {
84 num += den;
85 f->val--;
86 }
87 } else if (num >= den) {
88 f->val += num / den;
89 num = num % den;
90 }
91 f->num = num;
92 }
93
/** head of registered input format linked list;
 *  av_register_input_format() appends, so registration order is preserved */
AVInputFormat *first_iformat = NULL;
/** head of registered output format linked list;
 *  av_register_output_format() appends, so registration order is preserved */
AVOutputFormat *first_oformat = NULL;
98
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
100 {
101 if(f) return f->next;
102 else return first_iformat;
103 }
104
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
106 {
107 if(f) return f->next;
108 else return first_oformat;
109 }
110
111 void av_register_input_format(AVInputFormat *format)
112 {
113 AVInputFormat **p;
114 p = &first_iformat;
115 while (*p != NULL) p = &(*p)->next;
116 *p = format;
117 format->next = NULL;
118 }
119
120 void av_register_output_format(AVOutputFormat *format)
121 {
122 AVOutputFormat **p;
123 p = &first_oformat;
124 while (*p != NULL) p = &(*p)->next;
125 *p = format;
126 format->next = NULL;
127 }
128
/**
 * Check whether the extension of 'filename' (the text after the last '.')
 * case-insensitively matches one entry of the comma-separated list
 * 'extensions'. Tokens are compared through a 31-character buffer, so
 * longer list entries are effectively truncated.
 *
 * @return 1 on a match, 0 otherwise (also when filename has no extension)
 */
int match_ext(const char *filename, const char *extensions)
{
    const char *dot, *cur;
    char token[32];

    if (!filename)
        return 0;

    dot = strrchr(filename, '.');
    if (!dot)
        return 0;

    cur = extensions;
    for (;;) {
        /* copy the next comma-separated token, bounded by the buffer */
        char *t = token;
        while (*cur && *cur != ',' && (size_t)(t - token) < sizeof(token) - 1)
            *t++ = *cur++;
        *t = '\0';

        if (!strcasecmp(token, dot + 1))
            return 1;
        if (!*cur)
            return 0;
        cur++;
    }
}
155
/**
 * Check whether 'name' case-insensitively matches one entry of the
 * comma-separated list 'names'. Each token is compared over
 * max(token length, strlen(name)) characters, so a match must cover
 * the whole name and the whole token.
 *
 * @return 1 on a match, 0 otherwise (including NULL arguments)
 */
static int match_format(const char *name, const char *names)
{
    const char *comma;
    size_t namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((comma = strchr(names, ','))) {
        size_t toklen = (size_t)(comma - names);
        size_t len    = toklen > namelen ? toklen : namelen;
        if (!strncasecmp(name, names, len))
            return 1;
        names = comma + 1;
    }
    /* last (or only) token has no trailing comma */
    return !strcasecmp(name, names);
}
173
174 AVOutputFormat *guess_format(const char *short_name, const char *filename,
175 const char *mime_type)
176 {
177 AVOutputFormat *fmt, *fmt_found;
178 int score_max, score;
179
180 /* specific test for image sequences */
181 #if CONFIG_IMAGE2_MUXER
182 if (!short_name && filename &&
183 av_filename_number_test(filename) &&
184 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
185 return guess_format("image2", NULL, NULL);
186 }
187 #endif
188 /* Find the proper file type. */
189 fmt_found = NULL;
190 score_max = 0;
191 fmt = first_oformat;
192 while (fmt != NULL) {
193 score = 0;
194 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
195 score += 100;
196 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
197 score += 10;
198 if (filename && fmt->extensions &&
199 match_ext(filename, fmt->extensions)) {
200 score += 5;
201 }
202 if (score > score_max) {
203 score_max = score;
204 fmt_found = fmt;
205 }
206 fmt = fmt->next;
207 }
208 return fmt_found;
209 }
210
211 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
212 const char *mime_type)
213 {
214 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
215
216 if (fmt) {
217 AVOutputFormat *stream_fmt;
218 char stream_format_name[64];
219
220 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
221 stream_fmt = guess_format(stream_format_name, NULL, NULL);
222
223 if (stream_fmt)
224 fmt = stream_fmt;
225 }
226
227 return fmt;
228 }
229
230 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
231 const char *filename, const char *mime_type, enum CodecType type){
232 if(type == CODEC_TYPE_VIDEO){
233 enum CodecID codec_id= CODEC_ID_NONE;
234
235 #if CONFIG_IMAGE2_MUXER
236 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
237 codec_id= av_guess_image2_codec(filename);
238 }
239 #endif
240 if(codec_id == CODEC_ID_NONE)
241 codec_id= fmt->video_codec;
242 return codec_id;
243 }else if(type == CODEC_TYPE_AUDIO)
244 return fmt->audio_codec;
245 else
246 return CODEC_ID_NONE;
247 }
248
249 AVInputFormat *av_find_input_format(const char *short_name)
250 {
251 AVInputFormat *fmt;
252 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
253 if (match_format(short_name, fmt->name))
254 return fmt;
255 }
256 return NULL;
257 }
258
259 /* memory handling */
260
261
/**
 * Allocate a packet of 'size' bytes and fill it with data read from s.
 * On a short read the packet is shrunk to the bytes actually read; on
 * read failure (<= 0) the packet is freed again, so the caller owns the
 * packet only when the return value is positive.
 *
 * @return number of bytes read, or a negative error code
 */
int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
{
    int ret= av_new_packet(pkt, size);

    if(ret<0)
        return ret;

    /* record where in the stream this packet's payload starts */
    pkt->pos= url_ftell(s);

    ret= get_buffer(s, pkt->data, size);
    if(ret<=0)
        av_free_packet(pkt);       /* nothing read: release the buffer */
    else
        av_shrink_packet(pkt, ret);/* short read: trim to the actual size */

    return ret;
}
279
280
/** Return 1 if 'filename' is a valid frame-number pattern (a frame number
 *  can be substituted into it), 0 otherwise. */
int av_filename_number_test(const char *filename)
{
    char fname[1024];

    if (!filename)
        return 0;
    return av_get_frame_filename(fname, sizeof(fname), filename, 1) >= 0;
}
286
/**
 * Probe all registered demuxers against pd and return the one whose score
 * strictly exceeds *score_max.
 *
 * @param pd        probe data (filename, and optionally buffered bytes)
 * @param is_opened whether a file is actually opened; AVFMT_NOFILE formats
 *                  are only considered when no file is opened
 * @param score_max in: minimum score to beat; out: score of the winner
 * @return best-scoring demuxer, or NULL when nothing beats *score_max or
 *         two demuxers tie for the best score (ambiguous result)
 */
static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
{
    AVInputFormat *fmt1, *fmt;
    int score;

    fmt = NULL;
    for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
        if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
            continue;
        score = 0;
        if (fmt1->read_probe) {
            score = fmt1->read_probe(pd);
        } else if (fmt1->extensions) {
            /* no probe function: fall back to a filename extension match */
            if (match_ext(pd->filename, fmt1->extensions)) {
                score = 50;
            }
        }
        if (score > *score_max) {
            *score_max = score;
            fmt = fmt1;
        }else if (score == *score_max)
            fmt = NULL; /* a tie is ambiguous: report no match */
    }
    return fmt;
}
312
313 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
314 int score=0;
315 return av_probe_input_format2(pd, is_opened, &score);
316 }
317
318 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
319 {
320 AVInputFormat *fmt;
321 fmt = av_probe_input_format2(pd, 1, &score);
322
323 if (fmt) {
324 if (!strcmp(fmt->name, "mp3")) {
325 st->codec->codec_id = CODEC_ID_MP3;
326 st->codec->codec_type = CODEC_TYPE_AUDIO;
327 } else if (!strcmp(fmt->name, "ac3")) {
328 st->codec->codec_id = CODEC_ID_AC3;
329 st->codec->codec_type = CODEC_TYPE_AUDIO;
330 } else if (!strcmp(fmt->name, "mpegvideo")) {
331 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
332 st->codec->codec_type = CODEC_TYPE_VIDEO;
333 } else if (!strcmp(fmt->name, "m4v")) {
334 st->codec->codec_id = CODEC_ID_MPEG4;
335 st->codec->codec_type = CODEC_TYPE_VIDEO;
336 } else if (!strcmp(fmt->name, "h264")) {
337 st->codec->codec_id = CODEC_ID_H264;
338 st->codec->codec_type = CODEC_TYPE_VIDEO;
339 } else if (!strcmp(fmt->name, "dts")) {
340 st->codec->codec_id = CODEC_ID_DTS;
341 st->codec->codec_type = CODEC_TYPE_AUDIO;
342 }
343 }
344 return !!fmt;
345 }
346
347 /************************************************************/
348 /* input media file */
349
/**
 * Open a media file from an IO stream. 'fmt' must be specified.
 *
 * On success *ic_ptr is set to the demuxer context (newly allocated, or
 * the caller's context when ap->prealloced_context is set) and 0 is
 * returned; on failure a negative error code is returned and *ic_ptr is
 * set to NULL.
 */
int av_open_input_stream(AVFormatContext **ic_ptr,
                         ByteIOContext *pb, const char *filename,
                         AVInputFormat *fmt, AVFormatParameters *ap)
{
    int err;
    AVFormatContext *ic;
    AVFormatParameters default_ap;

    /* fall back to zeroed default parameters when the caller passed none */
    if(!ap){
        ap=&default_ap;
        memset(ap, 0, sizeof(default_ap));
    }

    /* the caller may hand in a pre-allocated context, e.g. with options
       already configured on it */
    if(!ap->prealloced_context)
        ic = avformat_alloc_context();
    else
        ic = *ic_ptr;
    if (!ic) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ic->iformat = fmt;
    ic->pb = pb;
    ic->duration = AV_NOPTS_VALUE;
    ic->start_time = AV_NOPTS_VALUE;
    av_strlcpy(ic->filename, filename, sizeof(ic->filename));

    /* allocate private data */
    if (fmt->priv_data_size > 0) {
        ic->priv_data = av_mallocz(fmt->priv_data_size);
        if (!ic->priv_data) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
    } else {
        ic->priv_data = NULL;
    }

    /* let the demuxer parse the headers and create the AVStreams */
    if (ic->iformat->read_header) {
        err = ic->iformat->read_header(ic, ap);
        if (err < 0)
            goto fail;
    }

    /* remember where packet data begins, unless read_header set it */
    if (pb && !ic->data_offset)
        ic->data_offset = url_ftell(ic->pb);

#if LIBAVFORMAT_VERSION_MAJOR < 53
    ff_metadata_demux_compat(ic);
#endif

    ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;

    *ic_ptr = ic;
    return 0;
 fail:
    /* release everything read_header may have created before it failed */
    if (ic) {
        int i;
        av_freep(&ic->priv_data);
        for(i=0;i<ic->nb_streams;i++) {
            AVStream *st = ic->streams[i];
            if (st) {
                av_free(st->priv_data);
                av_free(st->codec->extradata);
            }
            av_free(st);
        }
    }
    /* NOTE(review): the context is freed even when the caller pre-allocated
       it (ap->prealloced_context); the caller must not reuse *ic_ptr after
       a failure — confirm this matches the intended API contract */
    av_free(ic);
    *ic_ptr = NULL;
    return err;
}
425
426 /** size of probe buffer, for guessing file type from file contents */
427 #define PROBE_BUF_MIN 2048
428 #define PROBE_BUF_MAX (1<<20)
429
430 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
431 AVInputFormat *fmt,
432 int buf_size,
433 AVFormatParameters *ap)
434 {
435 int err, probe_size;
436 AVProbeData probe_data, *pd = &probe_data;
437 ByteIOContext *pb = NULL;
438
439 pd->filename = "";
440 if (filename)
441 pd->filename = filename;
442 pd->buf = NULL;
443 pd->buf_size = 0;
444
445 if (!fmt) {
446 /* guess format if no file can be opened */
447 fmt = av_probe_input_format(pd, 0);
448 }
449
450 /* Do not open file if the format does not need it. XXX: specific
451 hack needed to handle RTSP/TCP */
452 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
453 /* if no file needed do not try to open one */
454 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
455 goto fail;
456 }
457 if (buf_size > 0) {
458 url_setbufsize(pb, buf_size);
459 }
460
461 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
462 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
463 /* read probe data */
464 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
465 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
466 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
467 if (url_fseek(pb, 0, SEEK_SET) < 0) {
468 url_fclose(pb);
469 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
470 pb = NULL;
471 err = AVERROR(EIO);
472 goto fail;
473 }
474 }
475 /* guess file format */
476 fmt = av_probe_input_format2(pd, 1, &score);
477 }
478 av_freep(&pd->buf);
479 }
480
481 /* if still no format found, error */
482 if (!fmt) {
483 err = AVERROR_NOFMT;
484 goto fail;
485 }
486
487 /* check filename in case an image number is expected */
488 if (fmt->flags & AVFMT_NEEDNUMBER) {
489 if (!av_filename_number_test(filename)) {
490 err = AVERROR_NUMEXPECTED;
491 goto fail;
492 }
493 }
494 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
495 if (err)
496 goto fail;
497 return 0;
498 fail:
499 av_freep(&pd->buf);
500 if (pb)
501 url_fclose(pb);
502 if (ap && ap->prealloced_context)
503 av_free(*ic_ptr);
504 *ic_ptr = NULL;
505 return err;
506
507 }
508
509 /*******************************************************/
510
511 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
512 AVPacketList **plast_pktl){
513 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
514 if (!pktl)
515 return NULL;
516
517 if (*packet_buffer)
518 (*plast_pktl)->next = pktl;
519 else
520 *packet_buffer = pktl;
521
522 /* add the packet in the buffered packet list */
523 *plast_pktl = pktl;
524 pktl->pkt= *pkt;
525 return &pktl->pkt;
526 }
527
/**
 * Read the next raw (undecoded, unparsed) packet from the demuxer.
 *
 * Packets of streams still being probed (CODEC_ID_PROBE) are buffered in
 * s->raw_packet_buffer and their payload is accumulated into the stream's
 * probe_data until the codec can be identified; buffered packets are then
 * drained before new ones are read.
 *
 * @return 0 on success, a negative error code on failure or end of stream
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i;
    AVStream *st;

    for(;;){
        AVPacketList *pktl = s->raw_packet_buffer;

        if (pktl) {
            *pkt = pktl->pkt;
            /* hand out a buffered packet once its stream is identified,
               probing gave up, or the raw-buffer budget is exhausted */
            if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
               !s->streams[pkt->stream_index]->probe_packets ||
               s->raw_packet_buffer_remaining_size < pkt->size){
                AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
                av_freep(&pd->buf);
                pd->buf_size = 0;
                s->raw_packet_buffer = pktl->next;
                s->raw_packet_buffer_remaining_size += pkt->size;
                av_free(pktl);
                return 0;
            }
        }

        av_init_packet(pkt);
        ret= s->iformat->read_packet(s, pkt);
        if (ret < 0) {
            if (!pktl || ret == AVERROR(EAGAIN))
                return ret;
            /* demuxer hit EOF/error: stop probing so the buffered packets
               above get flushed out on the next iterations */
            for (i = 0; i < s->nb_streams; i++)
                s->streams[i]->probe_packets = 0;
            continue;
        }
        st= s->streams[pkt->stream_index];

        /* apply caller-forced codec ids */
        switch(st->codec->codec_type){
        case CODEC_TYPE_VIDEO:
            if(s->video_codec_id)   st->codec->codec_id= s->video_codec_id;
            break;
        case CODEC_TYPE_AUDIO:
            if(s->audio_codec_id)   st->codec->codec_id= s->audio_codec_id;
            break;
        case CODEC_TYPE_SUBTITLE:
            if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
            break;
        }

        /* identified stream and nothing buffered: return it directly */
        if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
                     !st->probe_packets))
            return ret;

        add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
        s->raw_packet_buffer_remaining_size -= pkt->size;

        if(st->codec->codec_id == CODEC_ID_PROBE){
            AVProbeData *pd = &st->probe_data;

            --st->probe_packets;

            /* NOTE(review): the av_realloc result is not checked; on OOM the
               old buffer leaks and the memcpy below dereferences NULL —
               confirm whether an ENOMEM path should be added here */
            pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
            pd->buf_size += pkt->size;
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);

            /* re-probe only when the buffer size crosses a power of two */
            if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
                set_codec_from_probe_data(st, pd, 1);
                if(st->codec->codec_id != CODEC_ID_PROBE){
                    pd->buf_size=0;
                    av_freep(&pd->buf);
                }
            }
        }
    }
}
601
602 /**********************************************************/
603
604 /**
605 * Get the number of samples of an audio frame. Return -1 on error.
606 */
607 static int get_audio_frame_size(AVCodecContext *enc, int size)
608 {
609 int frame_size;
610
611 if(enc->codec_id == CODEC_ID_VORBIS)
612 return -1;
613
614 if (enc->frame_size <= 1) {
615 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
616
617 if (bits_per_sample) {
618 if (enc->channels == 0)
619 return -1;
620 frame_size = (size << 3) / (bits_per_sample * enc->channels);
621 } else {
622 /* used for example by ADPCM codecs */
623 if (enc->bit_rate == 0)
624 return -1;
625 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
626 }
627 } else {
628 frame_size = enc->frame_size;
629 }
630 return frame_size;
631 }
632
633
/**
 * Compute the duration of one frame of this packet's stream as the
 * rational *pnum / *pden (in seconds). Both are set to 0 when the
 * duration is not available.
 *
 * @param pnum receives the numerator of the frame duration
 * @param pden receives the denominator of the frame duration
 * @param st   stream the packet belongs to
 * @param pc   parser context, may be NULL (used for repeat_pict)
 * @param pkt  the packet; its size is used for audio streams
 */
static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
                                   AVCodecParserContext *pc, AVPacket *pkt)
{
    int frame_size;

    *pnum = 0;
    *pden = 0;
    switch(st->codec->codec_type) {
    case CODEC_TYPE_VIDEO:
        /* prefer the container time base when its tick rate is below
           1000 Hz (num*1000 > den), else fall back to the codec one */
        if(st->time_base.num*1000LL > st->time_base.den){
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            if (pc && pc->repeat_pict) {
                /* stretch the duration for repeated fields/frames */
                *pnum = (*pnum) * (1 + pc->repeat_pict);
            }
        }
        break;
    case CODEC_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);
        if (frame_size < 0)
            break;
        *pnum = frame_size;
        *pden = st->codec->sample_rate;
        break;
    default:
        break;
    }
}
668
669 static int is_intra_only(AVCodecContext *enc){
670 if(enc->codec_type == CODEC_TYPE_AUDIO){
671 return 1;
672 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
673 switch(enc->codec_id){
674 case CODEC_ID_MJPEG:
675 case CODEC_ID_MJPEGB:
676 case CODEC_ID_LJPEG:
677 case CODEC_ID_RAWVIDEO:
678 case CODEC_ID_DVVIDEO:
679 case CODEC_ID_HUFFYUV:
680 case CODEC_ID_FFVHUFF:
681 case CODEC_ID_ASV1:
682 case CODEC_ID_ASV2:
683 case CODEC_ID_VCR1:
684 case CODEC_ID_DNXHD:
685 case CODEC_ID_JPEG2000:
686 return 1;
687 default: break;
688 }
689 }
690 return 0;
691 }
692
/**
 * Once the first usable dts of a stream becomes known, rebase the
 * timestamps of already-buffered packets of that stream onto it and
 * derive the stream start time.
 */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
{
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->packet_buffer;

    /* act only the first time a usable dts shows up */
    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
        return;

    /* cur_dts has accumulated relative durations so far; anchor them */
    st->first_dts= dts - st->cur_dts;
    st->cur_dts= dts;

    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != stream_index)
            continue;
        //FIXME think more about this check
        if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
            pktl->pkt.pts += st->first_dts;

        if(pktl->pkt.dts != AV_NOPTS_VALUE)
            pktl->pkt.dts += st->first_dts;

        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;
    }
    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
}
721
/**
 * Backfill dts/pts/duration of already-buffered packets of pkt's stream
 * once a packet with a known duration arrives, assuming all those packets
 * share pkt's duration.
 */
static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    AVPacketList *pktl= s->packet_buffer;
    int64_t cur_dts= 0;

    if(st->first_dts != AV_NOPTS_VALUE){
        cur_dts= st->first_dts;
        /* step back one duration per leading untimestamped packet to find
           the dts of the oldest buffered packet, then shift first_dts */
        for(; pktl; pktl= pktl->next){
            if(pktl->pkt.stream_index == pkt->stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                    break;
                cur_dts -= pkt->duration;
            }
        }
        pktl= s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts)
        return; /* no anchor and dts already advanced: nothing to do */

    /* assign increasing timestamps to the leading run of packets that
       have neither timestamps nor a duration */
    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != pkt->stream_index)
            continue;
        if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
            cur_dts += pkt->duration;
            pktl->pkt.duration= pkt->duration;
        }else
            break;
    }
    if(st->first_dts == AV_NOPTS_VALUE)
        st->cur_dts= cur_dts;
}
757
/**
 * Fill in missing timing fields of a packet (pts, dts, duration) from
 * parser information, stream time bases and the running per-stream dts
 * counter, and set the keyframe flag.
 *
 * @param s   demuxer context (used for logging and the buffered-packet list)
 * @param st  stream the packet belongs to; its timing state is updated
 * @param pc  parser context for the stream, may be NULL
 * @param pkt packet to update in place
 */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
{
    int num, den, presentation_delayed, delay, i;
    int64_t offset;

    if (pc && pc->pict_type == FF_B_TYPE)
        st->codec->has_b_frames = 1;

    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;
    /* XXX: need has_b_frame, but cannot get it if the codec is
        not initialized */
    if (delay &&
        pc && pc->pict_type != FF_B_TYPE)
        presentation_delayed = 1;

    /* undo a pts wrap when dts ended up above pts */
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
       /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
        pkt->dts -= 1LL<<st->pts_wrap_bits;
    }

    // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
    // we take the conservative approach and discard both
    // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
    if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
        av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
        pkt->dts= pkt->pts= AV_NOPTS_VALUE;
    }

    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);

            if(pkt->duration != 0 && s->packet_buffer)
                update_initial_durations(s, st, pkt);
        }
    }

    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;
        if(pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
    }

    if (pc && pc->dts_sync_point >= 0) {
        // we have synchronization info from the parser
        int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
        if (den > 0) {
            int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
            if (pkt->dts != AV_NOPTS_VALUE) {
                // got DTS from the stream, update reference timestamp
                st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            } else if (st->reference_dts != AV_NOPTS_VALUE) {
                // compute DTS based on reference timestamp
                pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            }
            if (pc->dts_sync_point > 0)
                st->reference_dts = pkt->dts; // new reference
        }
    }

    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;

//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
    /* interpolate PTS and DTS if they are not present */
    //We skip H264 currently because delay and has_b_frames are not reliably set
    if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;

            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if(pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration  = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
            /* detect a pts that is one duration off from cur_dts and nudge it */
            if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
                int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
                int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
                if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
                    pkt->pts += pkt->duration;
//                    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
                }
            }

            /* presentation is not delayed : PTS and DTS are the same */
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;
            pkt->dts = pkt->pts;
            if(pkt->pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->pts + pkt->duration;
        }
    }

    /* reorder pts through a small sorted buffer to derive a monotonic dts */
    if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
        st->pts_buffer[0]= pkt->pts;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        }
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;
    }

//    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);

    /* update flags */
    if(is_intra_only(st->codec))
        pkt->flags |= PKT_FLAG_KEY;
    else if (pc) {
        pkt->flags = 0;
        /* keyframe computation */
        if (pc->key_frame == 1)
            pkt->flags |= PKT_FLAG_KEY;
        else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
            pkt->flags |= PKT_FLAG_KEY;
    }
    if (pc)
        pkt->convergence_duration = pc->convergence_duration;
}
907
908
/**
 * Read the next frame, running raw packets through the stream's parser
 * (when the stream needs parsing) so the returned packet contains a
 * complete frame with filled-in timing fields.
 *
 * @return 0 on success, a negative error code on failure or end of stream
 */
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st;
    int len, ret, i;

    av_init_packet(pkt);

    for(;;) {
        /* select current input stream component */
        st = s->cur_st;
        if (st) {
            if (!st->need_parsing || !st->parser) {
                /* no parsing needed: we just output the packet as is */
                /* raw data support */
                *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
                compute_pkt_fields(s, st, NULL, pkt);
                s->cur_st = NULL;
                if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                    (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                    ff_reduce_index(s, st->index);
                    av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
                }
                break;
            } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
                /* feed the remaining bytes of the current packet to the parser */
                len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
                                       st->cur_ptr, st->cur_len,
                                       st->cur_pkt.pts, st->cur_pkt.dts,
                                       st->cur_pkt.pos);
                /* timestamps were consumed by the parser; do not reuse them */
                st->cur_pkt.pts = AV_NOPTS_VALUE;
                st->cur_pkt.dts = AV_NOPTS_VALUE;
                /* increment read pointer */
                st->cur_ptr += len;
                st->cur_len -= len;

                /* return packet if any */
                if (pkt->size) {
                got_packet:
                    pkt->duration = 0;
                    pkt->stream_index = st->index;
                    pkt->pts = st->parser->pts;
                    pkt->dts = st->parser->dts;
                    pkt->pos = st->parser->pos;
                    pkt->destruct = NULL;
                    compute_pkt_fields(s, st, st->parser, pkt);

                    if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
                        ff_reduce_index(s, st->index);
                        av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                                           0, 0, AVINDEX_KEYFRAME);
                    }

                    break;
                }
            } else {
                /* free packet */
                av_free_packet(&st->cur_pkt);
                s->cur_st = NULL;
            }
        } else {
            AVPacket cur_pkt;
            /* read next packet */
            ret = av_read_packet(s, &cur_pkt);
            if (ret < 0) {
                if (ret == AVERROR(EAGAIN))
                    return ret;
                /* flush the parsers to return the last frames, if any */
                for(i = 0; i < s->nb_streams; i++) {
                    st = s->streams[i];
                    if (st->parser && st->need_parsing) {
                        av_parser_parse2(st->parser, st->codec,
                                         &pkt->data, &pkt->size,
                                         NULL, 0,
                                         AV_NOPTS_VALUE, AV_NOPTS_VALUE,
                                         AV_NOPTS_VALUE);
                        if (pkt->size)
                            goto got_packet;
                    }
                }
                /* no more packets: really terminate parsing */
                return ret;
            }
            st = s->streams[cur_pkt.stream_index];
            st->cur_pkt= cur_pkt;

            /* warn about (but keep) packets whose pts precedes their dts */
            if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
               st->cur_pkt.dts != AV_NOPTS_VALUE &&
               st->cur_pkt.pts < st->cur_pkt.dts){
                av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                       st->cur_pkt.stream_index,
                       st->cur_pkt.pts,
                       st->cur_pkt.dts,
                       st->cur_pkt.size);
//                av_free_packet(&st->cur_pkt);
//                return -1;
            }

            if(s->debug & FF_FDEBUG_TS)
                av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
                       st->cur_pkt.stream_index,
                       st->cur_pkt.pts,
                       st->cur_pkt.dts,
                       st->cur_pkt.size,
                       st->cur_pkt.flags);

            s->cur_st = st;
            st->cur_ptr = st->cur_pkt.data;
            st->cur_len = st->cur_pkt.size;
            /* lazily create the parser on the first packet of the stream */
            if (st->need_parsing && !st->parser) {
                st->parser = av_parser_init(st->codec->codec_id);
                if (!st->parser) {
                    /* no parser available: just output the raw packets */
                    st->need_parsing = AVSTREAM_PARSE_NONE;
                }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
                }
                if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
                    st->parser->next_frame_offset=
                    st->parser->cur_offset= st->cur_pkt.pos;
                }
            }
        }
    }
    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
               pkt->stream_index,
               pkt->pts,
               pkt->dts,
               pkt->size,
               pkt->flags);

    return 0;
}
1041
/**
 * Return the next frame of a stream. With AVFMT_FLAG_GENPTS set, packets
 * are buffered and missing pts values are generated by borrowing the dts
 * of a later packet of the same stream.
 *
 * @return 0 on success, a negative error code on failure or end of stream
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    AVPacketList *pktl;
    int eof=0;
    const int genpts= s->flags & AVFMT_FLAG_GENPTS;

    for(;;){
        pktl = s->packet_buffer;
        if (pktl) {
            AVPacket *next_pkt= &pktl->pkt;

            if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
                /* look ahead for a later packet whose dts can serve as the
                   missing pts of the head packet */
                while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
                    if(   pktl->pkt.stream_index == next_pkt->stream_index
                       && next_pkt->dts < pktl->pkt.dts
                       && pktl->pkt.pts != pktl->pkt.dts //not b frame
                       /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
                        next_pkt->pts= pktl->pkt.dts;
                    }
                    pktl= pktl->next;
                }
                pktl = s->packet_buffer;
            }

            /* deliver the head packet once its pts is known, or once it is
               clear no pts can be generated anymore */
            if(   next_pkt->pts != AV_NOPTS_VALUE
               || next_pkt->dts == AV_NOPTS_VALUE
               || !genpts || eof){
                /* read packet from packet buffer, if there is data */
                *pkt = *next_pkt;
                s->packet_buffer = pktl->next;
                av_free(pktl);
                return 0;
            }
        }
        if(genpts){
            int ret= av_read_frame_internal(s, pkt);
            if(ret<0){
                if(pktl && ret != AVERROR(EAGAIN)){
                    /* input exhausted: flush what is still buffered */
                    eof=1;
                    continue;
                }else
                    return ret;
            }

            if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                           &s->packet_buffer_end)) < 0)
                return AVERROR(ENOMEM);
        }else{
            assert(!s->packet_buffer);
            return av_read_frame_internal(s, pkt);
        }
    }
}
1095
1096 /* XXX: suppress the packet queue */
1097 static void flush_packet_queue(AVFormatContext *s)
1098 {
1099 AVPacketList *pktl;
1100
1101 for(;;) {
1102 pktl = s->packet_buffer;
1103 if (!pktl)
1104 break;
1105 s->packet_buffer = pktl->next;
1106 av_free_packet(&pktl->pkt);
1107 av_free(pktl);
1108 }
1109 while(s->raw_packet_buffer){
1110 pktl = s->raw_packet_buffer;
1111 s->raw_packet_buffer = pktl->next;
1112 av_free_packet(&pktl->pkt);
1113 av_free(pktl);
1114 }
1115 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1116 }
1117
1118 /*******************************************************/
1119 /* seek support */
1120
1121 int av_find_default_stream_index(AVFormatContext *s)
1122 {
1123 int first_audio_index = -1;
1124 int i;
1125 AVStream *st;
1126
1127 if (s->nb_streams <= 0)
1128 return -1;
1129 for(i = 0; i < s->nb_streams; i++) {
1130 st = s->streams[i];
1131 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1132 return i;
1133 }
1134 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1135 first_audio_index = i;
1136 }
1137 return first_audio_index >= 0 ? first_audio_index : 0;
1138 }
1139
1140 /**
1141 * Flush the frame reader.
1142 */
1143 static void av_read_frame_flush(AVFormatContext *s)
1144 {
1145 AVStream *st;
1146 int i;
1147
1148 flush_packet_queue(s);
1149
1150 s->cur_st = NULL;
1151
1152 /* for each stream, reset read state */
1153 for(i = 0; i < s->nb_streams; i++) {
1154 st = s->streams[i];
1155
1156 if (st->parser) {
1157 av_parser_close(st->parser);
1158 st->parser = NULL;
1159 av_free_packet(&st->cur_pkt);
1160 }
1161 st->last_IP_pts = AV_NOPTS_VALUE;
1162 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1163 st->reference_dts = AV_NOPTS_VALUE;
1164 /* fail safe */
1165 st->cur_ptr = NULL;
1166 st->cur_len = 0;
1167
1168 st->probe_packets = MAX_PROBE_PACKETS;
1169 }
1170 }
1171
1172 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1173 int i;
1174
1175 for(i = 0; i < s->nb_streams; i++) {
1176 AVStream *st = s->streams[i];
1177
1178 st->cur_dts = av_rescale(timestamp,
1179 st->time_base.den * (int64_t)ref_st->time_base.num,
1180 st->time_base.num * (int64_t)ref_st->time_base.den);
1181 }
1182 }
1183
1184 void ff_reduce_index(AVFormatContext *s, int stream_index)
1185 {
1186 AVStream *st= s->streams[stream_index];
1187 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1188
1189 if((unsigned)st->nb_index_entries >= max_entries){
1190 int i;
1191 for(i=0; 2*i<st->nb_index_entries; i++)
1192 st->index_entries[i]= st->index_entries[2*i];
1193 st->nb_index_entries= i;
1194 }
1195 }
1196
/**
 * Add an entry to a stream's index, keeping the index sorted by timestamp.
 *
 * @param st        stream the entry belongs to
 * @param pos       byte position of the frame in the file
 * @param timestamp timestamp of the frame, in st->time_base units
 * @param size      size of the frame, in bytes
 * @param distance  distance (in bytes) to the previous keyframe
 * @param flags     AVINDEX_KEYFRAME if the entry marks a keyframe
 * @return index of the entry within st->index_entries, or -1 on failure
 */
int av_add_index_entry(AVStream *st,
                            int64_t pos, int64_t timestamp, int size, int distance, int flags)
{
    AVIndexEntry *entries, *ie;
    int index;

    /* refuse to grow past what the allocation size can express */
    if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
        return -1;

    entries = av_fast_realloc(st->index_entries,
                              &st->index_entries_allocated_size,
                              (st->nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    if(!entries)
        return -1;

    st->index_entries= entries;

    index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);

    if(index<0){
        /* timestamp beyond the last existing entry: append at the end */
        index= st->nb_index_entries++;
        ie= &entries[index];
        assert(index==0 || ie[-1].timestamp < timestamp);
    }else{
        ie= &entries[index];
        if(ie->timestamp != timestamp){
            if(ie->timestamp <= timestamp)
                return -1;
            /* insert before the found entry: shift the tail up by one slot */
            memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
            st->nb_index_entries++;
        }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
            distance= ie->min_distance;
    }

    /* (over)write the entry at the chosen slot */
    ie->pos = pos;
    ie->timestamp = timestamp;
    ie->min_distance= distance;
    ie->size= size;
    ie->flags = flags;

    return index;
}
1240
/**
 * Get the index entry whose timestamp best matches wanted_timestamp.
 *
 * @param st stream whose index is searched
 * @param wanted_timestamp target timestamp, in st->time_base units
 * @param flags AVSEEK_FLAG_BACKWARD: choose the entry at or before the
 *              target (otherwise at or after it);
 *              AVSEEK_FLAG_ANY: accept non-keyframe entries too
 * @return index of the entry, or -1 if none satisfies the request
 */
int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
                              int flags)
{
    AVIndexEntry *entries= st->index_entries;
    int nb_entries= st->nb_index_entries;
    int a, b, m;
    int64_t timestamp;

    /* binary search: 'a' converges to the last entry <= wanted,
       'b' to the first entry >= wanted (both updates fire on equality) */
    a = - 1;
    b = nb_entries;

    while (b - a > 1) {
        m = (a + b) >> 1;
        timestamp = entries[m].timestamp;
        if(timestamp >= wanted_timestamp)
            b = m;
        if(timestamp <= wanted_timestamp)
            a = m;
    }
    m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;

    if(!(flags & AVSEEK_FLAG_ANY)){
        /* step away from the target until a keyframe entry is found */
        while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
            m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
        }
    }

    /* NOTE: m == -1 (backward search fell off the front) is returned as-is */
    if(m == nb_entries)
        return -1;
    return m;
}
1272
1273 #define DEBUG_SEEK
1274
/**
 * Seek to the keyframe at 'target_ts' in 'stream_index' using a binary
 * search driven by the demuxer's read_timestamp() callback. Any cached
 * index entries are used to pre-narrow the search interval.
 *
 * @return 0 on success, -1 on failure
 */
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
    AVInputFormat *avif= s->iformat;
    int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
    int64_t ts_min, ts_max, ts;
    int index;
    AVStream *st;

    if (stream_index < 0)
        return -1;

#ifdef DEBUG_SEEK
    av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
#endif

    /* unknown bounds; av_gen_search() will probe the file for them */
    ts_max=
    ts_min= AV_NOPTS_VALUE;
    pos_limit= -1; //gcc falsely says it may be uninitialized

    st= s->streams[stream_index];
    if(st->index_entries){
        AVIndexEntry *e;

        /* lower bound: last index entry at or before target_ts */
        index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
        index= FFMAX(index, 0);
        e= &st->index_entries[index];

        if(e->timestamp <= target_ts || e->pos == e->min_distance){
            pos_min= e->pos;
            ts_min= e->timestamp;
#ifdef DEBUG_SEEK
        av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
               pos_min,ts_min);
#endif
        }else{
            assert(index==0);
        }

        /* upper bound: first index entry at or after target_ts */
        index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
        assert(index < st->nb_index_entries);
        if(index >= 0){
            e= &st->index_entries[index];
            assert(e->timestamp >= target_ts);
            pos_max= e->pos;
            ts_max= e->timestamp;
            pos_limit= pos_max - e->min_distance;
#ifdef DEBUG_SEEK
        av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
               pos_max,pos_limit, ts_max);
#endif
        }
    }

    pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
    if(pos<0)
        return -1;

    /* do the seek */
    url_fseek(s->pb, pos, SEEK_SET);

    av_update_cur_dts(s, st, ts);

    return 0;
}
1338
/**
 * Locate the file position of 'target_ts' by interpolation/bisection over
 * [pos_min,pos_max], probing timestamps with read_timestamp(). Bounds given
 * as AV_NOPTS_VALUE are first resolved by reading from the start/end of the
 * file. On success the timestamp found is stored in *ts_ret.
 *
 * @return the file position to seek to, or -1 on failure
 */
int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
    int64_t pos, ts;
    int64_t start_pos, filesize;
    int no_change;

#ifdef DEBUG_SEEK
    av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
#endif

    if(ts_min == AV_NOPTS_VALUE){
        /* lower bound unknown: take the first timestamp in the file */
        pos_min = s->data_offset;
        ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
        if (ts_min == AV_NOPTS_VALUE)
            return -1;
    }

    if(ts_max == AV_NOPTS_VALUE){
        /* upper bound unknown: probe backwards from EOF with doubling steps
           until a timestamp is found, then walk forward to the last one */
        int step= 1024;
        filesize = url_fsize(s->pb);
        pos_max = filesize - 1;
        do{
            pos_max -= step;
            ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
            step += step;
        }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
        if (ts_max == AV_NOPTS_VALUE)
            return -1;

        for(;;){
            int64_t tmp_pos= pos_max + 1;
            int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
            if(tmp_ts == AV_NOPTS_VALUE)
                break;
            ts_max= tmp_ts;
            pos_max= tmp_pos;
            if(tmp_pos >= filesize)
                break;
        }
        pos_limit= pos_max;
    }

    if(ts_min > ts_max){
        return -1;
    }else if(ts_min == ts_max){
        pos_limit= pos_min;
    }

    no_change=0;
    while (pos_min < pos_limit) {
#ifdef DEBUG_SEEK
        av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
               pos_min, pos_max,
               ts_min, ts_max);
#endif
        assert(pos_limit <= pos_max);

        /* no_change counts consecutive probes that failed to move the
           bounds; it escalates interpolation -> bisection -> linear scan */
        if(no_change==0){
            int64_t approximate_keyframe_distance= pos_max - pos_limit;
            // interpolate position (better than dichotomy)
            pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
                + pos_min - approximate_keyframe_distance;
        }else if(no_change==1){
            // bisection, if interpolation failed to change min or max pos last time
            pos = (pos_min + pos_limit)>>1;
        }else{
            /* linear search if bisection failed, can only happen if there
               are very few or no keyframes between min/max */
            pos=pos_min;
        }
        if(pos <= pos_min)
            pos= pos_min + 1;
        else if(pos > pos_limit)
            pos= pos_limit;
        start_pos= pos;

        ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
        if(pos == pos_max)
            no_change++;
        else
            no_change=0;
#ifdef DEBUG_SEEK
        av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
               pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
               start_pos, no_change);
#endif
        if(ts == AV_NOPTS_VALUE){
            av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
            return -1;
        }
        assert(ts != AV_NOPTS_VALUE);
        /* both branches fire on ts == target_ts, collapsing the interval */
        if (target_ts <= ts) {
            pos_limit = start_pos - 1;
            pos_max = pos;
            ts_max = ts;
        }
        if (target_ts >= ts) {
            pos_min = pos;
            ts_min = ts;
        }
    }

    pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
    ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
#ifdef DEBUG_SEEK
    pos_min = pos;
    ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    pos_min++;
    ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
           pos, ts_min, target_ts, ts_max);
#endif
    *ts_ret= ts;
    return pos;
}
1453
1454 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1455 int64_t pos_min, pos_max;
1456 #if 0
1457 AVStream *st;
1458
1459 if (stream_index < 0)
1460 return -1;
1461
1462 st= s->streams[stream_index];
1463 #endif
1464
1465 pos_min = s->data_offset;
1466 pos_max = url_fsize(s->pb) - 1;
1467
1468 if (pos < pos_min) pos= pos_min;
1469 else if(pos > pos_max) pos= pos_max;
1470
1471 url_fseek(s->pb, pos, SEEK_SET);
1472
1473 #if 0
1474 av_update_cur_dts(s, st, ts);
1475 #endif
1476 return 0;
1477 }
1478
/**
 * Generic seek via the stream's index. If the target timestamp lies beyond
 * the indexed region, packets are read (and discarded) from the last indexed
 * position onward so the demuxer extends the index, then the search is
 * retried.
 *
 * @return 0 on success, negative on error
 */
static int av_seek_frame_generic(AVFormatContext *s,
                                 int stream_index, int64_t timestamp, int flags)
{
    int index, ret;
    AVStream *st;
    AVIndexEntry *ie;

    st = s->streams[stream_index];

    index = av_index_search_timestamp(st, timestamp, flags);

    if(index < 0 || index==st->nb_index_entries-1){
        int i;
        AVPacket pkt;

        /* resume reading at the last indexed position (or the start of the
           data) to grow the index up to the target timestamp */
        if(st->nb_index_entries){
            assert(st->index_entries);
            ie= &st->index_entries[st->nb_index_entries-1];
            if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
                return ret;
            av_update_cur_dts(s, st, ie->timestamp);
        }else{
            if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
                return ret;
        }
        for(i=0;; i++) {
            int ret;  /* NOTE(review): shadows the outer 'ret'; intentional? */
            do{
                ret = av_read_frame(s, &pkt);
            }while(ret == AVERROR(EAGAIN));
            if(ret<0)
                break;
            /* releases only pkt's data; the header fields read below stay
               valid after this call */
            av_free_packet(&pkt);
            if(stream_index == pkt.stream_index){
                if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
                    break;
            }
        }
        index = av_index_search_timestamp(st, timestamp, flags);
    }
    if (index < 0)
        return -1;

    av_read_frame_flush(s);
    /* give the demuxer's own seek a chance before using the index */
    if (s->iformat->read_seek){
        if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
            return 0;
    }
    ie = &st->index_entries[index];
    if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
        return ret;
    av_update_cur_dts(s, st, ie->timestamp);

    return 0;
}
1534
1535 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1536 {
1537 int ret;
1538 AVStream *st;
1539
1540 av_read_frame_flush(s);
1541
1542 if(flags & AVSEEK_FLAG_BYTE)
1543 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1544
1545 if(stream_index < 0){
1546 stream_index= av_find_default_stream_index(s);
1547 if(stream_index < 0)
1548 return -1;
1549
1550 st= s->streams[stream_index];
1551 /* timestamp for default must be expressed in AV_TIME_BASE units */
1552 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1553 }
1554
1555 /* first, we try the format specific seek */
1556 if (s->iformat->read_seek)
1557 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1558 else
1559 ret = -1;
1560 if (ret >= 0) {
1561 return 0;
1562 }
1563
1564 if(s->iformat->read_timestamp)
1565 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1566 else
1567 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1568 }
1569
/**
 * Seek to a timestamp ts, constrained so the result lands in
 * [min_ts, max_ts]. Prefers the demuxer's read_seek2(); otherwise falls
 * back to the old av_seek_frame() API.
 *
 * @return >=0 on success, negative on error
 */
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    if(min_ts > ts || max_ts < ts)
        return -1;

    av_read_frame_flush(s);

    if (s->iformat->read_seek2)
        return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);

    if(s->iformat->read_timestamp){
        //try to seek via read_timestamp()
    }

    //Fallback to old API if new is not implemented but old is
    //Note the old API has somewhat different semantics
    /* seek backward when ts is closer to min_ts than to max_ts */
    if(s->iformat->read_seek || 1)
        return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));

    // try some generic seek like av_seek_frame_generic() but with new ts semantics
}
1591
1592 /*******************************************************/
1593
1594 /**
1595 * Returns TRUE if the stream has accurate duration in any stream.
1596 *
1597 * @return TRUE if the stream has accurate duration for at least one component.
1598 */
1599 static int av_has_duration(AVFormatContext *ic)
1600 {
1601 int i;
1602 AVStream *st;
1603
1604 for(i = 0;i < ic->nb_streams; i++) {
1605 st = ic->streams[i];
1606 if (st->duration != AV_NOPTS_VALUE)
1607 return 1;
1608 }
1609 return 0;
1610 }
1611
1612 /**
1613 * Estimate the stream timings from the one of each components.
1614 *
1615 * Also computes the global bitrate if possible.
1616 */
static void av_update_stream_timings(AVFormatContext *ic)
{
    int64_t start_time, start_time1, end_time, end_time1;
    int64_t duration, duration1;
    int i;
    AVStream *st;

    /* INT64_MAX / INT64_MIN act as "not yet seen" sentinels */
    start_time = INT64_MAX;
    end_time = INT64_MIN;
    duration = INT64_MIN;
    for(i = 0;i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
            /* global start time: earliest stream start, in AV_TIME_BASE units */
            start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
            if (start_time1 < start_time)
                start_time = start_time1;
            if (st->duration != AV_NOPTS_VALUE) {
                /* global end time: latest stream start + duration */
                end_time1 = start_time1
                          + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
                if (end_time1 > end_time)
                    end_time = end_time1;
            }
        }
        if (st->duration != AV_NOPTS_VALUE) {
            duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
            if (duration1 > duration)
                duration = duration1;
        }
    }
    if (start_time != INT64_MAX) {
        ic->start_time = start_time;
        if (end_time != INT64_MIN) {
            /* the end-to-start span may exceed any single stream's duration */
            if (end_time - start_time > duration)
                duration = end_time - start_time;
        }
    }
    if (duration != INT64_MIN) {
        ic->duration = duration;
        if (ic->file_size > 0) {
            /* compute the bitrate */
            ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
                (double)ic->duration;
        }
    }
}
1662
1663 static void fill_all_stream_timings(AVFormatContext *ic)
1664 {
1665 int i;
1666 AVStream *st;
1667
1668 av_update_stream_timings(ic);
1669 for(i = 0;i < ic->nb_streams; i++) {
1670 st = ic->streams[i];
1671 if (st->start_time == AV_NOPTS_VALUE) {
1672 if(ic->start_time != AV_NOPTS_VALUE)
1673 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1674 if(ic->duration != AV_NOPTS_VALUE)
1675 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1676 }
1677 }
1678 }
1679
1680 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1681 {
1682 int64_t filesize, duration;
1683 int bit_rate, i;
1684 AVStream *st;
1685
1686 /* if bit_rate is already set, we believe it */
1687 if (ic->bit_rate == 0) {
1688 bit_rate = 0;
1689 for(i=0;i<ic->nb_streams;i++) {
1690 st = ic->streams[i];
1691 bit_rate += st->codec->bit_rate;
1692 }
1693 ic->bit_rate = bit_rate;
1694 }
1695
1696 /* if duration is already set, we believe it */
1697 if (ic->duration == AV_NOPTS_VALUE &&
1698 ic->bit_rate != 0 &&
1699 ic->file_size != 0) {
1700 filesize = ic->file_size;
1701 if (filesize > 0) {
1702 for(i = 0; i < ic->nb_streams; i++) {
1703 st = ic->streams[i];
1704 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1705 if (st->duration == AV_NOPTS_VALUE)
1706 st->duration = duration;
1707 }
1708 }
1709 }
1710 }
1711
1712 #define DURATION_MAX_READ_SIZE 250000
1713
/* only usable for MPEG-PS streams */
/**
 * Estimate start times and durations from actual PTS values: read packets
 * from the beginning of the file for per-stream start times, then from the
 * last DURATION_MAX_READ_SIZE bytes for end times. Restores the read
 * position to old_offset afterwards.
 */
static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
{
    AVPacket pkt1, *pkt = &pkt1;
    AVStream *st;
    int read_size, i, ret;
    int64_t end_time;
    int64_t filesize, offset, duration;

    ic->cur_st = NULL;

    /* flush packet queue */
    flush_packet_queue(ic);

    /* drop parser state so raw packets are read below */
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if (st->parser) {
            av_parser_close(st->parser);
            st->parser= NULL;
            av_free_packet(&st->cur_pkt);
        }
    }

    /* we read the first packets to get the first PTS (not fully
       accurate, but it is enough now) */
    url_fseek(ic->pb, 0, SEEK_SET);
    read_size = 0;
    for(;;) {
        if (read_size >= DURATION_MAX_READ_SIZE)
            break;
        /* if all info is available, we can stop */
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->start_time == AV_NOPTS_VALUE)
                break;
        }
        if (i == ic->nb_streams)
            break;

        do{
            ret = av_read_packet(ic, pkt);
        }while(ret == AVERROR(EAGAIN));
        if (ret != 0)
            break;
        read_size += pkt->size;
        st = ic->streams[pkt->stream_index];
        if (pkt->pts != AV_NOPTS_VALUE) {
            if (st->start_time == AV_NOPTS_VALUE)
                st->start_time = pkt->pts;
        }
        av_free_packet(pkt);
    }

    /* estimate the end time (duration) */
    /* XXX: may need to support wrapping */
    filesize = ic->file_size;
    offset = filesize - DURATION_MAX_READ_SIZE;
    if (offset < 0)
        offset = 0;

    url_fseek(ic->pb, offset, SEEK_SET);
    read_size = 0;
    for(;;) {
        if (read_size >= DURATION_MAX_READ_SIZE)
            break;

        do{
            ret = av_read_packet(ic, pkt);
        }while(ret == AVERROR(EAGAIN));
        if (ret != 0)
            break;
        read_size += pkt->size;
        st = ic->streams[pkt->stream_index];
        if (pkt->pts != AV_NOPTS_VALUE &&
            st->start_time != AV_NOPTS_VALUE) {
            /* keep the largest pts - start_time seen near the end of file */
            end_time = pkt->pts;
            duration = end_time - st->start_time;
            if (duration > 0) {
                if (st->duration == AV_NOPTS_VALUE ||
                    st->duration < duration)
                    st->duration = duration;
            }
        }
        av_free_packet(pkt);
    }

    fill_all_stream_timings(ic);

    /* restore the original read position and reset decode timestamps */
    url_fseek(ic->pb, old_offset, SEEK_SET);
    for(i=0; i<ic->nb_streams; i++){
        st= ic->streams[i];
        st->cur_dts= st->first_dts;
        st->last_IP_pts = AV_NOPTS_VALUE;
    }
}
1809
/**
 * Fill in missing start time/duration/bitrate info for the container,
 * choosing the most accurate method the format allows: PTS scanning for
 * MPEG-PS/TS, per-stream timings, or a bitrate-based estimate.
 */
static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
{
    int64_t file_size;

    /* get the file size, if possible */
    if (ic->iformat->flags & AVFMT_NOFILE) {
        file_size = 0;
    } else {
        file_size = url_fsize(ic->pb);
        if (file_size < 0)
            file_size = 0;
    }
    ic->file_size = file_size;

    if ((!strcmp(ic->iformat->name, "mpeg") ||
         !strcmp(ic->iformat->name, "mpegts")) &&
        file_size && !url_is_streamed(ic->pb)) {
        /* get accurate estimate from the PTSes */
        av_estimate_timings_from_pts(ic, old_offset);
    } else if (av_has_duration(ic)) {
        /* at least one component has timings - we use them for all
           the components */
        fill_all_stream_timings(ic);
    } else {
        /* less precise: use bitrate info */
        av_estimate_timings_from_bit_rate(ic);
    }
    av_update_stream_timings(ic);

#if 0
    {
        int i;
        AVStream *st;
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
        printf("%d: start_time: %0.3f duration: %0.3f\n",
               i, (double)st->start_time / AV_TIME_BASE,
               (double)st->duration / AV_TIME_BASE);
        }
        printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
               (double)ic->start_time / AV_TIME_BASE,
               (double)ic->duration / AV_TIME_BASE,
               ic->bit_rate / 1000);
    }
#endif
}
1856
1857 static int has_codec_parameters(AVCodecContext *enc)
1858 {
1859 int val;
1860 switch(enc->codec_type) {
1861 case CODEC_TYPE_AUDIO:
1862 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
1863 if(!enc->frame_size &&
1864 (enc->codec_id == CODEC_ID_VORBIS ||
1865 enc->codec_id == CODEC_ID_AAC))
1866 return 0;
1867 break;
1868 case CODEC_TYPE_VIDEO:
1869 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1870 break;
1871 default:
1872 val = 1;
1873 break;
1874 }
1875 return enc->codec_id != CODEC_ID_NONE && val != 0;
1876 }
1877
1878 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
1879 {
1880 int16_t *samples;
1881 AVCodec *codec;
1882 int got_picture, data_size, ret=0;
1883 AVFrame picture;
1884
1885 if(!st->codec->codec){
1886 codec = avcodec_find_decoder(st->codec->codec_id);
1887 if (!codec)
1888 return -1;
1889 ret = avcodec_open(st->codec, codec);
1890 if (ret < 0)
1891 return ret;
1892 }
1893
1894 if(!has_codec_parameters(st->codec)){
1895 switch(st->codec->codec_type) {
1896 case CODEC_TYPE_VIDEO:
1897 avcodec_get_frame_defaults(&picture);
1898 ret = avcodec_decode_video2(st->codec, &picture,
1899 &got_picture, avpkt);
1900 break;
1901 case CODEC_TYPE_AUDIO:
1902 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1903 samples = av_malloc(data_size);
1904 if (!samples)
1905 goto fail;
1906 ret = avcodec_decode_audio3(st->codec, samples,
1907 &data_size, avpkt);
1908 av_free(samples);
1909 break;
1910 default:
1911 break;
1912 }
1913 }
1914 fail:
1915 return ret;
1916 }
1917
1918 unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id)
1919 {
1920 while (tags->id != CODEC_ID_NONE) {
1921 if (tags->id == id)
1922 return tags->tag;
1923 tags++;
1924 }
1925 return 0;
1926 }
1927
1928 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1929 {
1930 int i;
1931 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1932 if(tag == tags[i].tag)
1933 return tags[i].id;
1934 }
1935 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1936 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1937 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1938 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1939 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1940 return tags[i].id;
1941 }
1942 return CODEC_ID_NONE;
1943 }
1944
1945 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1946 {
1947 int i;
1948 for(i=0; tags && tags[i]; i++){
1949 int tag= ff_codec_get_tag(tags[i], id);
1950 if(tag) return tag;
1951 }
1952 return 0;
1953 }
1954
1955 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1956 {
1957 int i;
1958 for(i=0; tags && tags[i]; i++){
1959 enum CodecID id= ff_codec_get_id(tags[i], tag);
1960 if(id!=CODEC_ID_NONE) return id;
1961 }
1962 return CODEC_ID_NONE;
1963 }
1964
/**
 * Fill in missing chapter end times: each chapter without an explicit end
 * ends where the next one starts; the last chapter ends with the file.
 */
static void compute_chapters_end(AVFormatContext *s)
{
    unsigned int i;

    for (i=0; i+1<s->nb_chapters; i++)
        if (s->chapters[i]->end == AV_NOPTS_VALUE) {
            assert(s->chapters[i]->start <= s->chapters[i+1]->start);
            assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
            s->chapters[i]->end = s->chapters[i+1]->start;
        }

    /* after the loop i == nb_chapters-1 when nb_chapters > 0, i.e. the
       last chapter; close it with start_time + duration of the file */
    if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
        assert(s->start_time != AV_NOPTS_VALUE);
        assert(s->duration > 0);
        s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
                                           AV_TIME_BASE_Q,
                                           s->chapters[i]->time_base);
    }
}
1984
1985 #define MAX_STD_TIMEBASES (60*12+5)
static int get_std_framerate(int i){
    /* indices [0,60*12): frame rates of i/1.001 fps, encoded as i*1001;
       the 5 remaining slots hold common integer rates scaled by 1000*12 */
    static const int extra_rates[] = { 24, 30, 60, 12, 15 };
    if (i < 60*12)
        return i * 1001;
    return extra_rates[i - 60*12] * 1000 * 12;
}
1990
1991 /*
1992 * Is the time base unreliable.
1993 * This is a heuristic to balance between quick acceptance of the values in
1994 * the headers vs. some extra checks.
1995 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1996 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1997 * And there are "variable" fps files this needs to detect as well.
1998 */
1999 static int tb_unreliable(AVCodecContext *c){
2000 if( c->time_base.den >= 101L*c->time_base.num
2001 || c->time_base.den < 5L*c->time_base.num
2002 /* || c->codec_tag == AV_RL32("DIVX")
2003 || c->codec_tag == AV_RL32("XVID")*/
2004 || c->codec_id == CODEC_ID_MPEG2VIDEO
2005 || c->codec_id == CODEC_ID_H264
2006 )
2007 return 1;
2008 return 0;
2009 }
2010
2011 int av_find_stream_info(AVFormatContext *ic)
2012 {
2013 int i, count, ret, read_size, j;
2014 AVStream *st;
2015 AVPacket pkt1, *pkt;
2016 int64_t last_dts[MAX_STREAMS];
2017 int64_t duration_gcd[MAX_STREAMS]={0};
2018 int duration_count[MAX_STREAMS]={0};
2019 double (*duration_error)[MAX_STD_TIMEBASES];
2020 int64_t old_offset = url_ftell(ic->pb);
2021 int64_t codec_info_duration[MAX_STREAMS]={0};
2022 int codec_info_nb_frames[MAX_STREAMS]={0};
2023
2024 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2025 if (!duration_error) return AVERROR(ENOMEM);
2026
2027 for(i=0;i<ic->nb_streams;i++) {
2028 st = ic->streams[i];
2029 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2030 /* if(!st->time_base.num)
2031 st->time_base= */
2032 if(!st->codec->time_base.num)
2033 st->codec->time_base= st->time_base;
2034 }
2035 //only for the split stuff
2036 if (!st->parser) {
2037 st->parser = av_parser_init(st->codec->codec_id);
2038 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2039 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2040 }
2041 }
2042 }
2043
2044 for(i=0;i<MAX_STREAMS;i++){
2045 last_dts[i]= AV_NOPTS_VALUE;
2046 }
2047
2048 count = 0;
2049 read_size = 0;
2050 for(;;) {
2051 if(url_interrupt_cb()){
2052 ret= AVERROR(EINTR);
2053 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2054 break;
2055 }
2056
2057 /* check if one codec still needs to be handled */
2058 for(i=0;i<ic->nb_streams;i++) {
2059 st = ic->streams[i];
2060 if (!has_codec_parameters(st->codec))
2061 break;
2062 /* variable fps and no guess at the real fps */
2063 if( tb_unreliable(st->codec)
2064 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2065 break;
2066 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2067 break;
2068 if(st->first_dts == AV_NOPTS_VALUE)
2069 break;
2070 }
2071 if (i == ic->nb_streams) {
2072 /* NOTE: if the format has no header, then we need to read
2073 some packets to get most of the streams, so we cannot
2074 stop here */
2075 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2076 /* if we found the info for all the codecs, we can stop */
2077 ret = count;
2078 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2079 break;
2080 }
2081 }
2082 /* we did not get all the codec info, but we read too much data */
2083 if (read_size >= ic->probesize) {
2084 ret = count;
2085 av_log(ic, AV_LOG_DEBUG, "MAX_READ_SIZE:%d reached\n", ic->probesize);
2086 break;
2087 }
2088
2089 /* NOTE: a new stream can be added there if no header in file
2090 (AVFMTCTX_NOHEADER) */
2091 ret = av_read_frame_internal(ic, &pkt1);
2092 if(ret == AVERROR(EAGAIN))
2093 continue;
2094 if (ret < 0) {
2095 /* EOF or error */
2096 ret = -1; /* we could not have all the codec parameters before EOF */
2097 for(i=0;i<ic->nb_streams;i++) {
2098 st = ic->streams[i];
2099 if (!has_codec_parameters(st->codec)){
2100 char buf[256];
2101 avcodec_string(buf, sizeof(buf), st->codec, 0);
2102 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
2103 } else {
2104 ret = 0;
2105 }
2106 }
2107 break;
2108 }
2109
2110 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2111 if(av_dup_packet(pkt) < 0) {
2112 av_free(duration_error);
2113 return AVERROR(ENOMEM);
2114 }
2115
2116 read_size += pkt->size;
2117
2118 st = ic->streams[pkt->stream_index];
2119 if(codec_info_nb_frames[st->index]>1) {
2120 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){
2121 av_log(ic, AV_LOG_DEBUG, "max_analyze_duration reached\n");
2122 break;
2123 }
2124 codec_info_duration[st->index] += pkt->duration;
2125 }
2126 if (pkt->duration != 0)
2127 codec_info_nb_frames[st->index]++;
2128
2129 {
2130 int index= pkt->stream_index;
2131 int64_t last= last_dts[index];
2132 int64_t duration= pkt->dts - last;
2133
2134 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2135 double dur= duration * av_q2d(st->time_base);
2136
2137 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2138 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2139 if(duration_count[index] < 2)
2140 memset(duration_error[index], 0, sizeof(*duration_error));
2141 for(i=1; i<MAX_STD_TIMEBASES; i++){
2142 int framerate= get_std_framerate(i);
2143 int ticks= lrintf(dur*framerate/(1001*12));
2144 double error= dur - ticks*1001*12/(double)framerate;
2145 duration_error[index][i] += error*error;
2146 }
2147 duration_count[index]++;
2148 // ignore the first 4 values, they might have some random jitter
2149 if (duration_count[index] > 3)
2150 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
2151 }
2152 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2153 last_dts[pkt->stream_index]= pkt->dts;
2154 }
2155 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2156 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2157 if(i){
2158 st->codec->extradata_size= i;
2159 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2160 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2161 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2162 }
2163 }
2164
2165 /* if still no information, we try to open the codec and to
2166 decompress the frame. We try to avoid that in most cases as
2167 it takes longer and uses more memory. For MPEG-4, we need to
2168 decompress for QuickTime. */
2169 if (!has_codec_parameters(st->codec) /*&&
2170 (st->codec->codec_id == CODEC_ID_FLV1 ||
2171 st->codec->codec_id == CODEC_ID_H264 ||
2172 st->codec->codec_id == CODEC_ID_H263 ||
2173 st->codec->codec_id == CODEC_ID_H261 ||
2174 st->codec->codec_id == CODEC_ID_VORBIS ||
2175 st->codec->codec_id == CODEC_ID_MJPEG ||
2176 st->codec->codec_id == CODEC_ID_PNG ||
2177 st->codec->codec_id == CODEC_ID_PAM ||
2178 st->codec->codec_id == CODEC_ID_PGM ||
2179 st->codec->codec_id == CODEC_ID_PGMYUV ||
2180 st->codec->codec_id == CODEC_ID_PBM ||
2181 st->codec->codec_id == CODEC_ID_PPM ||
2182 st->codec->codec_id == CODEC_ID_SHORTEN ||
2183 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2184 try_decode_frame(st, pkt);
2185
2186 count++;
2187 }
2188
2189 // close codecs which were opened in try_decode_frame()
2190 for(i=0;i<ic->nb_streams;i++) {
2191 st = ic->streams[i];
2192 if(st->codec->codec)
2193 avcodec_close(st->codec);
2194 }
2195 for(i=0;i<ic->nb_streams;i++) {
2196 st = ic->streams[i];
2197 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2198 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2199 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2200
2201 // the check for tb_unreliable() is not completely correct, since this is not about handling
2202 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2203 // ipmovie.c produces.
2204 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
2205 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2206 if(duration_count[i]
2207 && tb_unreliable(st->codec) /*&&
2208 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2209 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2210 int num = 0;
2211 double best_error= 2*av_q2d(st->time_base);
2212 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2213
2214 for(j=1; j<MAX_STD_TIMEBASES; j++){
2215 double error= duration_error[i][j] * get_std_framerate(j);
2216 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2217 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2218 if(error < best_error){
2219 best_error= error;
2220 num = get_std_framerate(j);
2221 }
2222 }
2223 // do not increase frame rate by more than 1 % in order to match a standard rate.
2224 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2225 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2226 }
2227
2228 if (!st->r_frame_rate.num){
2229 if( st->codec->time_base.den * (int64_t)st->time_base.num
2230 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2231 st->r_frame_rate.num = st->codec->time_base.den;
2232 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2233 }else{
2234 st->r_frame_rate.num = st->time_base.den;
2235 st->r_frame_rate.den = st->time_base.num;
2236 }
2237 }
2238 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2239 if(!st->codec->bits_per_coded_sample)
2240 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2241 }
2242 }
2243
2244 av_estimate_timings(ic, old_offset);
2245
2246 compute_chapters_end(ic);
2247
2248 #if 0
2249 /* correct DTS for B-frame streams with no timestamps */
2250 for(i=0;i<ic->nb_streams;i++) {
2251 st = ic->streams[i];
2252 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2253 if(b-frames){
2254 ppktl = &ic->packet_buffer;
2255 while(ppkt1){
2256 if(ppkt1->stream_index != i)
2257 continue;
2258 if(ppkt1->pkt->dts < 0)
2259 break;
2260 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2261 break;
2262 ppkt1->pkt->dts -= delta;
2263 ppkt1= ppkt1->next;
2264 }
2265 if(ppkt1)
2266 continue;
2267 st->cur_dts -= delta;
2268 }
2269 }
2270 }
2271 #endif
2272
2273 av_free(duration_error);
2274
2275 return ret;
2276 }
2277
2278 /*******************************************************/
2279
2280 int av_read_play(AVFormatContext *s)
2281 {
2282 if (s->iformat->read_play)
2283 return s->iformat->read_play(s);
2284 if (s->pb)
2285 return av_url_read_fpause(s->pb, 0);
2286 return AVERROR(ENOSYS);
2287 }
2288
2289 int av_read_pause(AVFormatContext *s)
2290 {
2291 if (s->iformat->read_pause)
2292 return s->iformat->read_pause(s);
2293 if (s->pb)
2294 return av_url_read_fpause(s->pb, 1);
2295 return AVERROR(ENOSYS);
2296 }
2297
/**
 * Free a demuxer context and everything it owns: streams (including their
 * codec contexts, parsers and index entries), programs, chapters, buffered
 * packets, metadata and finally the context itself.
 * Does NOT close the underlying I/O context; see av_close_input_file().
 */
void av_close_input_stream(AVFormatContext *s)
{
    int i;
    AVStream *st;

    /* give the demuxer a chance to release its private state first */
    if (s->iformat->read_close)
        s->iformat->read_close(s);
    for(i=0;i<s->nb_streams;i++) {
        /* free all data in a stream component */
        st = s->streams[i];
        if (st->parser) {
            av_parser_close(st->parser);
            /* cur_pkt is only owned while a parser is active */
            av_free_packet(&st->cur_pkt);
        }
        av_metadata_free(&st->metadata);
        av_free(st->index_entries);
        /* extradata must go before the codec context that points at it */
        av_free(st->codec->extradata);
        av_free(st->codec);
#if LIBAVFORMAT_VERSION_INT < (53<<16)
        /* pre-53 ABI kept a separately allocated filename per stream */
        av_free(st->filename);
#endif
        av_free(st->priv_data);
        av_free(st);
    }
    for(i=s->nb_programs-1; i>=0; i--) {
#if LIBAVFORMAT_VERSION_INT < (53<<16)
        /* pre-53 ABI stored these as raw strings, not metadata */
        av_freep(&s->programs[i]->provider_name);
        av_freep(&s->programs[i]->name);
#endif
        av_metadata_free(&s->programs[i]->metadata);
        av_freep(&s->programs[i]->stream_index);
        av_freep(&s->programs[i]);
    }
    av_freep(&s->programs);
    /* drop any packets still queued for the user */
    flush_packet_queue(s);
    av_freep(&s->priv_data);
    while(s->nb_chapters--) {
#if LIBAVFORMAT_VERSION_INT < (53<<16)
        av_free(s->chapters[s->nb_chapters]->title);
#endif
        av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
        av_free(s->chapters[s->nb_chapters]);
    }
    av_freep(&s->chapters);
    av_metadata_free(&s->metadata);
    av_free(s);
}
2345
2346 void av_close_input_file(AVFormatContext *s)
2347 {
2348 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2349 av_close_input_stream(s);
2350 if (pb)
2351 url_fclose(pb);
2352 }
2353
2354 AVStream *av_new_stream(AVFormatContext *s, int id)
2355 {
2356 AVStream *st;
2357 int i;
2358
2359 if (s->nb_streams >= MAX_STREAMS)
2360 return NULL;
2361
2362 st = av_mallocz(sizeof(AVStream));
2363 if (!st)
2364 return NULL;
2365
2366 st->codec= avcodec_alloc_context();
2367 if (s->iformat) {
2368 /* no default bitrate if decoding */
2369 st->codec->bit_rate = 0;
2370 }
2371 st->index = s->nb_streams;
2372 st->id = id;
2373 st->start_time = AV_NOPTS_VALUE;
2374 st->duration = AV_NOPTS_VALUE;
2375 /* we set the current DTS to 0 so that formats without any timestamps
2376 but durations get some timestamps, formats with some unknown
2377 timestamps have their first few packets buffered and the
2378 timestamps corrected before they are returned to the user */
2379 st->cur_dts = 0;
2380 st->first_dts = AV_NOPTS_VALUE;
2381 st->probe_packets = MAX_PROBE_PACKETS;
2382
2383 /* default pts setting is MPEG-like */
2384 av_set_pts_info(st, 33, 1, 90000);
2385 st->last_IP_pts = AV_NOPTS_VALUE;
2386 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2387 st->pts_buffer[i]= AV_NOPTS_VALUE;
2388 st->reference_dts = AV_NOPTS_VALUE;
2389
2390 st->sample_aspect_ratio = (AVRational){0,1};
2391
2392 s->streams[s->nb_streams++] = st;
2393 return st;
2394 }
2395
2396 AVProgram *av_new_program(AVFormatContext *ac, int id)
2397 {
2398 AVProgram *program=NULL;
2399 int i;
2400
2401 #ifdef DEBUG_SI
2402 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2403 #endif
2404
2405 for(i=0; i<ac->nb_programs; i++)
2406 if(ac->programs[i]->id == id)
2407 program = ac->programs[i];
2408
2409 if(!program){
2410 program = av_mallocz(sizeof(AVProgram));
2411 if (!program)
2412 return NULL;
2413 dynarray_add(&ac->programs, &ac->nb_programs, program);
2414 program->discard = AVDISCARD_NONE;
2415 }
2416 program->id = id;
2417
2418 return program;
2419 }
2420
2421 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2422 {
2423 AVChapter *chapter = NULL;
2424 int i;
2425
2426 for(i=0; i<s->nb_chapters; i++)
2427 if(s->chapters[i]->id == id)
2428 chapter = s->chapters[i];
2429
2430 if(!chapter){
2431 chapter= av_mallocz(sizeof(AVChapter));
2432 if(!chapter)
2433 return NULL;
2434 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2435 }
2436 #if LIBAVFORMAT_VERSION_INT < (53<<16)
2437 av_free(chapter->title);
2438 #endif
2439 av_metadata_set(&chapter->metadata, "title", title);
2440 chapter->id = id;
2441 chapter->time_base= time_base;
2442 chapter->start = start;
2443 chapter->end = end;
2444
2445 return chapter;
2446 }
2447
2448 /************************************************************/
2449 /* output media file */
2450
2451 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2452 {
2453 int ret;
2454
2455 if (s->oformat->priv_data_size > 0) {
2456 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2457 if (!s->priv_data)
2458 return AVERROR(ENOMEM);
2459 } else
2460 s->priv_data = NULL;
2461
2462 if (s->oformat->set_parameters) {
2463 ret = s->oformat->set_parameters(s, ap);
2464 if (ret < 0)
2465 return ret;
2466 }
2467 return 0;
2468 }
2469
/**
 * Validate per-stream parameters, fill in defaults (block_align, codec
 * tags), write the container header via the muxer and initialize per-stream
 * PTS generation state.
 * @return 0 on success, negative on error (-1 for parameter errors,
 *         AVERROR codes otherwise)
 */
int av_write_header(AVFormatContext *s)
{
    int ret, i;
    AVStream *st;

    // some sanity checks
    for(i=0;i<s->nb_streams;i++) {
        st = s->streams[i];

        switch (st->codec->codec_type) {
        case CODEC_TYPE_AUDIO:
            if(st->codec->sample_rate<=0){
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                return -1;
            }
            /* derive block_align from channels * bytes-per-sample when unset */
            if(!st->codec->block_align)
                st->codec->block_align = st->codec->channels *
                    av_get_bits_per_sample(st->codec->codec_id) >> 3;
            break;
        case CODEC_TYPE_VIDEO:
            if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
                av_log(s, AV_LOG_ERROR, "time base not set\n");
                return -1;
            }
            if(st->codec->width<=0 || st->codec->height<=0){
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                return -1;
            }
            if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
                av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
                return -1;
            }
            break;
        }

        if(s->oformat->codec_tag){
            if(st->codec->codec_tag){
                //FIXME
                //check that tag + id is in the table
                //if neither is in the table -> OK
                //if tag is in the table with another id -> FAIL
                //if id is in the table with another tag -> FAIL unless strict < ?
            }else
                st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
        }

        if(s->oformat->flags & AVFMT_GLOBALHEADER &&
            !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
          av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
    }

    /* av_set_parameters() may not have been called; allocate priv_data here */
    if (!s->priv_data && s->oformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->oformat->priv_data_size);
        if (!s->priv_data)
            return AVERROR(ENOMEM);
    }

#if LIBAVFORMAT_VERSION_MAJOR < 53
    ff_metadata_mux_compat(s);
#endif

    if(s->oformat->write_header){
        ret = s->oformat->write_header(s);
        if (ret < 0)
            return ret;
    }

    /* init PTS generation */
    for(i=0;i<s->nb_streams;i++) {
        int64_t den = AV_NOPTS_VALUE;
        st = s->streams[i];

        switch (st->codec->codec_type) {
        case CODEC_TYPE_AUDIO:
            den = (int64_t)st->time_base.num * st->codec->sample_rate;
            break;
        case CODEC_TYPE_VIDEO:
            den = (int64_t)st->time_base.num * st->codec->time_base.den;
            break;
        default:
            break;
        }
        if (den != AV_NOPTS_VALUE) {
            if (den <= 0)
                return AVERROR_INVALIDDATA;
            /* fractional PTS accumulator, advanced by compute_pkt_fields2() */
            av_frac_init(&st->pts, 0, 0, den);
        }
    }
    return 0;
}
2560
//FIXME merge with compute_pkt_fields
/**
 * Fill in missing packet fields (duration, pts, dts) before muxing and
 * validate timestamp monotonicity, then advance the stream's PTS state.
 * @return 0 on success, -1 on non-monotone dts or pts < dts
 */
static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
    /* number of frames the codec may reorder (pts -> dts delay) */
    int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
    int num, den, frame_size, i;

//    av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);

/*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
        return -1;*/

    /* duration field */
    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, NULL, pkt);
        if (den && num) {
            /* rescale one frame duration into the stream time base */
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
        }
    }

    /* without reordering, dts is a valid pts */
    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
        pkt->pts= pkt->dts;

    //XXX/FIXME this is a temporary hack until all encoders output pts
    if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
        pkt->dts=
//        pkt->pts= st->cur_dts;
        pkt->pts= st->pts.val;
    }

    //calculate dts from pts
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
        /* keep a sliding window of the last delay+1 pts values; the
           smallest one is the dts of the current packet */
        st->pts_buffer[0]= pkt->pts;
        for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
            st->pts_buffer[i]= (i-delay-1) * pkt->duration;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);

        pkt->dts= st->pts_buffer[0];
    }

    if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
        av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
        return -1;
    }
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
        av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
        return -1;
    }

//    av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
    st->cur_dts= pkt->dts;
    st->pts.val= pkt->dts;

    /* update pts */
    switch (st->codec->codec_type) {
    case CODEC_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);

        /* HACK/FIXME, we skip the initial 0 size packets as they are most
           likely equal to the encoder delay, but it would be better if we
           had the real timestamps from the encoder */
        if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
            av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
        }
        break;
    case CODEC_TYPE_VIDEO:
        av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
        break;
    default:
        break;
    }
    return 0;
}
2633
2634 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2635 {
2636 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2637
2638 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2639 return ret;
2640
2641 ret= s->oformat->write_packet(s, pkt);
2642 if(!ret)
2643 ret= url_ferror(s->pb);
2644 return ret;
2645 }
2646
2647 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2648 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2649 {
2650 AVPacketList **next_point, *this_pktl;
2651
2652 this_pktl = av_mallocz(sizeof(AVPacketList));
2653 this_pktl->pkt= *pkt;
2654 pkt->destruct= NULL; // do not free original but only the copy
2655 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
2656
2657 next_point = &s->packet_buffer;
2658 while(*next_point){
2659 if(compare(s, &(*next_point)->pkt, pkt))
2660 break;
2661 next_point= &(*next_point)->next;
2662 }
2663 this_pktl->next= *next_point;
2664 *next_point= this_pktl;
2665 }
2666
2667 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2668 {
2669 AVStream *st = s->streams[ pkt ->stream_index];
2670 AVStream *st2= s->streams[ next->stream_index];
2671 int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
2672 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2673
2674 if (pkt->dts == AV_NOPTS_VALUE)
2675 return 0;
2676
2677 return next->dts * left > pkt->dts * right; //FIXME this can overflow
2678 }
2679
/**
 * Interleave packets by dts: buffer the incoming packet and emit the
 * earliest buffered packet once every stream has at least one packet
 * queued (or unconditionally when flushing).
 *
 * @param out   receives the next packet to mux (initialized empty if none)
 * @param pkt   packet to add to the buffer, may be NULL when flushing
 * @param flush if nonzero, drain the buffer without waiting for all streams
 * @return 1 if a packet was output, 0 otherwise
 */
int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
    AVPacketList *pktl;
    int stream_count=0;
    int streams[MAX_STREAMS];

    if(pkt){
        ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
    }

    /* count how many distinct streams have buffered packets */
    memset(streams, 0, sizeof(streams));
    pktl= s->packet_buffer;
    while(pktl){
//av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
        if(streams[ pktl->pkt.stream_index ] == 0)
            stream_count++;
        streams[ pktl->pkt.stream_index ]++;
        pktl= pktl->next;
    }

    /* safe to emit once every stream is represented, or when flushing */
    if(stream_count && (s->nb_streams == stream_count || flush)){
        pktl= s->packet_buffer;
        *out= pktl->pkt;

        s->packet_buffer= pktl->next;
        av_freep(&pktl);
        return 1;
    }else{
        av_init_packet(out);
        return 0;
    }
}
2711
2712 /**
2713 * Interleaves an AVPacket correctly so it can be muxed.
2714 * @param out the interleaved packet will be output here
2715 * @param in the input packet
2716 * @param flush 1 if no further packets are available as input and all
2717 * remaining packets should be output
2718 * @return 1 if a packet was output, 0 if no packet could be output,
2719 * < 0 if an error occurred
2720 */
2721 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2722 if(s->oformat->interleave_packet)
2723 return s->oformat->interleave_packet(s, out, in, flush);
2724 else
2725 return av_interleave_packet_per_dts(s, out, in, flush);
2726 }
2727
/**
 * Write a packet to an output file, interleaving it correctly with
 * packets from other streams. The packet's data is copied/buffered as
 * needed; buffered packets are written once interleaving allows.
 * @return 0 on success (packet possibly still buffered), negative on error
 */
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
    AVStream *st= s->streams[ pkt->stream_index];

    //FIXME/XXX/HACK drop zero sized packets
    if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
        return 0;

//av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
    if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
        return -1;

    /* interleaving by dts requires a dts on every packet */
    if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
        return -1;

    /* feed pkt in on the first iteration, then drain whatever the
       interleaver is ready to emit */
    for(;;){
        AVPacket opkt;
        int ret= av_interleave_packet(s, &opkt, pkt, 0);
        if(ret<=0) //FIXME cleanup needed for ret<0 ?
            return ret;

        ret= s->oformat->write_packet(s, &opkt);

        av_free_packet(&opkt);
        pkt= NULL;

        if(ret<0)
            return ret;
        if(url_ferror(s->pb))
            return url_ferror(s->pb);
    }
}
2759
/**
 * Flush all buffered (interleaved) packets, write the container trailer
 * and free the muxer's per-stream and private data.
 * @return 0 on success, negative on error
 */
int av_write_trailer(AVFormatContext *s)
{
    int ret, i;

    /* drain the interleaving buffer (flush=1 forces everything out) */
    for(;;){
        AVPacket pkt;
        ret= av_interleave_packet(s, &pkt, NULL, 1);
        if(ret<0) //FIXME cleanup needed for ret<0 ?
            goto fail;
        if(!ret)
            break;

        ret= s->oformat->write_packet(s, &pkt);

        av_free_packet(&pkt);

        if(ret<0)
            goto fail;
        if(url_ferror(s->pb))
            goto fail;
    }

    if(s->oformat->write_trailer)
        ret = s->oformat->write_trailer(s);
fail:
    /* even on error, report a pending I/O error and release resources */
    if(ret == 0)
        ret=url_ferror(s->pb);
    for(i=0;i<s->nb_streams;i++)
        av_freep(&s->streams[i]->priv_data);
    av_freep(&s->priv_data);
    return ret;
}
2792
2793 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2794 {
2795 int i, j;
2796 AVProgram *program=NULL;
2797 void *tmp;
2798
2799 for(i=0; i<ac->nb_programs; i++){
2800 if(ac->programs[i]->id != progid)
2801 continue;
2802 program = ac->programs[i];
2803 for(j=0; j<program->nb_stream_indexes; j++)
2804 if(program->stream_index[j] == idx)
2805 return;
2806
2807 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2808 if(!tmp)
2809 return;
2810 program->stream_index = tmp;
2811 program->stream_index[program->nb_stream_indexes++] = idx;
2812 return;
2813 }
2814 }
2815
/* Log a frame rate with a unit suffix, choosing the shortest exact form:
   two decimals if fractional, integer if whole, "k" notation if a whole
   multiple of 1000. */
static void print_fps(double d, const char *postfix){
    uint64_t v= lrintf(d*100);
    if     (v% 100      ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
    else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
    else                  av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
}
2822
2823 /* "user interface" functions */
/* Log a one-line human-readable description of stream i of ic
   (id, language, codec parameters, aspect ratio, frame/time-base rates). */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
{
    char buf[256];
    int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
    AVStream *st = ic->streams[i];
    int g = av_gcd(st->time_base.num, st->time_base.den);
    AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
    avcodec_string(buf, sizeof(buf), st->codec, is_output);
    av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
    /* the pid is an important information, so we display it */
    /* XXX: add a generic system */
    if (flags & AVFMT_SHOW_IDS)
        av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
    if (lang)
        av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
    /* time base in reduced form, debug level only */
    av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
    av_log(NULL, AV_LOG_INFO, ": %s", buf);
    if (st->sample_aspect_ratio.num && // default
        av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
        AVRational display_aspect_ratio;
        av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                  st->codec->width*st->sample_aspect_ratio.num,
                  st->codec->height*st->sample_aspect_ratio.den,
                  1024*1024);
        av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
                 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
                 display_aspect_ratio.num, display_aspect_ratio.den);
    }
    if(st->codec->codec_type == CODEC_TYPE_VIDEO){
        /* tbr = real frame rate, tbn = stream time base, tbc = codec time base */
        if(st->r_frame_rate.den && st->r_frame_rate.num)
            print_fps(av_q2d(st->r_frame_rate), "tbr");
        if(st->time_base.den && st->time_base.num)
            print_fps(1/av_q2d(st->time_base), "tbn");
        if(st->codec->time_base.den && st->codec->time_base.num)
            print_fps(1/av_q2d(st->codec->time_base), "tbc");
    }
    av_log(NULL, AV_LOG_INFO, "\n");
}
2862
/**
 * Log a human-readable summary of a format context: container name,
 * duration, start time, bitrate, and one line per stream (grouped by
 * program when programs are present).
 */
void dump_format(AVFormatContext *ic,
                 int index,
                 const char *url,
                 int is_output)
{
    int i;

    av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
            is_output ? "Output" : "Input",
            index,
            is_output ? ic->oformat->name : ic->iformat->name,
            is_output ? "to" : "from", url);
    if (!is_output) {
        av_log(NULL, AV_LOG_INFO, " Duration: ");
        if (ic->duration != AV_NOPTS_VALUE) {
            int hours, mins, secs, us;
            /* split the AV_TIME_BASE duration into h:m:s.centiseconds */
            secs = ic->duration / AV_TIME_BASE;
            us = ic->duration % AV_TIME_BASE;
            mins = secs / 60;
            secs %= 60;
            hours = mins / 60;
            mins %= 60;
            av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
                   (100 * us) / AV_TIME_BASE);
        } else {
            av_log(NULL, AV_LOG_INFO, "N/A");
        }
        if (ic->start_time != AV_NOPTS_VALUE) {
            int secs, us;
            av_log(NULL, AV_LOG_INFO, ", start: ");
            secs = ic->start_time / AV_TIME_BASE;
            us = ic->start_time % AV_TIME_BASE;
            av_log(NULL, AV_LOG_INFO, "%d.%06d",
                   secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
        }
        av_log(NULL, AV_LOG_INFO, ", bitrate: ");
        if (ic->bit_rate) {
            av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
        } else {
            av_log(NULL, AV_LOG_INFO, "N/A");
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }
    if(ic->nb_programs) {
        int j, k;
        /* group stream lines under their program */
        for(j=0; j<ic->nb_programs; j++) {
            AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
                                                  "name", NULL, 0);
            av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
                   name ? name->value : "");
            for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
                dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
        }
    } else
        for(i=0;i<ic->nb_streams;i++)
            dump_stream_format(ic, i, index, is_output);
}
2920
2921 #if LIBAVFORMAT_VERSION_MAJOR < 53
/* Deprecated compatibility wrapper (removed with libavformat 53);
   use av_parse_video_frame_size() directly. */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
{
    return av_parse_video_frame_size(width_ptr, height_ptr, str);
}
2926
2927 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2928 {
2929 AVRational frame_rate;
2930 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2931 *frame_rate_num= frame_rate.num;
2932 *frame_rate_den= frame_rate.den;
2933 return ret;
2934 }
2935 #endif
2936
2937 int64_t av_gettime(void)
2938 {
2939 struct timeval tv;
2940 gettimeofday(&tv,NULL);
2941 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2942 }
2943
/**
 * Parse datestr as either an absolute date/time ("now", "[YYYY-MM-DD ]HH:MM:SS[.m...][Z]")
 * or, when duration is nonzero, a signed duration ("[-]HH:MM:SS[.m...]" or "[-]S+[.m...]").
 *
 * @param datestr string to parse
 * @param duration nonzero to interpret datestr as a duration
 * @return microseconds (since the epoch for absolute dates), or INT64_MIN
 *         if the string could not be parsed
 */
int64_t parse_date(const char *datestr, int duration)
{
    const char *p;
    int64_t t;
    struct tm dt;
    int i;
    static const char * const date_fmt[] = {
        "%Y-%m-%d",
        "%Y%m%d",
    };
    static const char * const time_fmt[] = {
        "%H:%M:%S",
        "%H%M%S",
    };
    const char *q;
    int is_utc, len;
    char lastch;
    int negative = 0;

#undef time
    time_t now = time(0);

    len = strlen(datestr);
    if (len > 0)
        lastch = datestr[len - 1];
    else
        lastch = '\0';
    /* a trailing 'Z'/'z' marks the timestamp as UTC */
    is_utc = (lastch == 'z' || lastch == 'Z');

    memset(&dt, 0, sizeof(dt));

    p = datestr;
    q = NULL;
    if (!duration) {
        if (!strncasecmp(datestr, "now", len))
            return (int64_t) now * 1000000;

        /* parse the year-month-day part */
        for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
            q = small_strptime(p, date_fmt[i], &dt);
            if (q) {
                break;
            }
        }

        /* if the year-month-day part is missing, then take the
         * current year-month-day time */
        if (!q) {
            if (is_utc) {
                dt = *gmtime(&now);
            } else {
                dt = *localtime(&now);
            }
            dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
        } else {
            p = q;
        }

        if (*p == 'T' || *p == 't' || *p == ' ')
            p++;

        /* parse the hour-minute-second part */
        for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
            q = small_strptime(p, time_fmt[i], &dt);
            if (q) {
                break;
            }
        }
    } else {
        /* parse datestr as a duration */
        if (p[0] == '-') {
            negative = 1;
            ++p;
        }
        /* parse datestr as HH:MM:SS */
        q = small_strptime(p, time_fmt[0], &dt);
        if (!q) {
            /* parse datestr as S+ */
            dt.tm_sec = strtol(p, (char **)&q, 10);
            if (q == p)
                /* the parsing didn't succeed */
                return INT64_MIN;
            dt.tm_min = 0;
            dt.tm_hour = 0;
        }
    }

    /* Now we have all the fields that we can get */
    if (!q) {
        return INT64_MIN;
    }

    if (duration) {
        t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
    } else {
        dt.tm_isdst = -1;       /* unknown */
        if (is_utc) {
            t = mktimegm(&dt);
        } else {
            t = mktime(&dt);
        }
    }

    t *= 1000000;

    /* parse the .m... part (fractional seconds, microsecond precision) */
    if (*q == '.') {
        int val, n;
        q++;
        for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
            /* cast to unsigned char: passing a plain (possibly negative)
               char to isdigit() is undefined behavior */
            if (!isdigit((unsigned char)*q))
                break;
            val += n * (*q - '0');
        }
        t += val;
    }
    return negative ? -t : t;
}
3062
/**
 * Look up tag1 in a URL-style "?k1=v1&k2=v2" info string.
 * If found, its value (with '+' decoded to ' ') is copied into arg,
 * truncated to arg_size-1 characters and NUL-terminated.
 * @return 1 if the tag was found, 0 otherwise
 */
int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
{
    const char *p = info;
    char tag[128], *q;

    /* skip an optional leading '?' */
    if (*p == '?')
        p++;

    for (;;) {
        /* collect the tag name up to '=', '&' or end of string */
        q = tag;
        while (*p != '\0' && *p != '=' && *p != '&') {
            if ((q - tag) < sizeof(tag) - 1)
                *q++ = *p;
            p++;
        }
        *q = '\0';

        /* collect the value, decoding '+' as space */
        q = arg;
        if (*p == '=') {
            p++;
            while (*p != '&' && *p != '\0') {
                if ((q - arg) < arg_size - 1)
                    *q++ = (*p == '+') ? ' ' : *p;
                p++;
            }
            *q = '\0';
        }

        if (!strcmp(tag, tag1))
            return 1;
        if (*p != '&')
            break;
        p++;
    }
    return 0;
}
3101
3102 int av_get_frame_filename(char *buf, int buf_size,
3103 const char *path, int number)
3104 {
3105 const char *p;
3106 char *q, buf1[20], c;
3107 int nd, len, percentd_found;
3108
3109 q = buf;
3110 p = path;
3111 percentd_found = 0;
3112 for(;;) {
3113 c = *p++;
3114 if (c == '\0')
3115 break;
3116 if (c == '%') {
3117 do {
3118 nd = 0;
3119 while (isdigit(*p)) {
3120 nd = nd * 10 + *p++ - '0';
3121 }
3122 c = *p++;
3123 } while (isdigit(c));
3124
3125 switch(c) {
3126 case '%':
3127 goto addchar;
3128 case 'd':
3129 if (percentd_found)
3130 goto fail;
3131 percentd_found = 1;
3132 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3133 len = strlen(buf1);
3134 if ((q - buf + len) > buf_size