Do not call update_initial_durations if pkt->duration is 0.
libavformat/utils.c
1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "opt.h"
23 #include "avstring.h"
24 #include "riff.h"
25 #include <sys/time.h>
26 #include <time.h>
27
28 #undef NDEBUG
29 #include <assert.h>
30
31 /**
32 * @file libavformat/utils.c
33 * various utility functions for use within FFmpeg
34 */
35
36 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
37 static void av_frac_add(AVFrac *f, int64_t incr);
38
39 /** head of registered input format linked list */
40 AVInputFormat *first_iformat = NULL;
41 /** head of registered output format linked list */
42 AVOutputFormat *first_oformat = NULL;
43
44 AVInputFormat *av_iformat_next(AVInputFormat *f)
45 {
46 if(f) return f->next;
47 else return first_iformat;
48 }
49
50 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
51 {
52 if(f) return f->next;
53 else return first_oformat;
54 }
55
56 void av_register_input_format(AVInputFormat *format)
57 {
58 AVInputFormat **p;
59 p = &first_iformat;
60 while (*p != NULL) p = &(*p)->next;
61 *p = format;
62 format->next = NULL;
63 }
64
65 void av_register_output_format(AVOutputFormat *format)
66 {
67 AVOutputFormat **p;
68 p = &first_oformat;
69 while (*p != NULL) p = &(*p)->next;
70 *p = format;
71 format->next = NULL;
72 }
73
74 int match_ext(const char *filename, const char *extensions)
75 {
76 const char *ext, *p;
77 char ext1[32], *q;
78
79 if(!filename)
80 return 0;
81
82 ext = strrchr(filename, '.');
83 if (ext) {
84 ext++;
85 p = extensions;
86 for(;;) {
87 q = ext1;
88 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
89 *q++ = *p++;
90 *q = '\0';
91 if (!strcasecmp(ext1, ext))
92 return 1;
93 if (*p == '\0')
94 break;
95 p++;
96 }
97 }
98 return 0;
99 }
100
101 AVOutputFormat *guess_format(const char *short_name, const char *filename,
102 const char *mime_type)
103 {
104 AVOutputFormat *fmt, *fmt_found;
105 int score_max, score;
106
107 /* specific test for image sequences */
108 #ifdef CONFIG_IMAGE2_MUXER
109 if (!short_name && filename &&
110 av_filename_number_test(filename) &&
111 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
112 return guess_format("image2", NULL, NULL);
113 }
114 #endif
115 /* Find the proper file type. */
116 fmt_found = NULL;
117 score_max = 0;
118 fmt = first_oformat;
119 while (fmt != NULL) {
120 score = 0;
121 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
122 score += 100;
123 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
124 score += 10;
125 if (filename && fmt->extensions &&
126 match_ext(filename, fmt->extensions)) {
127 score += 5;
128 }
129 if (score > score_max) {
130 score_max = score;
131 fmt_found = fmt;
132 }
133 fmt = fmt->next;
134 }
135 return fmt_found;
136 }
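/* Usage sketch (illustrative): the scoring above prefers an explicit
 * short_name (+100) over a MIME type match (+10) over a filename extension
 * match (+5), so
 *     AVOutputFormat *ofmt = guess_format(NULL, "out.avi", NULL);
 * typically selects the AVI muxer via its extension, while
 *     guess_format("mpegts", "out.avi", NULL);
 * returns the MPEG-TS muxer regardless of the filename. */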
137
138 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
139 const char *mime_type)
140 {
141 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
142
143 if (fmt) {
144 AVOutputFormat *stream_fmt;
145 char stream_format_name[64];
146
147 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
148 stream_fmt = guess_format(stream_format_name, NULL, NULL);
149
150 if (stream_fmt)
151 fmt = stream_fmt;
152 }
153
154 return fmt;
155 }
156
157 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
158 const char *filename, const char *mime_type, enum CodecType type){
159 if(type == CODEC_TYPE_VIDEO){
160 enum CodecID codec_id= CODEC_ID_NONE;
161
162 #ifdef CONFIG_IMAGE2_MUXER
163 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
164 codec_id= av_guess_image2_codec(filename);
165 }
166 #endif
167 if(codec_id == CODEC_ID_NONE)
168 codec_id= fmt->video_codec;
169 return codec_id;
170 }else if(type == CODEC_TYPE_AUDIO)
171 return fmt->audio_codec;
172 else
173 return CODEC_ID_NONE;
174 }
175
176 AVInputFormat *av_find_input_format(const char *short_name)
177 {
178 AVInputFormat *fmt;
179 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
180 if (!strcmp(fmt->name, short_name))
181 return fmt;
182 }
183 return NULL;
184 }
185
186 /* memory handling */
187
188 void av_destruct_packet(AVPacket *pkt)
189 {
190 av_free(pkt->data);
191 pkt->data = NULL; pkt->size = 0;
192 }
193
194 void av_init_packet(AVPacket *pkt)
195 {
196 pkt->pts = AV_NOPTS_VALUE;
197 pkt->dts = AV_NOPTS_VALUE;
198 pkt->pos = -1;
199 pkt->duration = 0;
200 pkt->flags = 0;
201 pkt->stream_index = 0;
202 pkt->destruct= av_destruct_packet_nofree;
203 }
204
205 int av_new_packet(AVPacket *pkt, int size)
206 {
207 uint8_t *data;
208 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
209 return AVERROR(ENOMEM);
210 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
211 if (!data)
212 return AVERROR(ENOMEM);
213 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
214
215 av_init_packet(pkt);
216 pkt->data = data;
217 pkt->size = size;
218 pkt->destruct = av_destruct_packet;
219 return 0;
220 }
221
222 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
223 {
224 int ret= av_new_packet(pkt, size);
225
226 if(ret<0)
227 return ret;
228
229 pkt->pos= url_ftell(s);
230
231 ret= get_buffer(s, pkt->data, size);
232 if(ret<=0)
233 av_free_packet(pkt);
234 else
235 pkt->size= ret;
236
237 return ret;
238 }
239
240 int av_dup_packet(AVPacket *pkt)
241 {
242 if (pkt->destruct != av_destruct_packet) {
243 uint8_t *data;
244 /* We duplicate the packet and don't forget to add the padding again. */
245 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
246 return AVERROR(ENOMEM);
247 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
248 if (!data) {
249 return AVERROR(ENOMEM);
250 }
251 memcpy(data, pkt->data, pkt->size);
252 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
253 pkt->data = data;
254 pkt->destruct = av_destruct_packet;
255 }
256 return 0;
257 }
258
259 int av_filename_number_test(const char *filename)
260 {
261 char buf[1024];
262 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
263 }
264
265 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
266 {
267 AVInputFormat *fmt1, *fmt;
268 int score;
269
270 fmt = NULL;
271 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
272 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
273 continue;
274 score = 0;
275 if (fmt1->read_probe) {
276 score = fmt1->read_probe(pd);
277 } else if (fmt1->extensions) {
278 if (match_ext(pd->filename, fmt1->extensions)) {
279 score = 50;
280 }
281 }
282 if (score > *score_max) {
283 *score_max = score;
284 fmt = fmt1;
285 }
286 }
287 return fmt;
288 }
289
290 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
291 int score=0;
292 return av_probe_input_format2(pd, is_opened, &score);
293 }
294
295 /************************************************************/
296 /* input media file */
297
298 /**
299 * Open a media file from an IO stream. 'fmt' must be specified.
300 */
301 static const char* format_to_name(void* ptr)
302 {
303 AVFormatContext* fc = (AVFormatContext*) ptr;
304 if(fc->iformat) return fc->iformat->name;
305 else if(fc->oformat) return fc->oformat->name;
306 else return "NULL";
307 }
308
309 #define OFFSET(x) offsetof(AVFormatContext,x)
310 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
311 //these names are too long to be readable
312 #define E AV_OPT_FLAG_ENCODING_PARAM
313 #define D AV_OPT_FLAG_DECODING_PARAM
314
315 static const AVOption options[]={
316 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
317 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
318 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
319 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
320 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
321 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
322 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
323 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
324 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
325 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
326 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
327 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
328 {NULL},
329 };
330
331 #undef E
332 #undef D
333 #undef DEFAULT
334
335 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
336
337 static void avformat_get_context_defaults(AVFormatContext *s)
338 {
339 memset(s, 0, sizeof(AVFormatContext));
340
341 s->av_class = &av_format_context_class;
342
343 av_opt_set_defaults(s);
344 }
345
346 AVFormatContext *av_alloc_format_context(void)
347 {
348 AVFormatContext *ic;
349 ic = av_malloc(sizeof(AVFormatContext));
350 if (!ic) return ic;
351 avformat_get_context_defaults(ic);
352 ic->av_class = &av_format_context_class;
353 return ic;
354 }
355
356 int av_open_input_stream(AVFormatContext **ic_ptr,
357 ByteIOContext *pb, const char *filename,
358 AVInputFormat *fmt, AVFormatParameters *ap)
359 {
360 int err;
361 AVFormatContext *ic;
362 AVFormatParameters default_ap;
363
364 if(!ap){
365 ap=&default_ap;
366 memset(ap, 0, sizeof(default_ap));
367 }
368
369 if(!ap->prealloced_context)
370 ic = av_alloc_format_context();
371 else
372 ic = *ic_ptr;
373 if (!ic) {
374 err = AVERROR(ENOMEM);
375 goto fail;
376 }
377 ic->iformat = fmt;
378 ic->pb = pb;
379 ic->duration = AV_NOPTS_VALUE;
380 ic->start_time = AV_NOPTS_VALUE;
381 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
382
383 /* allocate private data */
384 if (fmt->priv_data_size > 0) {
385 ic->priv_data = av_mallocz(fmt->priv_data_size);
386 if (!ic->priv_data) {
387 err = AVERROR(ENOMEM);
388 goto fail;
389 }
390 } else {
391 ic->priv_data = NULL;
392 }
393
394 err = ic->iformat->read_header(ic, ap);
395 if (err < 0)
396 goto fail;
397
398 if (pb && !ic->data_offset)
399 ic->data_offset = url_ftell(ic->pb);
400
401 *ic_ptr = ic;
402 return 0;
403 fail:
404 if (ic) {
405 av_freep(&ic->priv_data);
406 }
407 av_free(ic);
408 *ic_ptr = NULL;
409 return err;
410 }
411
412 /** size of probe buffer, for guessing file type from file contents */
413 #define PROBE_BUF_MIN 2048
414 #define PROBE_BUF_MAX (1<<20)
415
416 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
417 AVInputFormat *fmt,
418 int buf_size,
419 AVFormatParameters *ap)
420 {
421 int err, probe_size;
422 AVProbeData probe_data, *pd = &probe_data;
423 ByteIOContext *pb = NULL;
424
425 pd->filename = "";
426 if (filename)
427 pd->filename = filename;
428 pd->buf = NULL;
429 pd->buf_size = 0;
430
431 if (!fmt) {
432 /* guess the format without opening the file (only AVFMT_NOFILE formats are considered) */
433 fmt = av_probe_input_format(pd, 0);
434 }
435
436 /* Do not open file if the format does not need it. XXX: specific
437 hack needed to handle RTSP/TCP */
438 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
439 /* if no file needed do not try to open one */
440 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
441 goto fail;
442 }
443 if (buf_size > 0) {
444 url_setbufsize(pb, buf_size);
445 }
446
447 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
448 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
449 /* read probe data */
450 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
451 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
452 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
453 if (url_fseek(pb, 0, SEEK_SET) < 0) {
454 url_fclose(pb);
455 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
456 pb = NULL;
457 err = AVERROR(EIO);
458 goto fail;
459 }
460 }
461 /* guess file format */
462 fmt = av_probe_input_format2(pd, 1, &score);
463 }
464 av_freep(&pd->buf);
465 }
466
467 /* if still no format found, error */
468 if (!fmt) {
469 err = AVERROR_NOFMT;
470 goto fail;
471 }
472
473 /* check filename in case an image number is expected */
474 if (fmt->flags & AVFMT_NEEDNUMBER) {
475 if (!av_filename_number_test(filename)) {
476 err = AVERROR_NUMEXPECTED;
477 goto fail;
478 }
479 }
480 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
481 if (err)
482 goto fail;
483 return 0;
484 fail:
485 av_freep(&pd->buf);
486 if (pb)
487 url_fclose(pb);
488 *ic_ptr = NULL;
489 return err;
490
491 }
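/* Minimal demuxing sketch (illustrative; "input.mpg" is a placeholder):
 *     AVFormatContext *ic;
 *     AVPacket pkt;
 *     if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
 *         return -1;
 *     av_find_stream_info(ic);            // fill in missing codec parameters
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // pkt.stream_index selects the stream, pkt.data/pkt.size hold one frame
 *         av_free_packet(&pkt);
 *     }
 *     av_close_input_file(ic);
 */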
492
493 /*******************************************************/
494
495 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
496 {
497 int ret;
498 AVStream *st;
499 av_init_packet(pkt);
500 ret= s->iformat->read_packet(s, pkt);
501 if (ret < 0)
502 return ret;
503 st= s->streams[pkt->stream_index];
504
505 switch(st->codec->codec_type){
506 case CODEC_TYPE_VIDEO:
507 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
508 break;
509 case CODEC_TYPE_AUDIO:
510 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
511 break;
512 case CODEC_TYPE_SUBTITLE:
513 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
514 break;
515 }
516
517 return ret;
518 }
519
520 /**********************************************************/
521
522 /**
523 * Get the number of samples of an audio frame. Return -1 on error.
524 */
525 static int get_audio_frame_size(AVCodecContext *enc, int size)
526 {
527 int frame_size;
528
529 if (enc->frame_size <= 1) {
530 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
531
532 if (bits_per_sample) {
533 if (enc->channels == 0)
534 return -1;
535 frame_size = (size << 3) / (bits_per_sample * enc->channels);
536 } else {
537 /* used for example by ADPCM codecs */
538 if (enc->bit_rate == 0)
539 return -1;
540 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
541 }
542 } else {
543 frame_size = enc->frame_size;
544 }
545 return frame_size;
546 }
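/* Example: 16-bit stereo PCM (bits_per_sample = 16, channels = 2) in a
 * 4096-byte packet gives (4096 << 3) / (16 * 2) = 1024 samples; codecs with
 * neither a fixed frame_size nor a bits_per_sample value fall back to the
 * size * 8 * sample_rate / bit_rate estimate. */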
547
548
549 /**
550 * Return the frame duration as a fraction, *pnum / *pden, in seconds. Both are set to 0 if it is not available.
551 */
552 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
553 AVCodecParserContext *pc, AVPacket *pkt)
554 {
555 int frame_size;
556
557 *pnum = 0;
558 *pden = 0;
559 switch(st->codec->codec_type) {
560 case CODEC_TYPE_VIDEO:
561 if(st->time_base.num*1000LL > st->time_base.den){
562 *pnum = st->time_base.num;
563 *pden = st->time_base.den;
564 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
565 *pnum = st->codec->time_base.num;
566 *pden = st->codec->time_base.den;
567 if (pc && pc->repeat_pict) {
568 *pden *= 2;
569 *pnum = (*pnum) * (2 + pc->repeat_pict);
570 }
571 }
572 break;
573 case CODEC_TYPE_AUDIO:
574 frame_size = get_audio_frame_size(st->codec, pkt->size);
575 if (frame_size < 0)
576 break;
577 *pnum = frame_size;
578 *pden = st->codec->sample_rate;
579 break;
580 default:
581 break;
582 }
583 }
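/* Worked example: for a 25 fps stream stored with time_base 1/90000 and
 * codec->time_base 1/25, this sets *pnum = 1 and *pden = 25; the caller
 * (compute_pkt_fields) then rescales it to 90000/25 = 3600 ticks of
 * pkt->duration. */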
584
585 static int is_intra_only(AVCodecContext *enc){
586 if(enc->codec_type == CODEC_TYPE_AUDIO){
587 return 1;
588 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
589 switch(enc->codec_id){
590 case CODEC_ID_MJPEG:
591 case CODEC_ID_MJPEGB:
592 case CODEC_ID_LJPEG:
593 case CODEC_ID_RAWVIDEO:
594 case CODEC_ID_DVVIDEO:
595 case CODEC_ID_HUFFYUV:
596 case CODEC_ID_FFVHUFF:
597 case CODEC_ID_ASV1:
598 case CODEC_ID_ASV2:
599 case CODEC_ID_VCR1:
600 return 1;
601 default: break;
602 }
603 }
604 return 0;
605 }
606
607 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
608 int64_t dts, int64_t pts)
609 {
610 AVStream *st= s->streams[stream_index];
611 AVPacketList *pktl= s->packet_buffer;
612
613 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE)
614 return;
615
616 st->first_dts= dts - st->cur_dts;
617 st->cur_dts= dts;
618
619 for(; pktl; pktl= pktl->next){
620 if(pktl->pkt.stream_index != stream_index)
621 continue;
622 //FIXME think more about this check
623 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
624 pktl->pkt.pts += st->first_dts;
625
626 if(pktl->pkt.dts != AV_NOPTS_VALUE)
627 pktl->pkt.dts += st->first_dts;
628
629 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
630 st->start_time= pktl->pkt.pts;
631 }
632 if (st->start_time == AV_NOPTS_VALUE)
633 st->start_time = pts;
634 }
635
636 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
637 {
638 AVPacketList *pktl= s->packet_buffer;
639
640 assert(pkt->duration && !st->cur_dts);
641
642 for(; pktl; pktl= pktl->next){
643 if(pktl->pkt.stream_index != pkt->stream_index)
644 continue;
645 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
646 && !pktl->pkt.duration){
647 pktl->pkt.pts= pktl->pkt.dts= st->cur_dts;
648 st->cur_dts += pkt->duration;
649 pktl->pkt.duration= pkt->duration;
650 }else
651 break;
652 }
653 }
654
655 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
656 AVCodecParserContext *pc, AVPacket *pkt)
657 {
658 int num, den, presentation_delayed, delay, i;
659 int64_t offset;
660
661 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
662 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
663 pkt->dts -= 1LL<<st->pts_wrap_bits;
664 }
665
666 if (pkt->duration == 0) {
667 compute_frame_duration(&num, &den, st, pc, pkt);
668 if (den && num) {
669 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
670
671 if(st->cur_dts == 0 && pkt->duration != 0)
672 update_initial_durations(s, st, pkt);
673 }
674 }
675
676 /* correct timestamps with byte offset if demuxers only have timestamps
677 on packet boundaries */
678 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
679 /* this will estimate bitrate based on this frame's duration and size */
680 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
681 if(pkt->pts != AV_NOPTS_VALUE)
682 pkt->pts += offset;
683 if(pkt->dts != AV_NOPTS_VALUE)
684 pkt->dts += offset;
685 }
686
687 /* do we have a video B-frame? */
688 delay= st->codec->has_b_frames;
689 presentation_delayed = 0;
690 /* XXX: need has_b_frame, but cannot get it if the codec is
691 not initialized */
692 if (delay &&
693 pc && pc->pict_type != FF_B_TYPE)
694 presentation_delayed = 1;
695 /* This may be redundant, but it should not hurt. */
696 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
697 presentation_delayed = 1;
698
699 if(st->cur_dts == AV_NOPTS_VALUE){
700 st->cur_dts = 0; //FIXME maybe set it to 0 during init
701 }
702
703 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
704 /* interpolate PTS and DTS if they are not present */
705 if(delay <=1){
706 if (presentation_delayed) {
707 /* DTS = decompression timestamp */
708 /* PTS = presentation timestamp */
709 if (pkt->dts == AV_NOPTS_VALUE)
710 pkt->dts = st->last_IP_pts;
711 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
712 if (pkt->dts == AV_NOPTS_VALUE)
713 pkt->dts = st->cur_dts;
714
715 /* this is tricky: the dts must be incremented by the duration
716 of the frame we are displaying, i.e. the last I- or P-frame */
717 if (st->last_IP_duration == 0)
718 st->last_IP_duration = pkt->duration;
719 st->cur_dts = pkt->dts + st->last_IP_duration;
720 st->last_IP_duration = pkt->duration;
721 st->last_IP_pts= pkt->pts;
722 /* cannot compute PTS if not present (we can compute it only
723 by knowing the future) */
724 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
725 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
726 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
727 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
728 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
729 pkt->pts += pkt->duration;
730 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
731 }
732 }
733
734 /* presentation is not delayed: PTS and DTS are the same */
735 if(pkt->pts == AV_NOPTS_VALUE)
736 pkt->pts = pkt->dts;
737 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
738 if(pkt->pts == AV_NOPTS_VALUE)
739 pkt->pts = st->cur_dts;
740 pkt->dts = pkt->pts;
741 st->cur_dts = pkt->pts + pkt->duration;
742 }
743 }
744
745 if(pkt->pts != AV_NOPTS_VALUE){
746 st->pts_buffer[0]= pkt->pts;
747 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
748 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
749 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
750 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
751 if(pkt->dts == AV_NOPTS_VALUE)
752 pkt->dts= st->pts_buffer[0];
753 if(delay>1){
754 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
755 }
756 if(pkt->dts > st->cur_dts)
757 st->cur_dts = pkt->dts;
758 }
759
760 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
761
762 /* update flags */
763 if(is_intra_only(st->codec))
764 pkt->flags |= PKT_FLAG_KEY;
765 else if (pc) {
766 pkt->flags = 0;
767 /* keyframe computation */
768 if (pc->pict_type == FF_I_TYPE)
769 pkt->flags |= PKT_FLAG_KEY;
770 }
771 }
772
773 void av_destruct_packet_nofree(AVPacket *pkt)
774 {
775 pkt->data = NULL; pkt->size = 0;
776 }
777
778 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
779 {
780 AVStream *st;
781 int len, ret, i;
782
783 av_init_packet(pkt);
784
785 for(;;) {
786 /* select current input stream component */
787 st = s->cur_st;
788 if (st) {
789 if (!st->need_parsing || !st->parser) {
790 /* no parsing needed: we just output the packet as is */
791 /* raw data support */
792 *pkt = s->cur_pkt;
793 compute_pkt_fields(s, st, NULL, pkt);
794 s->cur_st = NULL;
795 break;
796 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
797 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
798 s->cur_ptr, s->cur_len,
799 s->cur_pkt.pts, s->cur_pkt.dts);
800 s->cur_pkt.pts = AV_NOPTS_VALUE;
801 s->cur_pkt.dts = AV_NOPTS_VALUE;
802 /* increment read pointer */
803 s->cur_ptr += len;
804 s->cur_len -= len;
805
806 /* return packet if any */
807 if (pkt->size) {
808 got_packet:
809 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
810 pkt->duration = 0;
811 pkt->stream_index = st->index;
812 pkt->pts = st->parser->pts;
813 pkt->dts = st->parser->dts;
814 pkt->destruct = av_destruct_packet_nofree;
815 compute_pkt_fields(s, st, st->parser, pkt);
816
817 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
818 ff_reduce_index(s, st->index);
819 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
820 0, 0, AVINDEX_KEYFRAME);
821 }
822
823 break;
824 }
825 } else {
826 /* free packet */
827 av_free_packet(&s->cur_pkt);
828 s->cur_st = NULL;
829 }
830 } else {
831 /* read next packet */
832 ret = av_read_packet(s, &s->cur_pkt);
833 if (ret < 0) {
834 if (ret == AVERROR(EAGAIN))
835 return ret;
836 /* return the last frames, if any */
837 for(i = 0; i < s->nb_streams; i++) {
838 st = s->streams[i];
839 if (st->parser && st->need_parsing) {
840 av_parser_parse(st->parser, st->codec,
841 &pkt->data, &pkt->size,
842 NULL, 0,
843 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
844 if (pkt->size)
845 goto got_packet;
846 }
847 }
848 /* no more packets: really terminate parsing */
849 return ret;
850 }
851
852 st = s->streams[s->cur_pkt.stream_index];
853 if(st->codec->debug & FF_DEBUG_PTS)
854 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
855 s->cur_pkt.stream_index,
856 s->cur_pkt.pts,
857 s->cur_pkt.dts,
858 s->cur_pkt.size);
859
860 s->cur_st = st;
861 s->cur_ptr = s->cur_pkt.data;
862 s->cur_len = s->cur_pkt.size;
863 if (st->need_parsing && !st->parser) {
864 st->parser = av_parser_init(st->codec->codec_id);
865 if (!st->parser) {
866 /* no parser available: just output the raw packets */
867 st->need_parsing = AVSTREAM_PARSE_NONE;
868 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
869 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
870 }
871 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
872 st->parser->last_frame_offset=
873 st->parser->cur_offset= s->cur_pkt.pos;
874 }
875 }
876 }
877 }
878 if(st->codec->debug & FF_DEBUG_PTS)
879 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
880 pkt->stream_index,
881 pkt->pts,
882 pkt->dts,
883 pkt->size);
884
885 return 0;
886 }
887
888 static AVPacket *add_to_pktbuf(AVFormatContext *s, AVPacket *pkt){
889 AVPacketList *pktl= s->packet_buffer;
890 AVPacketList **plast_pktl= &s->packet_buffer;
891
892 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
893
894 pktl = av_mallocz(sizeof(AVPacketList));
895 if (!pktl)
896 return NULL;
897
898 /* add the packet in the buffered packet list */
899 *plast_pktl = pktl;
900 pktl->pkt= *pkt;
901 return &pktl->pkt;
902 }
903
904 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
905 {
906 AVPacketList *pktl;
907 int eof=0;
908 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
909
910 for(;;){
911 pktl = s->packet_buffer;
912 if (pktl) {
913 AVPacket *next_pkt= &pktl->pkt;
914
915 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
916 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
917 if( pktl->pkt.stream_index == next_pkt->stream_index
918 && next_pkt->dts < pktl->pkt.dts
919 && pktl->pkt.pts != pktl->pkt.dts //not b frame
920 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
921 next_pkt->pts= pktl->pkt.dts;
922 }
923 pktl= pktl->next;
924 }
925 pktl = s->packet_buffer;
926 }
927
928 if( next_pkt->pts != AV_NOPTS_VALUE
929 || next_pkt->dts == AV_NOPTS_VALUE
930 || !genpts || eof){
931 /* read packet from packet buffer, if there is data */
932 *pkt = *next_pkt;
933 s->packet_buffer = pktl->next;
934 av_free(pktl);
935 return 0;
936 }
937 }
938 if(genpts){
939 int ret= av_read_frame_internal(s, pkt);
940 if(ret<0){
941 if(pktl && ret != AVERROR(EAGAIN)){
942 eof=1;
943 continue;
944 }else
945 return ret;
946 }
947
948 if(av_dup_packet(add_to_pktbuf(s, pkt)) < 0)
949 return AVERROR(ENOMEM);
950 }else{
951 assert(!s->packet_buffer);
952 return av_read_frame_internal(s, pkt);
953 }
954 }
955 }
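/* Note: with AVFMT_FLAG_GENPTS, packets are parked in s->packet_buffer until
 * a later packet of the same stream provides a dts that can stand in for the
 * missing pts (B-frame reordering), which is what the inner while loop above
 * implements. */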
956
957 /* XXX: empty the packet queue */
958 static void flush_packet_queue(AVFormatContext *s)
959 {
960 AVPacketList *pktl;
961
962 for(;;) {
963 pktl = s->packet_buffer;
964 if (!pktl)
965 break;
966 s->packet_buffer = pktl->next;
967 av_free_packet(&pktl->pkt);
968 av_free(pktl);
969 }
970 }
971
972 /*******************************************************/
973 /* seek support */
974
975 int av_find_default_stream_index(AVFormatContext *s)
976 {
977 int i;
978 AVStream *st;
979
980 if (s->nb_streams <= 0)
981 return -1;
982 for(i = 0; i < s->nb_streams; i++) {
983 st = s->streams[i];
984 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
985 return i;
986 }
987 }
988 return 0;
989 }
990
991 /**
992 * Flush the frame reader.
993 */
994 static void av_read_frame_flush(AVFormatContext *s)
995 {
996 AVStream *st;
997 int i;
998
999 flush_packet_queue(s);
1000
1001 /* free previous packet */
1002 if (s->cur_st) {
1003 if (s->cur_st->parser)
1004 av_free_packet(&s->cur_pkt);
1005 s->cur_st = NULL;
1006 }
1007 /* fail safe */
1008 s->cur_ptr = NULL;
1009 s->cur_len = 0;
1010
1011 /* for each stream, reset read state */
1012 for(i = 0; i < s->nb_streams; i++) {
1013 st = s->streams[i];
1014
1015 if (st->parser) {
1016 av_parser_close(st->parser);
1017 st->parser = NULL;
1018 }
1019 st->last_IP_pts = AV_NOPTS_VALUE;
1020 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1021 }
1022 }
1023
1024 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1025 int i;
1026
1027 for(i = 0; i < s->nb_streams; i++) {
1028 AVStream *st = s->streams[i];
1029
1030 st->cur_dts = av_rescale(timestamp,
1031 st->time_base.den * (int64_t)ref_st->time_base.num,
1032 st->time_base.num * (int64_t)ref_st->time_base.den);
1033 }
1034 }
1035
1036 void ff_reduce_index(AVFormatContext *s, int stream_index)
1037 {
1038 AVStream *st= s->streams[stream_index];
1039 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1040
1041 if((unsigned)st->nb_index_entries >= max_entries){
1042 int i;
1043 for(i=0; 2*i<st->nb_index_entries; i++)
1044 st->index_entries[i]= st->index_entries[2*i];
1045 st->nb_index_entries= i;
1046 }
1047 }
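/* When the index would exceed max_index_size, every second entry is dropped,
 * halving the index: seek granularity gets coarser but memory use stays
 * bounded for long streams. */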
1048
1049 int av_add_index_entry(AVStream *st,
1050 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1051 {
1052 AVIndexEntry *entries, *ie;
1053 int index;
1054
1055 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1056 return -1;
1057
1058 entries = av_fast_realloc(st->index_entries,
1059 &st->index_entries_allocated_size,
1060 (st->nb_index_entries + 1) *
1061 sizeof(AVIndexEntry));
1062 if(!entries)
1063 return -1;
1064
1065 st->index_entries= entries;
1066
1067 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1068
1069 if(index<0){
1070 index= st->nb_index_entries++;
1071 ie= &entries[index];
1072 assert(index==0 || ie[-1].timestamp < timestamp);
1073 }else{
1074 ie= &entries[index];
1075 if(ie->timestamp != timestamp){
1076 if(ie->timestamp <= timestamp)
1077 return -1;
1078 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1079 st->nb_index_entries++;
1080 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1081 distance= ie->min_distance;
1082 }
1083
1084 ie->pos = pos;
1085 ie->timestamp = timestamp;
1086 ie->min_distance= distance;
1087 ie->size= size;
1088 ie->flags = flags;
1089
1090 return index;
1091 }
1092
1093 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1094 int flags)
1095 {
1096 AVIndexEntry *entries= st->index_entries;
1097 int nb_entries= st->nb_index_entries;
1098 int a, b, m;
1099 int64_t timestamp;
1100
1101 a = - 1;
1102 b = nb_entries;
1103
1104 while (b - a > 1) {
1105 m = (a + b) >> 1;
1106 timestamp = entries[m].timestamp;
1107 if(timestamp >= wanted_timestamp)
1108 b = m;
1109 if(timestamp <= wanted_timestamp)
1110 a = m;
1111 }
1112 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1113
1114 if(!(flags & AVSEEK_FLAG_ANY)){
1115 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1116 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1117 }
1118 }
1119
1120 if(m == nb_entries)
1121 return -1;
1122 return m;
1123 }
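/* Example: with AVSEEK_FLAG_BACKWARD this returns the last entry whose
 * timestamp is <= wanted_timestamp, otherwise the first entry whose timestamp
 * is >= wanted_timestamp; unless AVSEEK_FLAG_ANY is set, the result is then
 * moved to the nearest keyframe entry in that direction (or -1 if none). */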
1124
1125 #define DEBUG_SEEK
1126
1127 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1128 AVInputFormat *avif= s->iformat;
1129 int64_t pos_min, pos_max, pos, pos_limit;
1130 int64_t ts_min, ts_max, ts;
1131 int index;
1132 AVStream *st;
1133
1134 if (stream_index < 0)
1135 return -1;
1136
1137 #ifdef DEBUG_SEEK
1138 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1139 #endif
1140
1141 ts_max=
1142 ts_min= AV_NOPTS_VALUE;
1143 pos_limit= -1; //gcc falsely says it may be uninitialized
1144
1145 st= s->streams[stream_index];
1146 if(st->index_entries){
1147 AVIndexEntry *e;
1148
1149 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1150 index= FFMAX(index, 0);
1151 e= &st->index_entries[index];
1152
1153 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1154 pos_min= e->pos;
1155 ts_min= e->timestamp;
1156 #ifdef DEBUG_SEEK
1157 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1158 pos_min,ts_min);
1159 #endif
1160 }else{
1161 assert(index==0);
1162 }
1163
1164 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1165 assert(index < st->nb_index_entries);
1166 if(index >= 0){
1167 e= &st->index_entries[index];
1168 assert(e->timestamp >= target_ts);
1169 pos_max= e->pos;
1170 ts_max= e->timestamp;
1171 pos_limit= pos_max - e->min_distance;
1172 #ifdef DEBUG_SEEK
1173 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1174 pos_max,pos_limit, ts_max);
1175 #endif
1176 }
1177 }
1178
1179 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1180 if(pos<0)
1181 return -1;
1182
1183 /* do the seek */
1184 url_fseek(s->pb, pos, SEEK_SET);
1185
1186 av_update_cur_dts(s, st, ts);
1187
1188 return 0;
1189 }
1190
1191 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1192 int64_t pos, ts;
1193 int64_t start_pos, filesize;
1194 int no_change;
1195
1196 #ifdef DEBUG_SEEK
1197 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1198 #endif
1199
1200 if(ts_min == AV_NOPTS_VALUE){
1201 pos_min = s->data_offset;
1202 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1203 if (ts_min == AV_NOPTS_VALUE)
1204 return -1;
1205 }
1206
1207 if(ts_max == AV_NOPTS_VALUE){
1208 int step= 1024;
1209 filesize = url_fsize(s->pb);
1210 pos_max = filesize - 1;
1211 do{
1212 pos_max -= step;
1213 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1214 step += step;
1215 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1216 if (ts_max == AV_NOPTS_VALUE)
1217 return -1;
1218
1219 for(;;){
1220 int64_t tmp_pos= pos_max + 1;
1221 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1222 if(tmp_ts == AV_NOPTS_VALUE)
1223 break;
1224 ts_max= tmp_ts;
1225 pos_max= tmp_pos;
1226 if(tmp_pos >= filesize)
1227 break;
1228 }
1229 pos_limit= pos_max;
1230 }
1231
1232 if(ts_min > ts_max){
1233 return -1;
1234 }else if(ts_min == ts_max){
1235 pos_limit= pos_min;
1236 }
1237
1238 no_change=0;
1239 while (pos_min < pos_limit) {
1240 #ifdef DEBUG_SEEK
1241 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1242 pos_min, pos_max,
1243 ts_min, ts_max);
1244 #endif
1245 assert(pos_limit <= pos_max);
1246
1247 if(no_change==0){
1248 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1249 // interpolate position (better than plain bisection)
1250 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1251 + pos_min - approximate_keyframe_distance;
1252 }else if(no_change==1){
1253 // bisection, if interpolation failed to change min or max pos last time
1254 pos = (pos_min + pos_limit)>>1;
1255 }else{
1256 /* linear search if bisection failed, can only happen if there
1257 are very few or no keyframes between min/max */
1258 pos=pos_min;
1259 }
1260 if(pos <= pos_min)
1261 pos= pos_min + 1;
1262 else if(pos > pos_limit)
1263 pos= pos_limit;
1264 start_pos= pos;
1265
1266 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1267 if(pos == pos_max)
1268 no_change++;
1269 else
1270 no_change=0;
1271 #ifdef DEBUG_SEEK
1272 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1273 #endif
1274 if(ts == AV_NOPTS_VALUE){
1275 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1276 return -1;
1277 }
1278 assert(ts != AV_NOPTS_VALUE);
1279 if (target_ts <= ts) {
1280 pos_limit = start_pos - 1;
1281 pos_max = pos;
1282 ts_max = ts;
1283 }
1284 if (target_ts >= ts) {
1285 pos_min = pos;
1286 ts_min = ts;
1287 }
1288 }
1289
1290 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1291 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1292 #ifdef DEBUG_SEEK
1293 pos_min = pos;
1294 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1295 pos_min++;
1296 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1297 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1298 pos, ts_min, target_ts, ts_max);
1299 #endif
1300 *ts_ret= ts;
1301 return pos;
1302 }
1303
1304 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1305 int64_t pos_min, pos_max;
1306 #if 0
1307 AVStream *st;
1308
1309 if (stream_index < 0)
1310 return -1;
1311
1312 st= s->streams[stream_index];
1313 #endif
1314
1315 pos_min = s->data_offset;
1316 pos_max = url_fsize(s->pb) - 1;
1317
1318 if (pos < pos_min) pos= pos_min;
1319 else if(pos > pos_max) pos= pos_max;
1320
1321 url_fseek(s->pb, pos, SEEK_SET);
1322
1323 #if 0
1324 av_update_cur_dts(s, st, ts);
1325 #endif
1326 return 0;
1327 }
1328
1329 static int av_seek_frame_generic(AVFormatContext *s,
1330 int stream_index, int64_t timestamp, int flags)
1331 {
1332 int index;
1333 AVStream *st;
1334 AVIndexEntry *ie;
1335
1336 st = s->streams[stream_index];
1337
1338 index = av_index_search_timestamp(st, timestamp, flags);
1339
1340 if(index < 0 || index==st->nb_index_entries-1){
1341 int i;
1342 AVPacket pkt;
1343
1344 if(st->index_entries && st->nb_index_entries){
1345 ie= &st->index_entries[st->nb_index_entries-1];
1346 url_fseek(s->pb, ie->pos, SEEK_SET);
1347 av_update_cur_dts(s, st, ie->timestamp);
1348 }else
1349 url_fseek(s->pb, 0, SEEK_SET);
1350
1351 for(i=0;; i++) {
1352 int ret = av_read_frame(s, &pkt);
1353 if(ret<0)
1354 break;
1355 av_free_packet(&pkt);
1356 if(stream_index == pkt.stream_index){
1357 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1358 break;
1359 }
1360 }
1361 index = av_index_search_timestamp(st, timestamp, flags);
1362 }
1363 if (index < 0)
1364 return -1;
1365
1366 av_read_frame_flush(s);
1367 if (s->iformat->read_seek){
1368 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1369 return 0;
1370 }
1371 ie = &st->index_entries[index];
1372 url_fseek(s->pb, ie->pos, SEEK_SET);
1373
1374 av_update_cur_dts(s, st, ie->timestamp);
1375
1376 return 0;
1377 }
1378
1379 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1380 {
1381 int ret;
1382 AVStream *st;
1383
1384 av_read_frame_flush(s);
1385
1386 if(flags & AVSEEK_FLAG_BYTE)
1387 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1388
1389 if(stream_index < 0){
1390 stream_index= av_find_default_stream_index(s);
1391 if(stream_index < 0)
1392 return -1;
1393
1394 st= s->streams[stream_index];
1395 /* timestamp for default must be expressed in AV_TIME_BASE units */
1396 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1397 }
1398 st= s->streams[stream_index];
1399
1400 /* first, we try the format specific seek */
1401 if (s->iformat->read_seek)
1402 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1403 else
1404 ret = -1;
1405 if (ret >= 0) {
1406 return 0;
1407 }
1408
1409 if(s->iformat->read_timestamp)
1410 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1411 else
1412 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1413 }
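/* Usage sketch (illustrative): seek the default stream to roughly the 10
 * second mark, landing on the preceding keyframe:
 *     av_seek_frame(ic, -1, 10 * AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
 * With an explicit stream_index >= 0 the timestamp must be given in that
 * stream's time_base units instead of AV_TIME_BASE. */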
1414
1415 /*******************************************************/
1416
1417 /**
1418 * Return TRUE if at least one stream in the file has a known duration.
1419 *
1420 * @return TRUE if at least one stream component has its duration set.
1421 */
1422 static int av_has_duration(AVFormatContext *ic)
1423 {
1424 int i;
1425 AVStream *st;
1426
1427 for(i = 0;i < ic->nb_streams; i++) {
1428 st = ic->streams[i];
1429 if (st->duration != AV_NOPTS_VALUE)
1430 return 1;
1431 }
1432 return 0;
1433 }
1434
1435 /**
1436 * Estimate the global stream timings from those of each component.
1437 *
1438 * Also computes the global bitrate if possible.
1439 */
1440 static void av_update_stream_timings(AVFormatContext *ic)
1441 {
1442 int64_t start_time, start_time1, end_time, end_time1;
1443 int64_t duration, duration1;
1444 int i;
1445 AVStream *st;
1446
1447 start_time = INT64_MAX;
1448 end_time = INT64_MIN;
1449 duration = INT64_MIN;
1450 for(i = 0;i < ic->nb_streams; i++) {
1451 st = ic->streams[i];
1452 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1453 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1454 if (start_time1 < start_time)
1455 start_time = start_time1;
1456 if (st->duration != AV_NOPTS_VALUE) {
1457 end_time1 = start_time1
1458 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1459 if (end_time1 > end_time)
1460 end_time = end_time1;
1461 }
1462 }
1463 if (st->duration != AV_NOPTS_VALUE) {
1464 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1465 if (duration1 > duration)
1466 duration = duration1;
1467 }
1468 }
1469 if (start_time != INT64_MAX) {
1470 ic->start_time = start_time;
1471 if (end_time != INT64_MIN) {
1472 if (end_time - start_time > duration)
1473 duration = end_time - start_time;
1474 }
1475 }
1476 if (duration != INT64_MIN) {
1477 ic->duration = duration;
1478 if (ic->file_size > 0) {
1479 /* compute the bitrate */
1480 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1481 (double)ic->duration;
1482 }
1483 }
1484 }
1485
1486 static void fill_all_stream_timings(AVFormatContext *ic)
1487 {
1488 int i;
1489 AVStream *st;
1490
1491 av_update_stream_timings(ic);
1492 for(i = 0;i < ic->nb_streams; i++) {
1493 st = ic->streams[i];
1494 if (st->start_time == AV_NOPTS_VALUE) {
1495 if(ic->start_time != AV_NOPTS_VALUE)
1496 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1497 if(ic->duration != AV_NOPTS_VALUE)
1498 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1499 }
1500 }
1501 }
1502
1503 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1504 {
1505 int64_t filesize, duration;
1506 int bit_rate, i;
1507 AVStream *st;
1508
1509 /* if bit_rate is already set, we believe it */
1510 if (ic->bit_rate == 0) {
1511 bit_rate = 0;
1512 for(i=0;i<ic->nb_streams;i++) {
1513 st = ic->streams[i];
1514 bit_rate += st->codec->bit_rate;
1515 }
1516 ic->bit_rate = bit_rate;
1517 }
1518
1519 /* if duration is already set, we believe it */
1520 if (ic->duration == AV_NOPTS_VALUE &&
1521 ic->bit_rate != 0 &&
1522 ic->file_size != 0) {
1523 filesize = ic->file_size;
1524 if (filesize > 0) {
1525 for(i = 0; i < ic->nb_streams; i++) {
1526 st = ic->streams[i];
1527 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1528 if (st->duration == AV_NOPTS_VALUE)
1529 st->duration = duration;
1530 }
1531 }
1532 }
1533 }
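/* Example: a 10,000,000-byte file with a total bit_rate of 8,000,000 bit/s
 * yields 8 * 10,000,000 / 8,000,000 = 10 seconds, which the av_rescale()
 * above expresses in each stream's own time_base (900000 ticks for 1/90000). */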
1534
1535 #define DURATION_MAX_READ_SIZE 250000
1536
1537 /* only usable for MPEG-PS and MPEG-TS style streams */
1538 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1539 {
1540 AVPacket pkt1, *pkt = &pkt1;
1541 AVStream *st;
1542 int read_size, i, ret;
1543 int64_t end_time;
1544 int64_t filesize, offset, duration;
1545
1546 /* free previous packet */
1547 if (ic->cur_st && ic->cur_st->parser)
1548 av_free_packet(&ic->cur_pkt);
1549 ic->cur_st = NULL;
1550
1551 /* flush packet queue */
1552 flush_packet_queue(ic);
1553
1554 for(i=0;i<ic->nb_streams;i++) {
1555 st = ic->streams[i];
1556 if (st->parser) {
1557 av_parser_close(st->parser);
1558 st->parser= NULL;
1559 }
1560 }
1561
1562 /* we read the first packets to get the first PTS (not fully
1563 accurate, but it is enough for now) */
1564 url_fseek(ic->pb, 0, SEEK_SET);
1565 read_size = 0;
1566 for(;;) {
1567 if (read_size >= DURATION_MAX_READ_SIZE)
1568 break;
1569 /* if all info is available, we can stop */
1570 for(i = 0;i < ic->nb_streams; i++) {
1571 st = ic->streams[i];
1572 if (st->start_time == AV_NOPTS_VALUE)
1573 break;
1574 }
1575 if (i == ic->nb_streams)
1576 break;
1577
1578 ret = av_read_packet(ic, pkt);
1579 if (ret != 0)
1580 break;
1581 read_size += pkt->size;
1582 st = ic->streams[pkt->stream_index];
1583 if (pkt->pts != AV_NOPTS_VALUE) {
1584 if (st->start_time == AV_NOPTS_VALUE)
1585 st->start_time = pkt->pts;
1586 }
1587 av_free_packet(pkt);
1588 }
1589
1590 /* estimate the end time (duration) */
1591 /* XXX: may need to support wrapping */
1592 filesize = ic->file_size;
1593 offset = filesize - DURATION_MAX_READ_SIZE;
1594 if (offset < 0)
1595 offset = 0;
1596
1597 url_fseek(ic->pb, offset, SEEK_SET);
1598 read_size = 0;
1599 for(;;) {
1600 if (read_size >= DURATION_MAX_READ_SIZE)
1601 break;
1602
1603 ret = av_read_packet(ic, pkt);
1604 if (ret != 0)
1605 break;
1606 read_size += pkt->size;
1607 st = ic->streams[pkt->stream_index];
1608 if (pkt->pts != AV_NOPTS_VALUE &&
1609 st->start_time != AV_NOPTS_VALUE) {
1610 end_time = pkt->pts;
1611 duration = end_time - st->start_time;
1612 if (duration > 0) {
1613 if (st->duration == AV_NOPTS_VALUE ||
1614 st->duration < duration)
1615 st->duration = duration;
1616 }
1617 }
1618 av_free_packet(pkt);
1619 }
1620
1621 fill_all_stream_timings(ic);
1622
1623 url_fseek(ic->pb, old_offset, SEEK_SET);
1624 for(i=0; i<ic->nb_streams; i++){
1625 st= ic->streams[i];
1626 st->cur_dts= st->first_dts;
1627 st->last_IP_pts = AV_NOPTS_VALUE;
1628 }
1629 }
1630
1631 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1632 {
1633 int64_t file_size;
1634
1635 /* get the file size, if possible */
1636 if (ic->iformat->flags & AVFMT_NOFILE) {
1637 file_size = 0;
1638 } else {
1639 file_size = url_fsize(ic->pb);
1640 if (file_size < 0)
1641 file_size = 0;
1642 }
1643 ic->file_size = file_size;
1644
1645 if ((!strcmp(ic->iformat->name, "mpeg") ||
1646 !strcmp(ic->iformat->name, "mpegts")) &&
1647 file_size && !url_is_streamed(ic->pb)) {
1648 /* get accurate estimate from the PTSes */
1649 av_estimate_timings_from_pts(ic, old_offset);
1650 } else if (av_has_duration(ic)) {
1651 /* at least one component has timings - we use them for all
1652 the components */
1653 fill_all_stream_timings(ic);
1654 } else {
1655 /* less precise: use bitrate info */
1656 av_estimate_timings_from_bit_rate(ic);
1657 }
1658 av_update_stream_timings(ic);
1659
1660 #if 0
1661 {
1662 int i;
1663 AVStream *st;
1664 for(i = 0;i < ic->nb_streams; i++) {
1665 st = ic->streams[i];
1666 printf("%d: start_time: %0.3f duration: %0.3f\n",
1667 i, (double)st->start_time / AV_TIME_BASE,
1668 (double)st->duration / AV_TIME_BASE);
1669 }
1670 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1671 (double)ic->start_time / AV_TIME_BASE,
1672 (double)ic->duration / AV_TIME_BASE,
1673 ic->bit_rate / 1000);
1674 }
1675 #endif
1676 }
1677
1678 static int has_codec_parameters(AVCodecContext *enc)
1679 {
1680 int val;
1681 switch(enc->codec_type) {
1682 case CODEC_TYPE_AUDIO:
1683 val = enc->sample_rate;
1684 break;
1685 case CODEC_TYPE_VIDEO:
1686 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1687 break;
1688 default:
1689 val = 1;
1690 break;
1691 }
1692 return (enc->codec_id != CODEC_ID_NONE && val != 0);
1693 }
1694
1695 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1696 {
1697 int16_t *samples;
1698 AVCodec *codec;
1699 int got_picture, data_size, ret=0;
1700 AVFrame picture;
1701
1702 if(!st->codec->codec){
1703 codec = avcodec_find_decoder(st->codec->codec_id);
1704 if (!codec)
1705 return -1;
1706 ret = avcodec_open(st->codec, codec);
1707 if (ret < 0)
1708 return ret;
1709 }
1710
1711 if(!has_codec_parameters(st->codec)){
1712 switch(st->codec->codec_type) {
1713 case CODEC_TYPE_VIDEO:
1714 ret = avcodec_decode_video(st->codec, &picture,
1715 &got_picture, data, size);
1716 break;
1717 case CODEC_TYPE_AUDIO:
1718 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1719 samples = av_malloc(data_size);
1720 if (!samples)
1721 goto fail;
1722 ret = avcodec_decode_audio2(st->codec, samples,
1723 &data_size, data, size);
1724 av_free(samples);
1725 break;
1726 default:
1727 break;
1728 }
1729 }
1730 fail:
1731 return ret;
1732 }
1733
1734 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
1735 {
1736 AVInputFormat *fmt;
1737 fmt = av_probe_input_format2(pd, 1, &score);
1738
1739 if (fmt) {
1740 if (strncmp(fmt->name, "mp3", 3) == 0)
1741 st->codec->codec_id = CODEC_ID_MP3;
1742 else if (strncmp(fmt->name, "ac3", 3) == 0)
1743 st->codec->codec_id = CODEC_ID_AC3;
1744 }
1745 return !!fmt;
1746 }
1747
1748 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1749 {
1750 while (tags->id != CODEC_ID_NONE) {
1751 if (tags->id == id)
1752 return tags->tag;
1753 tags++;
1754 }
1755 return 0;
1756 }
1757
1758 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1759 {
1760 int i;
1761 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1762 if(tag == tags[i].tag)
1763 return tags[i].id;
1764 }
1765 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1766 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1767 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1768 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1769 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1770 return tags[i].id;
1771 }
1772 return CODEC_ID_NONE;
1773 }
1774
1775 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1776 {
1777 int i;
1778 for(i=0; tags && tags[i]; i++){
1779 int tag= codec_get_tag(tags[i], id);
1780 if(tag) return tag;
1781 }
1782 return 0;
1783 }
1784
1785 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1786 {
1787 int i;
1788 for(i=0; tags && tags[i]; i++){
1789 enum CodecID id= codec_get_id(tags[i], tag);
1790 if(id!=CODEC_ID_NONE) return id;
1791 }
1792 return CODEC_ID_NONE;
1793 }
1794
1795 /* absolute maximum size we read until we abort */
1796 #define MAX_READ_SIZE 5000000
1797
1798 #define MAX_STD_TIMEBASES (60*12+5)
1799 static int get_std_framerate(int i){
1800 if(i<60*12) return i*1001;
1801 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1802 }
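/* The first 60*12 entries represent exact frame rates in steps of 1/12 fps
 * up to 60 fps (e.g. i = 25*12 stands for 25 fps once divided by 1001*12 in
 * the caller); the five extra entries cover the NTSC-style rates 24000/1001,
 * 30000/1001, 60000/1001, 12000/1001 and 15000/1001. */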
1803
1804 /*
1805 * Is the time base unreliable.
1806 * This is a heuristic to balance between quick acceptance of the values in
1807 * the headers vs. some extra checks.
1808 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1809 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1810 * And there are "variable" fps files this needs to detect as well.
1811 */
1812 static int tb_unreliable(AVCodecContext *c){
1813 if( c->time_base.den >= 101L*c->time_base.num
1814 || c->time_base.den < 5L*c->time_base.num
1815 /* || c->codec_tag == ff_get_fourcc("DIVX")
1816 || c->codec_tag == ff_get_fourcc("XVID")*/
1817 || c->codec_id == CODEC_ID_MPEG2VIDEO)
1818 return 1;
1819 return 0;
1820 }
1821
1822 int av_find_stream_info(AVFormatContext *ic)
1823 {
1824 int i, count, ret, read_size, j;
1825 AVStream *st;
1826 AVPacket pkt1, *pkt;
1827 int64_t last_dts[MAX_STREAMS];
1828 int duration_count[MAX_STREAMS]={0};
1829 double (*duration_error)[MAX_STD_TIMEBASES];
1830 offset_t old_offset = url_ftell(ic->pb);
1831 int64_t codec_info_duration[MAX_STREAMS]={0};
1832 int codec_info_nb_frames[MAX_STREAMS]={0};
1833 AVProbeData probe_data[MAX_STREAMS];
1834 int codec_identified[MAX_STREAMS]={0};
1835
1836 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1837 if (!duration_error) return AVERROR(ENOMEM);
1838
1839 for(i=0;i<ic->nb_streams;i++) {
1840 st = ic->streams[i];
1841 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1842 /* if(!st->time_base.num)
1843 st->time_base= */
1844 if(!st->codec->time_base.num)
1845 st->codec->time_base= st->time_base;
1846 }
1847 //only for the split stuff
1848 if (!st->parser) {
1849 st->parser = av_parser_init(st->codec->codec_id);
1850 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
1851 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1852 }
1853 }
1854 }
1855
1856 for(i=0;i<MAX_STREAMS;i++){
1857 last_dts[i]= AV_NOPTS_VALUE;
1858 }
1859
1860 memset(probe_data, 0, sizeof(probe_data));
1861 count = 0;
1862 read_size = 0;
1863 for(;;) {
1864 /* check if one codec still needs to be handled */
1865 for(i=0;i<ic->nb_streams;i++) {
1866 st = ic->streams[i];
1867 if (!has_codec_parameters(st->codec))
1868 break;
1869 /* variable fps and no guess at the real fps */
1870 if( tb_unreliable(st->codec)
1871 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1872 break;
1873 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1874 break;
1875 if(st->first_dts == AV_NOPTS_VALUE)
1876 break;
1877 }
1878 if (i == ic->nb_streams) {
1879 /* NOTE: if the format has no header, then we need to read
1880 some packets to get most of the streams, so we cannot
1881 stop here */
1882 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1883 /* if we found the info for all the codecs, we can stop */
1884 ret = count;
1885 break;
1886 }
1887 }
1888 /* we did not get all the codec info, but we read too much data */
1889 if (read_size >= MAX_READ_SIZE) {
1890 ret = count;
1891 break;
1892 }
1893
1894 /* NOTE: a new stream can be added here if the file has no header
1895 (AVFMTCTX_NOHEADER) */
1896 ret = av_read_frame_internal(ic, &pkt1);
1897 if (ret < 0) {
1898 /* EOF or error */
1899 ret = -1; /* we could not have all the codec parameters before EOF */
1900 for(i=0;i<ic->nb_streams;i++) {
1901 st = ic->streams[i];
1902 if (!has_codec_parameters(st->codec)){
1903 char buf[256];
1904 avcodec_string(buf, sizeof(buf), st->codec, 0);
1905 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1906 } else {
1907 ret = 0;
1908 }
1909 }
1910 break;
1911 }
1912
1913 pkt= add_to_pktbuf(ic, &pkt1);
1914 if(av_dup_packet(pkt) < 0)
1915 return AVERROR(ENOMEM);
1916
1917 read_size += pkt->size;
1918
1919 st = ic->streams[pkt->stream_index];
1920 if(codec_info_nb_frames[st->index]>1)
1921 codec_info_duration[st->index] += pkt->duration;
1922 if (pkt->duration != 0)
1923 codec_info_nb_frames[st->index]++;
1924
1925 {
1926 int index= pkt->stream_index;
1927 int64_t last= last_dts[index];
1928 int64_t duration= pkt->dts - last;
1929
1930 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1931 double dur= duration * av_q2d(st->time_base);
1932
1933 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1934 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
1935 if(duration_count[index] < 2)
1936 memset(duration_error[index], 0, sizeof(*duration_error));
1937 for(i=1; i<MAX_STD_TIMEBASES; i++){
1938 int framerate= get_std_framerate(i);
1939 int ticks= lrintf(dur*framerate/(1001*12));
1940 double error= dur - ticks*1001*12/(double)framerate;
1941 duration_error[index][i] += error*error;
1942 }
1943 duration_count[index]++;
1944 }
1945 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
1946 last_dts[pkt->stream_index]= pkt->dts;
1947
1948 if (st->codec->codec_id == CODEC_ID_NONE) {
1949 AVProbeData *pd = &(probe_data[st->index]);
1950 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
1951 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
1952 pd->buf_size += pkt->size;
1953 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
1954 }
1955 }
1956 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1957 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1958 if(i){
1959 st->codec->extradata_size= i;
1960 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1961 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1962 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1963 }
1964 }
1965
1966 /* If we still have no information, try to open the codec and
1967 decompress the frame. We try to avoid this in most cases, as
1968 it takes longer and uses more memory. For MPEG-4, we need to
1969 decompress for QuickTime. */
1970 if (!has_codec_parameters(st->codec) /*&&
1971 (st->codec->codec_id == CODEC_ID_FLV1 ||
1972 st->codec->codec_id == CODEC_ID_H264 ||
1973 st->codec->codec_id == CODEC_ID_H263 ||
1974 st->codec->codec_id == CODEC_ID_H261 ||
1975 st->codec->codec_id == CODEC_ID_VORBIS ||
1976 st->codec->codec_id == CODEC_ID_MJPEG ||
1977 st->codec->codec_id == CODEC_ID_PNG ||
1978 st->codec->codec_id == CODEC_ID_PAM ||
1979 st->codec->codec_id == CODEC_ID_PGM ||
1980 st->codec->codec_id == CODEC_ID_PGMYUV ||
1981 st->codec->codec_id == CODEC_ID_PBM ||
1982 st->codec->codec_id == CODEC_ID_PPM ||
1983 st->codec->codec_id == CODEC_ID_SHORTEN ||
1984 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1985 try_decode_frame(st, pkt->data, pkt->size);
1986
1987 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
1988 break;
1989 }
1990 count++;
1991 }
1992
1993 // close codecs which were opened in try_decode_frame()
1994 for(i=0;i<ic->nb_streams;i++) {
1995 st = ic->streams[i];
1996 if(st->codec->codec)
1997 avcodec_close(st->codec);
1998 }
1999 for(i=0;i<ic->nb_streams;i++) {
2000 st = ic->streams[i];
2001 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2002 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2003 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2004
2005 if(duration_count[i]
2006 && tb_unreliable(st->codec) /*&&
2007 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2008 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2009 double best_error= 2*av_q2d(st->time_base);
2010 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2011
2012 for(j=1; j<MAX_STD_TIMEBASES; j++){
2013 double error= duration_error[i][j] * get_std_framerate(j);
2014 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2015 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2016 if(error < best_error){
2017 best_error= error;
2018 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2019 }
2020 }
2021 }
2022
2023 if (!st->r_frame_rate.num){
2024 if( st->codec->time_base.den * (int64_t)st->time_base.num
2025 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2026 st->r_frame_rate.num = st->codec->time_base.den;
2027 st->r_frame_rate.den = st->codec->time_base.num;
2028 }else{
2029 st->r_frame_rate.num = st->time_base.den;
2030 st->r_frame_rate.den = st->time_base.num;
2031 }
2032 }
2033 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2034 if (st->codec->codec_id == CODEC_ID_NONE && probe_data[st->index].buf_size > 0) {
2035 codec_identified[st->index] = set_codec_from_probe_data(st, &(probe_data[st->index]), 1);
2036 if (codec_identified[st->index]) {
2037 st->need_parsing = AVSTREAM_PARSE_FULL;
2038 }
2039 }
2040 if(!st->codec->bits_per_sample)
2041 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
2042 }
2043 }
2044
2045 av_estimate_timings(ic, old_offset);
2046
2047 for(i=0;i<ic->nb_streams;i++) {
2048 st = ic->streams[i];
2049 if (codec_identified[st->index])
2050 break;
2051 }
2052 //FIXME this is a mess
2053 if(i!=ic->nb_streams){
2054 av_read_frame_flush(ic);
2055 for(i=0;i<ic->nb_streams;i++) {
2056 st = ic->streams[i];
2057 if (codec_identified[st->index]) {
2058 av_seek_frame(ic, st->index, 0.0, 0);
2059 }
2060 st->cur_dts= st->first_dts;
2061 }
2062 url_fseek(ic->pb, ic->data_offset, SEEK_SET);
2063 }
2064
2065 #if 0
2066 /* correct DTS for B-frame streams with no timestamps */
2067 for(i=0;i<ic->nb_streams;i++) {
2068 st = ic->streams[i];
2069 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2070 if(b-frames){
2071 ppktl = &ic->packet_buffer;
2072 while(ppkt1){
2073 if(ppkt1->stream_index != i)
2074 continue;
2075 if(ppkt1->pkt->dts < 0)
2076 break;
2077 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2078 break;
2079 ppkt1->pkt->dts -= delta;
2080 ppkt1= ppkt1->next;
2081 }
2082 if(ppkt1)
2083 continue;
2084 st->cur_dts -= delta;
2085 }
2086 }
2087 }
2088 #endif
2089
2090 av_free(duration_error);
2091 for(i=0;i<MAX_STREAMS;i++){
2092 av_freep(&(probe_data[i].buf));
2093 }
2094
2095 return ret;
2096 }
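
/* Illustrative note (not part of the original file) on the r_frame_rate
 * estimation above: for every candidate standard rate, the loop accumulates
 * the squared difference between each observed inter-packet duration and the
 * nearest whole number of frame periods at that rate. From the arithmetic
 * above, candidate j corresponds to a frame rate of get_std_framerate(j)/(12*1001);
 * e.g. for a stream whose dts deltas are consistently ~1/29.97 s, only the
 * 30000/1001 candidate keeps a near-zero error sum, so av_reduce() ends up
 * storing 30000/1001 in st->r_frame_rate. */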
2097
2098 /*******************************************************/
2099
2100 int av_read_play(AVFormatContext *s)
2101 {
2102 if (s->iformat->read_play)
2103 return s->iformat->read_play(s);
2104 if (s->pb)
2105 return av_url_read_fpause(s->pb, 0);
2106 return AVERROR(ENOSYS);
2107 }
2108
2109 int av_read_pause(AVFormatContext *s)
2110 {
2111 if (s->iformat->read_pause)
2112 return s->iformat->read_pause(s);
2113 if (s->pb)
2114 return av_url_read_fpause(s->pb, 1);
2115 return AVERROR(ENOSYS);
2116 }
2117
2118 void av_close_input_stream(AVFormatContext *s)
2119 {
2120 int i;
2121 AVStream *st;
2122
2123 /* free previous packet */
2124 if (s->cur_st && s->cur_st->parser)
2125 av_free_packet(&s->cur_pkt);
2126
2127 if (s->iformat->read_close)
2128 s->iformat->read_close(s);
2129 for(i=0;i<s->nb_streams;i++) {
2130 /* free all data in a stream component */
2131 st = s->streams[i];
2132 if (st->parser) {
2133 av_parser_close(st->parser);
2134 }
2135 av_free(st->index_entries);
2136 av_free(st->codec->extradata);
2137 av_free(st->codec);
2138 av_free(st->filename);
2139 av_free(st);
2140 }
2141 for(i=s->nb_programs-1; i>=0; i--) {
2142 av_freep(&s->programs[i]->provider_name);
2143 av_freep(&s->programs[i]->name);
2144 av_freep(&s->programs[i]->stream_index);
2145 av_freep(&s->programs[i]);
2146 }
2147 flush_packet_queue(s);
2148 av_freep(&s->priv_data);
2149 av_free(s);
2150 }
2151
2152 void av_close_input_file(AVFormatContext *s)
2153 {
2154 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2155 av_close_input_stream(s);
2156 if (pb)
2157 url_fclose(pb);
2158 }
2159
2160 AVStream *av_new_stream(AVFormatContext *s, int id)
2161 {
2162 AVStream *st;
2163 int i;
2164
2165 if (s->nb_streams >= MAX_STREAMS)
2166 return NULL;
2167
2168 st = av_mallocz(sizeof(AVStream));
2169 if (!st)
2170 return NULL;
2171
2172 st->codec= avcodec_alloc_context();
2173 if (s->iformat) {
2174 /* no default bitrate if decoding */
2175 st->codec->bit_rate = 0;
2176 }
2177 st->index = s->nb_streams;
2178 st->id = id;
2179 st->start_time = AV_NOPTS_VALUE;
2180 st->duration = AV_NOPTS_VALUE;
2181 st->cur_dts = AV_NOPTS_VALUE;
2182 st->first_dts = AV_NOPTS_VALUE;
2183
2184 /* default pts setting is MPEG-like */
2185 av_set_pts_info(st, 33, 1, 90000);
2186 st->last_IP_pts = AV_NOPTS_VALUE;
2187 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2188 st->pts_buffer[i]= AV_NOPTS_VALUE;
2189
2190 s->streams[s->nb_streams++] = st;
2191 return st;
2192 }
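
/* Illustrative sketch (not part of the original file): typical use of
 * av_new_stream() from a demuxer's read_header callback. example_read_header
 * and the concrete values below are made-up; only functions and fields that
 * appear in this file are used. */
#if 0
static int example_read_header(AVFormatContext *s)
{
    AVStream *st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = CODEC_TYPE_VIDEO;
    /* replace the MPEG-like 1/90000 default set by av_new_stream() with a
       container-specific time base, here milliseconds */
    av_set_pts_info(st, 64, 1, 1000);
    return 0;
}
#endif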
2193
2194 AVProgram *av_new_program(AVFormatContext *ac, int id)
2195 {
2196 AVProgram *program=NULL;
2197 int i;
2198
2199 #ifdef DEBUG_SI
2200 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2201 #endif
2202
2203 for(i=0; i<ac->nb_programs; i++)
2204 if(ac->programs[i]->id == id)
2205 program = ac->programs[i];
2206
2207 if(!program){
2208 program = av_mallocz(sizeof(AVProgram));
2209 if (!program)
2210 return NULL;
2211 dynarray_add(&ac->programs, &ac->nb_programs, program);
2212 program->discard = AVDISCARD_NONE;
2213 }
2214 program->id = id;
2215
2216 return program;
2217 }
2218
2219 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2220 {
2221 assert(!provider_name == !name);
2222 if(name){
2223 av_free(program->provider_name);
2224 av_free(program-> name);
2225 program->provider_name = av_strdup(provider_name);
2226 program-> name = av_strdup( name);
2227 }
2228 }
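
/* Illustrative sketch (not part of the original file): how a demuxer might
 * record program information with the helpers above. The program id, the
 * names and the stream index are made-up example values. */
#if 0
static void example_register_program(AVFormatContext *s)
{
    AVProgram *program = av_new_program(s, 0x0042);
    if (!program)
        return;
    av_set_program_name(program, "Example Provider", "Example Service");
    /* av_program_add_stream_index() is defined further down in this file */
    av_program_add_stream_index(s, 0x0042, 0);
}
#endif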
2229
2230
2231 /************************************************************/
2232 /* output media file */
2233
2234 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2235 {
2236 int ret;
2237
2238 if (s->oformat->priv_data_size > 0) {
2239 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2240 if (!s->priv_data)
2241 return AVERROR(ENOMEM);
2242 } else
2243 s->priv_data = NULL;
2244
2245 if (s->oformat->set_parameters) {
2246 ret = s->oformat->set_parameters(s, ap);
2247 if (ret < 0)
2248 return ret;
2249 }
2250 return 0;
2251 }
2252
2253 int av_write_header(AVFormatContext *s)
2254 {
2255 int ret, i;
2256 AVStream *st;
2257
2258 // some sanity checks
2259 for(i=0;i<s->nb_streams;i++) {
2260 st = s->streams[i];
2261
2262 switch (st->codec->codec_type) {
2263 case CODEC_TYPE_AUDIO:
2264 if(st->codec->sample_rate<=0){
2265 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2266 return -1;
2267 }
2268 break;
2269 case CODEC_TYPE_VIDEO:
2270 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2271 av_log(s, AV_LOG_ERROR, "time base not set\n");
2272 return -1;
2273 }
2274 if(st->codec->width<=0 || st->codec->height<=0){
2275 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2276 return -1;
2277 }
2278 break;
2279 }
2280
2281 if(s->oformat->codec_tag){
2282 if(st->codec->codec_tag){
2283 //FIXME
2284 //check that tag + id is in the table
2285 //if neither is in the table -> OK
2286 //if tag is in the table with another id -> FAIL
2287 //if id is in the table with another tag -> FAIL unless strict < ?
2288 }else
2289 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2290 }
2291 }
2292
2293 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2294 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2295 if (!s->priv_data)
2296 return AVERROR(ENOMEM);
2297 }
2298
2299 if(s->oformat->write_header){
2300 ret = s->oformat->write_header(s);
2301 if (ret < 0)
2302 return ret;
2303 }
2304
2305 /* init PTS generation */
2306 for(i=0;i<s->nb_streams;i++) {
2307 int64_t den = AV_NOPTS_VALUE;
2308 st = s->streams[i];
2309
2310 switch (st->codec->codec_type) {
2311 case CODEC_TYPE_AUDIO:
2312 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2313 break;
2314 case CODEC_TYPE_VIDEO:
2315 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2316 break;
2317 default:
2318 break;
2319 }
2320 if (den != AV_NOPTS_VALUE) {
2321 if (den <= 0)
2322 return AVERROR_INVALIDDATA;
2323 av_frac_init(&st->pts, 0, 0, den);
2324 }
2325 }
2326 return 0;
2327 }
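
/* Illustrative sketch (not part of the original file): the usual call order
 * on the muxing side before any packet is written. The AVFormatContext is
 * assumed to have been prepared elsewhere (oformat, pb and streams set up);
 * passing NULL to av_set_parameters() stands for "no format-specific
 * parameters". */
#if 0
static int example_start_muxing(AVFormatContext *s)
{
    int ret = av_set_parameters(s, NULL);
    if (ret < 0)
        return ret;
    /* sanity-checks the streams, fills codec tags and writes the header */
    return av_write_header(s);
}
#endif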
2328
2329 //FIXME merge with compute_pkt_fields
2330 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2331 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2332 int num, den, frame_size, i;
2333
2334 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2335
2336 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2337 return -1;*/
2338
2339 /* duration field */
2340 if (pkt->duration == 0) {
2341 compute_frame_duration(&num, &den, st, NULL, pkt);
2342 if (den && num) {
2343 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2344 }
2345 }
2346
2347 //XXX/FIXME this is a temporary hack until all encoders output pts
2348 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2349 pkt->dts=
2350 // pkt->pts= st->cur_dts;
2351 pkt->pts= st->pts.val;
2352 }
2353
2354 //calculate dts from pts
2355 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2356 st->pts_buffer[0]= pkt->pts;
2357 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2358 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2359 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2360 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2361
2362 pkt->dts= st->pts_buffer[0];
2363 }
2364
2365 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2366 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2367 return -1;
2368 }
2369 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2370 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2371 return -1;
2372 }
2373
2374 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2375 st->cur_dts= pkt->dts;
2376 st->pts.val= pkt->dts;
2377
2378 /* update pts */
2379 switch (st->codec->codec_type) {
2380 case CODEC_TYPE_AUDIO:
2381 frame_size = get_audio_frame_size(st->codec, pkt->size);
2382
2383 /* HACK/FIXME: we skip the initial zero-sized packets, as they most
2384 likely correspond to the encoder delay, but it would be better if
2385 we had the real timestamps from the encoder */
2386 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2387 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2388 }
2389 break;
2390 case CODEC_TYPE_VIDEO:
2391 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2392 break;
2393 default:
2394 break;
2395 }
2396 return 0;
2397 }
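
/* Illustrative trace (not part of the original file) of the dts-from-pts
 * logic in compute_pkt_fields2() above, assuming delay = 1, pkt->duration = 1
 * and an IPB stream whose packets arrive in coded order with pts 0, 2, 1, 4, 3:
 *
 *   pts 0 -> pts_buffer {-1, 0} -> dts -1
 *   pts 2 -> pts_buffer { 0, 2} -> dts  0
 *   pts 1 -> pts_buffer { 1, 2} -> dts  1
 *   pts 4 -> pts_buffer { 2, 4} -> dts  2
 *   pts 3 -> pts_buffer { 3, 4} -> dts  3
 *
 * i.e. each packet's dts is the smallest pts currently in the reorder window,
 * which keeps dts monotonically increasing and <= pts. */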
2398
2399 static void truncate_ts(AVStream *st, AVPacket *pkt){
2400 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2401
2402 // if(pkt->dts < 0)
2403 // pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2404
2405 if (pkt->pts != AV_NOPTS_VALUE)
2406 pkt->pts &= pts_mask;
2407 if (pkt->dts != AV_NOPTS_VALUE)
2408 pkt->dts &= pts_mask;
2409 }
2410
2411 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2412 {
2413 int ret;
2414
2415 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2416 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2417 return ret;
2418
2419 truncate_ts(s->streams[pkt->stream_index], pkt);
2420
2421 ret= s->oformat->write_packet(s, pkt);
2422 if(!ret)
2423 ret= url_ferror(s->pb);
2424 return ret;
2425 }
2426
2427 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2428 AVPacketList *pktl, **next_point, *this_pktl;
2429 int stream_count=0;
2430 int streams[MAX_STREAMS];
2431
2432 if(pkt){
2433 AVStream *st= s->streams[ pkt->stream_index];
2434
2435 // assert(pkt->destruct != av_destruct_packet); //FIXME
2436
2437 this_pktl = av_mallocz(sizeof(AVPacketList));
2438 this_pktl->pkt= *pkt;
2439 if(pkt->destruct == av_destruct_packet)
2440 pkt->destruct= NULL; // not shared -> must keep original from being freed
2441 else
2442 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2443
2444 next_point = &s->packet_buffer;
2445 while(*next_point){
2446 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2447 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2448 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2449 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2450 break;
2451 next_point= &(*next_point)->next;
2452 }
2453 this_pktl->next= *next_point;
2454 *next_point= this_pktl;
2455 }
2456
2457 memset(streams, 0, sizeof(streams));
2458 pktl= s->packet_buffer;
2459 while(pktl){
2460 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2461 if(streams[ pktl->pkt.stream_index ] == 0)
2462 stream_count++;
2463 streams[ pktl->pkt.stream_index ]++;
2464 pktl= pktl->next;
2465 }
2466
2467 if(s->nb_streams == stream_count || (flush && stream_count)){
2468 pktl= s->packet_buffer;
2469 *out= pktl->pkt;
2470
2471 s->packet_buffer= pktl->next;
2472 av_freep(&pktl);
2473 return 1;
2474 }else{
2475 av_init_packet(out);
2476 return 0;
2477 }
2478 }
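
/* Illustrative trace (not part of the original file) of
 * av_interleave_packet_per_dts() with two streams; the (stream, dts) pairs
 * below are made-up values. Each call returns at most one packet, and only
 * once every stream has at least one buffered packet (or when flushing):
 *
 *   in (0, 0)  -> buffered (0,0)               -> returns 0 (stream 1 empty)
 *   in (0,10)  -> buffered (0,0) (0,10)        -> returns 0
 *   in (1, 5)  -> buffered (0,0) (1,5) (0,10)  -> outputs (0,0), returns 1
 *   in NULL    -> buffered (1,5) (0,10)        -> outputs (1,5), returns 1
 *   in NULL    -> buffered (0,10)              -> returns 0 (stream 1 empty)
 *
 * av_interleaved_write_frame() drives exactly this pattern, and the flush=1
 * call from av_write_trailer() drains whatever is left. */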
2479
2480 /**
2481 * Interleaves an AVPacket correctly so it can be muxed.
2482 * @param out the interleaved packet will be output here
2483 * @param in the input packet
2484 * @param flush 1 if no further packets are available as input and all
2485 * remaining packets should be output
2486 * @return 1 if a packet was output, 0 if no packet could be output,
2487 * < 0 if an error occurred
2488 */
2489 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2490 if(s->oformat->interleave_packet)
2491 return s->oformat->interleave_packet(s, out, in, flush);
2492 else
2493 return av_interleave_packet_per_dts(s, out, in, flush);
2494 }
2495
2496 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2497 AVStream *st= s->streams[ pkt->stream_index];
2498
2499 //FIXME/XXX/HACK drop zero sized packets
2500 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2501 return 0;
2502
2503 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2504 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2505 return -1;
2506
2507 if(pkt->dts == AV_NOPTS_VALUE)
2508 return -1;
2509
2510 for(;;){
2511 AVPacket opkt;
2512 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2513 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2514 return ret;
2515
2516 truncate_ts(s->streams[opkt.stream_index], &opkt);
2517 ret= s->oformat->write_packet(s, &opkt);
2518
2519 av_free_packet(&opkt);
2520 pkt= NULL;
2521
2522 if(ret<0)
2523 return ret;
2524 if(url_ferror(s->pb))
2525 return url_ferror(s->pb);
2526 }
2527 }
2528
2529 int av_write_trailer(AVFormatContext *s)
2530 {
2531 int ret, i;
2532
2533 for(;;){
2534 AVPacket pkt;
2535 ret= av_interleave_packet(s, &pkt, NULL, 1);
2536 if(ret<0) //FIXME cleanup needed for ret<0 ?
2537 goto fail;
2538 if(!ret)
2539 break;
2540
2541 truncate_ts(s->streams[pkt.stream_index], &pkt);
2542 ret= s->oformat->write_packet(s, &pkt);
2543
2544 av_free_packet(&pkt);
2545
2546 if(ret<0)
2547 goto fail;
2548 if(url_ferror(s->pb))
2549 goto fail;
2550 }
2551
2552 if(s->oformat->write_trailer)
2553 ret = s->oformat->write_trailer(s);
2554 fail:
2555 if(ret == 0)
2556 ret=url_ferror(s->pb);
2557 for(i=0;i<s->nb_streams;i++)
2558 av_freep(&s->streams[i]->priv_data);
2559 av_freep(&s->priv_data);
2560 return ret;
2561 }
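
/* Illustrative sketch (not part of the original file) of the complete muxing
 * loop built from the functions above; get_next_packet() is a hypothetical
 * callback standing in for whatever produces encoded packets. */
#if 0
static int example_mux(AVFormatContext *s, int (*get_next_packet)(AVPacket *pkt))
{
    AVPacket pkt;
    int ret = av_write_header(s);
    if (ret < 0)
        return ret;

    while (get_next_packet(&pkt) == 0) {
        /* buffers and reorders packets by dts before writing them */
        ret = av_interleaved_write_frame(s, &pkt);
        if (ret < 0)
            return ret;
    }

    /* drains the interleaving buffer and writes the container trailer */
    return av_write_trailer(s);
}
#endif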
2562
2563 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2564 {
2565 int i, j;
2566 AVProgram *program=NULL;
2567 void *tmp;
2568
2569 for(i=0; i<ac->nb_programs; i++){
2570 if(ac->programs[i]->id != progid)
2571 continue;
2572 program = ac->programs[i];
2573 for(j=0; j<program->nb_stream_indexes; j++)
2574 if(program->stream_index[j] == idx)
2575 return;
2576
2577 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2578 if(!tmp)
2579 return;
2580 program->stream_index = tmp;
2581 program->stream_index[program->nb_stream_indexes++] = idx;
2582 return;
2583 }
2584 }
2585
2586 /* "user interface" functions */
2587 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2588 {
2589 char buf[256];
2590 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2591 AVStream *st = ic->streams[i];
2592 int g = ff_gcd(st->time_base.num, st->time_base.den);
2593 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2594 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2595 /* the pid is important information, so we display it */
2596 /* XXX: add a generic system */
2597 if (flags & AVFMT_SHOW_IDS)
2598 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2599 if (strlen(st->language) > 0)
2600 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2601 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2602 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2603 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2604 if(st->r_frame_rate.den && st->r_frame_rate.num)
2605 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2606 /* else if(st->time_base.den && st->time_base.num)
2607 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2608 else
2609 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2610 }
2611 av_log(NULL, AV_LOG_INFO, "\n");
2612 }
2613
2614 void dump_format(AVFormatContext *ic,
2615 int index,
2616 const char *url,
2617 int is_output)
2618 {
2619 int i;
2620
2621 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2622 is_output ? "Output" : "Input",
2623 index,
2624 is_output ? ic->oformat->name : ic->iformat->name,
2625 is_output ? "to" : "from", url);
2626 if (!is_output) {
2627 av_log(NULL, AV_LOG_INFO, " Duration: ");
2628 if (ic->duration != AV_NOPTS_VALUE) {
2629 int hours, mins, secs, us;
2630 secs = ic->duration / AV_TIME_BASE;
2631 us = ic->duration % AV_TIME_BASE;
2632 mins = secs / 60;
2633 secs %= 60;
2634 hours = mins / 60;
2635 mins %= 60;
2636 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2637 (10 * us) / AV_TIME_BASE);
2638 } else {
2639 av_log(NULL, AV_LOG_INFO, "N/A");
2640 }
2641 if (ic->start_time != AV_NOPTS_VALUE) {
2642 int secs, us;
2643 av_log(NULL, AV_LOG_INFO, ", start: ");
2644 secs = ic->start_time / AV_TIME_BASE;
2645 us = ic->start_time % AV_TIME_BASE;
2646 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2647 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2648 }
2649 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2650 if (ic->bit_rate) {
2651 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2652 } else {
2653 av_log(NULL, AV_LOG_INFO, "N/A");
2654 }
2655 av_log(NULL, AV_LOG_INFO, "\n");
2656 }
2657 if(ic->nb_programs) {
2658 int j, k;
2659 for(j=0; j<ic->nb_programs; j++) {
2660 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2661 ic->programs[j]->name ? ic->programs[j]->name : "");
2662 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2663 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2664 }
2665 } else
2666 for(i=0;i<ic->nb_streams;i++)
2667 dump_stream_format(ic, i, index, is_output);
2668 }
2669
2670 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2671 {
2672 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2673 }
2674
2675 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2676 {
2677 AVRational frame_rate;
2678 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2679 *frame_rate_num= frame_rate.num;
2680 *frame_rate_den= frame_rate.den;
2681 return ret;
2682 }
2683
2684 /**
2685 * Gets the current time in microseconds.
2686 */
2687 int64_t av_gettime(void)
2688 {
2689 struct timeval tv;
2690 gettimeofday(&tv,NULL);
2691 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2692 }
2693
2694 int64_t parse_date(const char *datestr, int duration)
2695 {
2696 const char *p;
2697 int64_t t;
2698 struct tm dt;
2699 int i;
2700 static const char *date_fmt[] = {
2701 "%Y-%m-%d",
2702 "%Y%m%d",
2703 };
2704 static const char *time_fmt[] = {
2705 "%H:%M:%S",
2706 "%H%M%S",
2707 };
2708 const char *q;
2709 int is_utc, len;
2710 char lastch;
2711 int negative = 0;
2712
2713 #undef time
2714 time_t now = time(0);
2715
2716 len = strlen(datestr);
2717 if (len > 0)
2718 lastch = datestr[len - 1];
2719 else
2720 lastch = '\0';
2721 is_utc = (lastch == 'z' || lastch == 'Z');
2722
2723 memset(&dt, 0, sizeof(dt));
2724
2725 p = datestr;
2726 q = NULL;
2727 if (!duration) {
2728 /* parse the year-month-day part */
2729 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2730 q = small_strptime(p, date_fmt[i], &dt);
2731 if (q) {
2732 break;
2733 }
2734 }
2735
2736 /* if the year-month-day part is missing, then take the
2737 * current year-month-day time */
2738 if (!q) {
2739 if (is_utc) {
2740 dt = *gmtime(&now);
2741 } else {
2742 dt = *localtime(&now);
2743 }
2744 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2745 } else {
2746 p = q;
2747 }
2748
2749 if (*p == 'T' || *p == 't' || *p == ' ')
2750 p++;
2751
2752 /* parse the hour-minute-second part */
2753 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2754 q = small_strptime(p, time_fmt[i], &dt);
2755 if (q) {
2756 break;
2757 }
2758 }
2759 } else {
2760 /* parse datestr as a duration */
2761 if (p[0] == '-') {
2762 negative = 1;
2763 ++p;
2764 }
2765 /* parse datestr as HH:MM:SS */
2766 q = small_strptime(p, time_fmt[0], &dt);
2767 if (!q) {
2768 /* parse datestr as S+ */
2769 dt.tm_sec = strtol(p, (char **)&q, 10);
2770 if (q == p)
2771 /* the parsing didn't succeed */
2772 return INT64_MIN;
2773 dt.tm_min = 0;
2774 dt.tm_hour = 0;
2775 }
2776 }
2777
2778 /* Now we have all the fields that we can get */
2779 if (!q) {
2780 return INT64_MIN;
2781 }
2782
2783 if (duration) {
2784 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2785 } else {
2786 dt.tm_isdst = -1; /* unknown */
2787 if (is_utc) {
2788 t = mktimegm(&dt);
2789 } else {
2790 t = mktime(&dt);
2791 }
2792 }
2793
2794 t *= 1000000;
2795
2796 /* parse the fractional second part (.m..., up to microsecond precision) */
2797 if (*q == '.') {
2798 int val, n;
2799 q++;
2800 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2801 if (!isdigit(*q))
2802 break;
2803 val += n * (*q - '0');
2804 }
2805 t += val;
2806 }
2807 return negative ? -t : t;
2808 }
2809
2810 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2811 {
2812 const char *p;
2813 char tag[128], *q;
2814
2815 p = info;
2816 if (*p == '?')
2817 p++;
2818 for(;;) {
2819 q = tag;
2820 while (*p != '\0' && *p != '=' && *p != '&') {
2821 if ((q - tag) < sizeof(tag) - 1)
2822 *q++ = *p;
2823 p++;
2824 }
2825 *q = '\0';
2826 q = arg;
2827 if (*p == '=') {
2828 p++;
2829 while (*p != '&' && *p != '\0') {
2830 if ((q - arg) < arg_size - 1) {
2831 if (*p == '+')
2832 *q++ = ' ';
2833 else
2834 *q++ = *p;
2835 }
2836 p++;
2837 }
2838 *q = '\0';
2839 }
2840 if (!strcmp(tag, tag1))
2841 return 1;
2842 if (*p != '&')
2843 break;
2844 p++;
2845 }
2846 return 0;
2847 }
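
/* Illustrative example (not part of the original file): extracting one option
 * from a '?key=value&key=value' style string with find_info_tag(); the tag
 * name and option string are made-up values.
 *
 *   char buf[64];
 *   if (find_info_tag(buf, sizeof(buf), "mode", "?mode=live+stream&rate=2"))
 *       ...  // returns 1, buf contains "live stream" ('+' decodes to a space)
 */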
2848
2849 int av_get_frame_filename(char *buf, int buf_size,
2850 const char *path, int number)
2851 {
2852 const char *p;
2853 char *q, buf1[20], c;
2854 int nd, len, percentd_found;
2855
2856 q = buf;
2857 p = path;
2858 percentd_found = 0;
2859 for(;;) {
2860 c = *p++;
2861 if (c == '\0')
2862 break;
2863 if (c == '%') {
2864 do {
2865 nd = 0;
2866 while (isdigit(*p)) {
2867 nd = nd * 10 + *p++ - '0';
2868 }
2869 c = *p++;
2870 } while (isdigit(c));
2871
2872 switch(c) {
2873 case '%':
2874 goto addchar;
2875 case 'd':
2876 if (percentd_found)
2877 goto fail;
2878 percentd_found = 1;
2879 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2880 len = strlen(buf1);
2881 if ((q - buf + len) > buf_size - 1)
2882 goto fail;
2883 memcpy(q, buf1, len);
2884 q += len;
2885 break;
2886 default:
2887 goto fail;
2888 }
2889 } else {
2890 addchar:
2891 if ((q - buf) < buf_size - 1)
2892 *q++ = c;
2893 }
2894 }
2895 if (!percentd_found)
2896 goto fail;
2897 *q = '\0';
2898 return 0;
2899 fail:
2900 *q = '\0';
2901 return -1;
2902 }
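
/* Illustrative example (not part of the original file): expanding an image
 * sequence pattern with av_get_frame_filename(); the pattern and number are
 * made-up values.
 *
 *   char name[64];
 *   av_get_frame_filename(name, sizeof(name), "frame%04d.ppm", 12);
 *   // name now contains "frame0012.ppm"
 *
 * The call fails (returns -1) if the pattern contains no %d, or more than
 * one %d; "%%" escapes a literal percent sign. */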
2903
2904 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
2905 {
2906 int len, i, j, c;
2907 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2908
2909 for(i=0;i<size;i+=16) {
2910 len = size - i;
2911 if (len > 16)
2912 len = 16;
2913 PRINT("%08x ", i);
2914 for(j=0;j<16;j++) {
2915 if (j < len)
2916 PRINT(" %02x", buf[i+j]);
2917 else
2918 PRINT(" ");
2919 }
2920 PRINT(" ");
2921 for(j=0;j<len;j++) {
2922 c = buf[i+j];
2923 if (c < ' ' || c > '~')
2924 c = '.';
2925 PRINT("%c", c);
2926 }
2927 PRINT("\n");
2928 }
2929 #undef PRINT
2930 }
2931
2932 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2933 {
2934 hex_dump_internal(NULL, f, 0, buf, size);
2935 }
2936
2937 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
2938 {
2939 hex_dump_internal(avcl, NULL, level, buf, size);
2940 }
2941
2942 //FIXME needs to know the time_base
2943 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
2944 {
2945 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2946 PRINT("stream #%d:\n", pkt->stream_index);
2947 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2948 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2949 /* DTS is _always_ valid after av_read_frame() */
2950 PRINT(" dts=");
2951 if (pkt->dts == AV_NOPTS_VALUE)
2952 PRINT("N/A");
2953 else
2954 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
2955 /* PTS may not be known if B-frames are present. */
2956 PRINT(" pts=");
2957 if (pkt->pts == AV_NOPTS_VALUE)
2958 PRINT("N/A");
2959 else
2960 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
2961 PRINT("\n");
2962 PRINT(" size=%d\n", pkt->size);
2963 #undef PRINT
2964 if (dump_payload)
2965 av_hex_dump(f, pkt->data, pkt->size);
2966 }
2967
2968 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2969 {
2970 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
2971 }
2972
2973 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
2974 {
2975 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
2976 }
2977
2978 void url_split(char *proto, int proto_size,
2979 char *authorization, int authorization_size,
2980 char *hostname, int hostname_size,
2981 int *port_ptr,
2982 char *path, int path_size,
2983 const char *url)
2984 {
2985 const char *p, *ls, *at, *col, *brk;
2986
2987 if (port_ptr) *port_ptr = -1;
2988 if (proto_size > 0) proto[0] = 0;
2989 if (authorization_size > 0) authorization[0] = 0;
2990 if (hostname_size > 0) hostname[0] = 0;
2991 if (path_size > 0) path[0] = 0;
2992
2993 /* parse protocol */
2994 if ((p = strchr(url, ':'))) {
2995 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
2996 p++; /* skip ':' */
2997 if (*p == '/') p++;
2998 if (*p == '/') p++;
2999 } else {
3000 /* no protocol means plain filename */
3001 av_strlcpy(path, url, path_size);
3002 return;
3003 }
3004
3005 /* separate path from hostname */
3006 ls = strchr(p, '/');
3007 if(!ls)
3008 ls = strchr(p, '?');
3009 if(ls)
3010 av_strlcpy(path, ls, path_size);
3011 else
3012 ls = &p[strlen(p)]; // XXX
3013
3014 /* the rest is hostname, use that to parse auth/port */
3015 if (ls != p) {
3016 /* authorization (user[:pass]@hostname) */
3017 if ((at = strchr(p, '@')) && at < ls) {
3018 av_strlcpy(authorization, p,
3019 FFMIN(authorization_size, at + 1 - p));
3020 p = at + 1; /* skip '@' */
3021 }
3022
3023 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3024 /* [host]:port */
3025 av_strlcpy(hostname, p + 1,
3026 FFMIN(hostname_size, brk - p));
3027 if (brk[1] == ':' && port_ptr)
3028 *port_ptr = atoi(brk + 2);
3029 } else if ((col = strchr(p, ':')) && col < ls) {
3030 av_strlcpy(hostname, p,
3031 FFMIN(col + 1 - p, hostname_size));
3032 if (port_ptr) *port_ptr = atoi(col + 1);
3033 } else
3034 av_strlcpy(hostname, p,
3035 FFMIN(ls + 1 - p, hostname_size));
3036 }
3037 }
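
/* Illustrative example (not part of the original file) of how url_split()
 * breaks up a URL; the address below is a made-up value.
 *
 *   "http://user:pass@host.example.com:8080/stream?token=x" is split into
 *     proto         "http"
 *     authorization "user:pass"
 *     hostname      "host.example.com"
 *     port          8080
 *     path          "/stream?token=x"
 *
 * A string without a "proto:" prefix is treated as a plain filename and
 * copied entirely into path. */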
3038
3039 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3040 int pts_num, int pts_den)
3041 {
3042 s->pts_wrap_bits = pts_wrap_bits;
3043 s->time_base.num = pts_num;
3044 s->time_base.den = pts_den;
3045 }
3046
3047 /* fraction handling */
3048
3049 /**
3050 * f = val + (num / den) + 0.5.
3051 *
3052 * 'num' is normalized so that 0 <= num < den.
3053 *
3054 * @param f fractional number
3055 * @param val integer value
3056 * @param num must be >= 0
3057 * @param den must be >= 1
3058 */
3059 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3060 {
3061 num += (den >> 1);
3062 if (num >= den) {
3063 val += num / den;
3064 num = num % den;
3065 }
3066 f->val = val;
3067 f->num = num;
3068 f->den = den;
3069 }
3070
3071 /**
3072 * Fractional addition to f: f = f + (incr / f->den).
3073 *
3074 * @param f fractional number
3075 * @param incr increment, can be positive or negative
3076 */
3077 static void av_frac_add(AVFrac *f, int64_t incr)
3078 {
3079 int64_t num, den;
3080
3081 num = f->num + incr;
3082 den = f->den;
3083 if (num < 0) {
3084 f->val += num / den;
3085 num = num % den;
3086 if (num < 0) {
3087 num += den;
3088 f->val--;
3089 }
3090 } else if (num >= den) {
3091 f->val += num / den;
3092 num = num % den;
3093 }
3094 f->num = num;
3095 }
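
/* Illustrative worked example (not part of the original file) of the fraction
 * helpers above, which av_write_header() and compute_pkt_fields2() use for
 * PTS generation. Assume an audio stream with a 1/1000 time base
 * (milliseconds) and a 44100 Hz sample rate, so den = 1 * 44100, and each
 * 1024-sample packet adds st->time_base.den * frame_size = 1000 * 1024:
 *
 *   av_frac_init(&pts, 0, 0, 44100);  // val = 0,  num = 22050 (rounding bias)
 *   av_frac_add(&pts, 1000 * 1024);   // val = 23, num = 31750
 *   av_frac_add(&pts, 1000 * 1024);   // val = 46, num = 41450
 *   av_frac_add(&pts, 1000 * 1024);   // val = 70, num = 7050
 *
 * The pts advances by 23, 23, 24 ms: the fractional remainder is carried
 * over, so rounding errors do not accumulate across packets. */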