Simplify av_open_input_file
libav.git: libavformat/utils.c
1 /*
2 * Various utilities for the FFmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "opt.h"
23 #include "avstring.h"
24 #include "riff.h"
25 #include <sys/time.h>
26 #include <time.h>
27
28 #undef NDEBUG
29 #include <assert.h>
30
31 /**
32 * @file libavformat/utils.c
33 * Various utility functions for using the FFmpeg libavformat library.
34 */
35
36 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
37 static void av_frac_add(AVFrac *f, int64_t incr);
38
39 /** head of registered input format linked list. */
40 AVInputFormat *first_iformat = NULL;
41 /** head of registered output format linked list. */
42 AVOutputFormat *first_oformat = NULL;
43
44 AVInputFormat *av_iformat_next(AVInputFormat *f)
45 {
46 if(f) return f->next;
47 else return first_iformat;
48 }
49
50 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
51 {
52 if(f) return f->next;
53 else return first_oformat;
54 }
55
56 void av_register_input_format(AVInputFormat *format)
57 {
58 AVInputFormat **p;
59 p = &first_iformat;
60 while (*p != NULL) p = &(*p)->next;
61 *p = format;
62 format->next = NULL;
63 }
64
65 void av_register_output_format(AVOutputFormat *format)
66 {
67 AVOutputFormat **p;
68 p = &first_oformat;
69 while (*p != NULL) p = &(*p)->next;
70 *p = format;
71 format->next = NULL;
72 }
73
74 int match_ext(const char *filename, const char *extensions)
75 {
76 const char *ext, *p;
77 char ext1[32], *q;
78
79 if(!filename)
80 return 0;
81
82 ext = strrchr(filename, '.');
83 if (ext) {
84 ext++;
85 p = extensions;
86 for(;;) {
87 q = ext1;
88 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
89 *q++ = *p++;
90 *q = '\0';
91 if (!strcasecmp(ext1, ext))
92 return 1;
93 if (*p == '\0')
94 break;
95 p++;
96 }
97 }
98 return 0;
99 }
100
101 AVOutputFormat *guess_format(const char *short_name, const char *filename,
102 const char *mime_type)
103 {
104 AVOutputFormat *fmt, *fmt_found;
105 int score_max, score;
106
107 /* specific test for image sequences */
108 #ifdef CONFIG_IMAGE2_MUXER
109 if (!short_name && filename &&
110 av_filename_number_test(filename) &&
111 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
112 return guess_format("image2", NULL, NULL);
113 }
114 #endif
115 /* find the proper file type */
116 fmt_found = NULL;
117 score_max = 0;
118 fmt = first_oformat;
119 while (fmt != NULL) {
120 score = 0;
121 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
122 score += 100;
123 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
124 score += 10;
125 if (filename && fmt->extensions &&
126 match_ext(filename, fmt->extensions)) {
127 score += 5;
128 }
129 if (score > score_max) {
130 score_max = score;
131 fmt_found = fmt;
132 }
133 fmt = fmt->next;
134 }
135 return fmt_found;
136 }
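
/* Hedged usage sketch for the muxer lookup above (assumes the relevant muxers
 * were compiled in and registered, e.g. via av_register_all()). The scoring
 * gives the short name (+100) precedence over the MIME type (+10) and the
 * file name extension (+5):
 *
 *     AVOutputFormat *ofmt;
 *     ofmt = guess_format("mpeg", NULL, NULL);        // by short name
 *     if (!ofmt)
 *         ofmt = guess_format(NULL, "out.avi", NULL); // fall back to the extension
 *     if (ofmt)
 *         av_log(NULL, AV_LOG_INFO, "muxer: %s, default video codec: %d\n",
 *                ofmt->name, ofmt->video_codec);
 */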
137
138 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
139 const char *mime_type)
140 {
141 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
142
143 if (fmt) {
144 AVOutputFormat *stream_fmt;
145 char stream_format_name[64];
146
147 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
148 stream_fmt = guess_format(stream_format_name, NULL, NULL);
149
150 if (stream_fmt)
151 fmt = stream_fmt;
152 }
153
154 return fmt;
155 }
156
157 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
158 const char *filename, const char *mime_type, enum CodecType type){
159 if(type == CODEC_TYPE_VIDEO){
160 enum CodecID codec_id= CODEC_ID_NONE;
161
162 #ifdef CONFIG_IMAGE2_MUXER
163 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
164 codec_id= av_guess_image2_codec(filename);
165 }
166 #endif
167 if(codec_id == CODEC_ID_NONE)
168 codec_id= fmt->video_codec;
169 return codec_id;
170 }else if(type == CODEC_TYPE_AUDIO)
171 return fmt->audio_codec;
172 else
173 return CODEC_ID_NONE;
174 }
175
176 AVInputFormat *av_find_input_format(const char *short_name)
177 {
178 AVInputFormat *fmt;
179 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
180 if (!strcmp(fmt->name, short_name))
181 return fmt;
182 }
183 return NULL;
184 }
185
186 /* memory handling */
187
188 void av_destruct_packet(AVPacket *pkt)
189 {
190 av_free(pkt->data);
191 pkt->data = NULL; pkt->size = 0;
192 }
193
194 void av_init_packet(AVPacket *pkt)
195 {
196 pkt->pts = AV_NOPTS_VALUE;
197 pkt->dts = AV_NOPTS_VALUE;
198 pkt->pos = -1;
199 pkt->duration = 0;
200 pkt->flags = 0;
201 pkt->stream_index = 0;
202 pkt->destruct= av_destruct_packet_nofree;
203 }
204
205 int av_new_packet(AVPacket *pkt, int size)
206 {
207 uint8_t *data;
208 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
209 return AVERROR(ENOMEM);
210 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
211 if (!data)
212 return AVERROR(ENOMEM);
213 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
214
215 av_init_packet(pkt);
216 pkt->data = data;
217 pkt->size = size;
218 pkt->destruct = av_destruct_packet;
219 return 0;
220 }
221
222 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
223 {
224 int ret= av_new_packet(pkt, size);
225
226 if(ret<0)
227 return ret;
228
229 pkt->pos= url_ftell(s);
230
231 ret= get_buffer(s, pkt->data, size);
232 if(ret<=0)
233 av_free_packet(pkt);
234 else
235 pkt->size= ret;
236
237 return ret;
238 }
239
240 int av_dup_packet(AVPacket *pkt)
241 {
242 if (pkt->destruct != av_destruct_packet) {
243 uint8_t *data;
244 /* we duplicate the packet data, making sure to add the required
245 padding again */
246 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
247 return AVERROR(ENOMEM);
248 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
249 if (!data) {
250 return AVERROR(ENOMEM);
251 }
252 memcpy(data, pkt->data, pkt->size);
253 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
254 pkt->data = data;
255 pkt->destruct = av_destruct_packet;
256 }
257 return 0;
258 }
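
/* Hedged sketch of how a typical demuxer read_packet() callback uses the
 * packet helpers above: av_get_packet() allocates a padded packet and fills it
 * from the byte stream, and the caller releases it with av_free_packet().
 * "payload_size" and the single-stream assumption are purely illustrative.
 *
 *     static int mydemux_read_packet(AVFormatContext *s, AVPacket *pkt)
 *     {
 *         int payload_size = 4096;          // hypothetical, container specific
 *         int ret = av_get_packet(s->pb, pkt, payload_size);
 *         if (ret <= 0)
 *             return AVERROR(EIO);          // nothing read or I/O error
 *         pkt->stream_index = 0;            // single-stream example
 *         return 0;
 *     }
 *
 * av_dup_packet() is needed when a packet that does not own its data
 * (destruct == av_destruct_packet_nofree) must outlive the demuxer's internal
 * buffer, e.g. before it is queued in s->packet_buffer (see add_to_pktbuf()).
 */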
259
260 int av_filename_number_test(const char *filename)
261 {
262 char buf[1024];
263 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
264 }
265
266 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
267 {
268 AVInputFormat *fmt1, *fmt;
269 int score;
270
271 fmt = NULL;
272 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
273 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
274 continue;
275 score = 0;
276 if (fmt1->read_probe) {
277 score = fmt1->read_probe(pd);
278 } else if (fmt1->extensions) {
279 if (match_ext(pd->filename, fmt1->extensions)) {
280 score = 50;
281 }
282 }
283 if (score > *score_max) {
284 *score_max = score;
285 fmt = fmt1;
286 }
287 }
288 return fmt;
289 }
290
291 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
292 int score=0;
293 return av_probe_input_format2(pd, is_opened, &score);
294 }
295
296 /************************************************************/
297 /* input media file */
298
299 /**
300 * Open a media file from an IO stream. 'fmt' must be specified.
301 */
302 static const char* format_to_name(void* ptr)
303 {
304 AVFormatContext* fc = (AVFormatContext*) ptr;
305 if(fc->iformat) return fc->iformat->name;
306 else if(fc->oformat) return fc->oformat->name;
307 else return "NULL";
308 }
309
310 #define OFFSET(x) offsetof(AVFormatContext,x)
311 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
312 //these names are too long to be readable
313 #define E AV_OPT_FLAG_ENCODING_PARAM
314 #define D AV_OPT_FLAG_DECODING_PARAM
315
316 static const AVOption options[]={
317 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
318 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
319 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
320 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
321 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
322 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
323 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
324 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
325 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
326 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
327 {NULL},
328 };
329
330 #undef E
331 #undef D
332 #undef DEFAULT
333
334 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
335
336 static void avformat_get_context_defaults(AVFormatContext *s)
337 {
338 memset(s, 0, sizeof(AVFormatContext));
339
340 s->av_class = &av_format_context_class;
341
342 av_opt_set_defaults(s);
343 }
344
345 AVFormatContext *av_alloc_format_context(void)
346 {
347 AVFormatContext *ic;
348 ic = av_malloc(sizeof(AVFormatContext));
349 if (!ic) return ic;
350 avformat_get_context_defaults(ic);
351 ic->av_class = &av_format_context_class;
352 return ic;
353 }
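
/* Hedged sketch of the "prealloced context" pattern: allocate the context with
 * av_alloc_format_context(), override defaults from the option table above
 * (e.g. probesize, max_analyze_duration), and tell av_open_input_file() to
 * reuse it via AVFormatParameters.prealloced_context:
 *
 *     AVFormatContext *ic = av_alloc_format_context();
 *     AVFormatParameters ap;
 *
 *     memset(&ap, 0, sizeof(ap));
 *     ap.prealloced_context = 1;
 *     ic->probesize            = 64 * 1024;        // probe more data than the default
 *     ic->max_analyze_duration = 5 * AV_TIME_BASE; // analyze up to 5 seconds
 *     if (av_open_input_file(&ic, "input.ts", NULL, 0, &ap) < 0)
 *         return -1;
 */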
354
355 int av_open_input_stream(AVFormatContext **ic_ptr,
356 ByteIOContext *pb, const char *filename,
357 AVInputFormat *fmt, AVFormatParameters *ap)
358 {
359 int err;
360 AVFormatContext *ic;
361 AVFormatParameters default_ap;
362
363 if(!ap){
364 ap=&default_ap;
365 memset(ap, 0, sizeof(default_ap));
366 }
367
368 if(!ap->prealloced_context)
369 ic = av_alloc_format_context();
370 else
371 ic = *ic_ptr;
372 if (!ic) {
373 err = AVERROR(ENOMEM);
374 goto fail;
375 }
376 ic->iformat = fmt;
377 ic->pb = pb;
378 ic->duration = AV_NOPTS_VALUE;
379 ic->start_time = AV_NOPTS_VALUE;
380 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
381
382 /* allocate private data */
383 if (fmt->priv_data_size > 0) {
384 ic->priv_data = av_mallocz(fmt->priv_data_size);
385 if (!ic->priv_data) {
386 err = AVERROR(ENOMEM);
387 goto fail;
388 }
389 } else {
390 ic->priv_data = NULL;
391 }
392
393 err = ic->iformat->read_header(ic, ap);
394 if (err < 0)
395 goto fail;
396
397 if (pb && !ic->data_offset)
398 ic->data_offset = url_ftell(ic->pb);
399
400 *ic_ptr = ic;
401 return 0;
402 fail:
403 if (ic) {
404 av_freep(&ic->priv_data);
405 }
406 av_free(ic);
407 *ic_ptr = NULL;
408 return err;
409 }
410
411 /** Size of probe buffer, for guessing file type from file contents. */
412 #define PROBE_BUF_MIN 2048
413 #define PROBE_BUF_MAX (1<<20)
414
415 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
416 AVInputFormat *fmt,
417 int buf_size,
418 AVFormatParameters *ap)
419 {
420 int err, probe_size;
421 AVProbeData probe_data, *pd = &probe_data;
422 ByteIOContext *pb = NULL;
423
424 pd->filename = "";
425 if (filename)
426 pd->filename = filename;
427 pd->buf = NULL;
428 pd->buf_size = 0;
429
430 if (!fmt) {
431 /* guess the format among demuxers that need no opened file (AVFMT_NOFILE) */
432 fmt = av_probe_input_format(pd, 0);
433 }
434
435 /* do not open file if the format does not need it. XXX: specific
436 hack needed to handle RTSP/TCP */
437 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
438 /* if no file needed do not try to open one */
439 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
440 goto fail;
441 }
442 if (buf_size > 0) {
443 url_setbufsize(pb, buf_size);
444 }
445
446 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
447 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
448 /* read probe data */
449 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
450 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
451 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
452 if (url_fseek(pb, 0, SEEK_SET) < 0) {
453 url_fclose(pb);
454 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
455 pb = NULL;
456 err = AVERROR(EIO);
457 goto fail;
458 }
459 }
460 /* guess file format */
461 fmt = av_probe_input_format2(pd, 1, &score);
462 }
463 av_freep(&pd->buf);
464 }
465
466 /* if still no format found, error */
467 if (!fmt) {
468 err = AVERROR_NOFMT;
469 goto fail;
470 }
471
472 /* check the filename in case an image number is expected */
473 if (fmt->flags & AVFMT_NEEDNUMBER) {
474 if (!av_filename_number_test(filename)) {
475 err = AVERROR_NUMEXPECTED;
476 goto fail;
477 }
478 }
479 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
480 if (err)
481 goto fail;
482 return 0;
483 fail:
484 av_freep(&pd->buf);
485 if (pb)
486 url_fclose(pb);
487 *ic_ptr = NULL;
488 return err;
489
490 }
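
/* Hedged end-to-end demuxing sketch built from the entry points in this file
 * (assumes the demuxers were registered beforehand, e.g. with
 * av_register_all(), and omits most error handling):
 *
 *     AVFormatContext *ic = NULL;
 *     AVPacket pkt;
 *
 *     if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) < 0)
 *         return -1;                          // probing or opening failed
 *     if (av_find_stream_info(ic) < 0) {
 *         av_close_input_file(ic);
 *         return -1;                          // could not get codec parameters
 *     }
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // pkt.stream_index selects the AVStream; pkt.pts and pkt.dts are in
 *         // ic->streams[pkt.stream_index]->time_base units
 *         av_free_packet(&pkt);
 *     }
 *     av_close_input_file(ic);
 */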
491
492 /*******************************************************/
493
494 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
495 {
496 av_init_packet(pkt);
497 return s->iformat->read_packet(s, pkt);
498 }
499
500 /**********************************************************/
501
502 /**
503 * Get the number of samples in an audio frame. Return -1 on error.
504 */
505 static int get_audio_frame_size(AVCodecContext *enc, int size)
506 {
507 int frame_size;
508
509 if (enc->frame_size <= 1) {
510 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
511
512 if (bits_per_sample) {
513 if (enc->channels == 0)
514 return -1;
515 frame_size = (size << 3) / (bits_per_sample * enc->channels);
516 } else {
517 /* used for example by ADPCM codecs */
518 if (enc->bit_rate == 0)
519 return -1;
520 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
521 }
522 } else {
523 frame_size = enc->frame_size;
524 }
525 return frame_size;
526 }
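
/* Worked examples for the size -> samples conversion above (illustrative):
 * - 16-bit stereo PCM: bits_per_sample = 16, channels = 2, so a 4096 byte
 *   packet yields (4096 * 8) / (16 * 2) = 1024 samples.
 * - A codec without a fixed bits-per-sample value falls back to the bitrate:
 *   with bit_rate = 128000 and sample_rate = 48000, a 1000 byte packet gives
 *   (1000 * 8 * 48000) / 128000 = 3000 samples.
 * - Codecs with enc->frame_size > 1 (e.g. MP3, AC-3) simply report that value.
 */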
527
528
529 /**
530 * Compute the frame duration in seconds as the fraction *pnum / *pden; both are set to 0 if it is not available.
531 */
532 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
533 AVCodecParserContext *pc, AVPacket *pkt)
534 {
535 int frame_size;
536
537 *pnum = 0;
538 *pden = 0;
539 switch(st->codec->codec_type) {
540 case CODEC_TYPE_VIDEO:
541 if(st->time_base.num*1000LL > st->time_base.den){
542 *pnum = st->time_base.num;
543 *pden = st->time_base.den;
544 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
545 *pnum = st->codec->time_base.num;
546 *pden = st->codec->time_base.den;
547 if (pc && pc->repeat_pict) {
548 *pden *= 2;
549 *pnum = (*pnum) * (2 + pc->repeat_pict);
550 }
551 }
552 break;
553 case CODEC_TYPE_AUDIO:
554 frame_size = get_audio_frame_size(st->codec, pkt->size);
555 if (frame_size < 0)
556 break;
557 *pnum = frame_size;
558 *pden = st->codec->sample_rate;
559 break;
560 default:
561 break;
562 }
563 }
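
/* Illustrative example: for 25 fps video with st->time_base = 1/25 the code
 * above sets *pnum = 1 and *pden = 25, and compute_pkt_fields() then derives
 *     pkt->duration = av_rescale(1, 1 * 25, 25 * 1) = 1
 * i.e. one tick of the stream time base per frame. For 48 kHz audio carrying
 * 1152-sample frames, *pnum = 1152 and *pden = 48000.
 */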
564
565 static int is_intra_only(AVCodecContext *enc){
566 if(enc->codec_type == CODEC_TYPE_AUDIO){
567 return 1;
568 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
569 switch(enc->codec_id){
570 case CODEC_ID_MJPEG:
571 case CODEC_ID_MJPEGB:
572 case CODEC_ID_LJPEG:
573 case CODEC_ID_RAWVIDEO:
574 case CODEC_ID_DVVIDEO:
575 case CODEC_ID_HUFFYUV:
576 case CODEC_ID_FFVHUFF:
577 case CODEC_ID_ASV1:
578 case CODEC_ID_ASV2:
579 case CODEC_ID_VCR1:
580 return 1;
581 default: break;
582 }
583 }
584 return 0;
585 }
586
587 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
588 int64_t dts, int64_t pts)
589 {
590 AVStream *st= s->streams[stream_index];
591 AVPacketList *pktl= s->packet_buffer;
592
593 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE)
594 return;
595
596 st->first_dts= dts - st->cur_dts;
597 st->cur_dts= dts;
598
599 for(; pktl; pktl= pktl->next){
600 if(pktl->pkt.stream_index != stream_index)
601 continue;
602 //FIXME think more about this check
603 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
604 pktl->pkt.pts += st->first_dts;
605
606 if(pktl->pkt.dts != AV_NOPTS_VALUE)
607 pktl->pkt.dts += st->first_dts;
608
609 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
610 st->start_time= pktl->pkt.pts;
611 }
612 if (st->start_time == AV_NOPTS_VALUE)
613 st->start_time = pts;
614 }
615
616 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
617 AVCodecParserContext *pc, AVPacket *pkt)
618 {
619 int num, den, presentation_delayed, delay, i;
620 int64_t offset;
621
622 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
623 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
624 pkt->dts -= 1LL<<st->pts_wrap_bits;
625 }
626
627 if (pkt->duration == 0) {
628 compute_frame_duration(&num, &den, st, pc, pkt);
629 if (den && num) {
630 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
631 }
632 }
633
634 /* correct timestamps with byte offset if demuxers only have timestamps on packet boundaries */
635 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
636 /* this will estimate bitrate based on this frame's duration and size */
637 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
638 if(pkt->pts != AV_NOPTS_VALUE)
639 pkt->pts += offset;
640 if(pkt->dts != AV_NOPTS_VALUE)
641 pkt->dts += offset;
642 }
643
644 /* do we have a video B-frame? */
645 delay= st->codec->has_b_frames;
646 presentation_delayed = 0;
647 /* XXX: need has_b_frame, but cannot get it if the codec is
648 not initialized */
649 if (delay &&
650 pc && pc->pict_type != FF_B_TYPE)
651 presentation_delayed = 1;
652 /* This may be redundant, but it should not hurt. */
653 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
654 presentation_delayed = 1;
655
656 if(st->cur_dts == AV_NOPTS_VALUE){
657 st->cur_dts = 0; //FIXME maybe set it to 0 during init
658 }
659
660 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
661 /* interpolate PTS and DTS if they are not present */
662 if(delay <=1){
663 if (presentation_delayed) {
664 /* DTS = decompression time stamp */
665 /* PTS = presentation time stamp */
666 if (pkt->dts == AV_NOPTS_VALUE)
667 pkt->dts = st->last_IP_pts;
668 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
669 if (pkt->dts == AV_NOPTS_VALUE)
670 pkt->dts = st->cur_dts;
671
672 /* this is tricky: the dts must be incremented by the duration
673 of the frame we are displaying, i.e. the last I or P frame */
674 if (st->last_IP_duration == 0)
675 st->last_IP_duration = pkt->duration;
676 st->cur_dts = pkt->dts + st->last_IP_duration;
677 st->last_IP_duration = pkt->duration;
678 st->last_IP_pts= pkt->pts;
679 /* cannot compute the PTS if it is not present (we could compute it only
680 by knowing the future) */
681 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
682 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
683 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
684 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
685 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
686 pkt->pts += pkt->duration;
687 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
688 }
689 }
690
691 /* presentation is not delayed: PTS and DTS are the same */
692 if(pkt->pts == AV_NOPTS_VALUE)
693 pkt->pts = pkt->dts;
694 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
695 if(pkt->pts == AV_NOPTS_VALUE)
696 pkt->pts = st->cur_dts;
697 pkt->dts = pkt->pts;
698 st->cur_dts = pkt->pts + pkt->duration;
699 }
700 }
701
702 if(pkt->pts != AV_NOPTS_VALUE){
703 st->pts_buffer[0]= pkt->pts;
704 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
705 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
706 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
707 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
708 if(pkt->dts == AV_NOPTS_VALUE)
709 pkt->dts= st->pts_buffer[0];
710 if(delay>1){
711 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
712 }
713 if(pkt->dts > st->cur_dts)
714 st->cur_dts = pkt->dts;
715 }
716
717 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
718
719 /* update flags */
720 if(is_intra_only(st->codec))
721 pkt->flags |= PKT_FLAG_KEY;
722 else if (pc) {
723 pkt->flags = 0;
724 /* key frame computation */
725 if (pc->pict_type == FF_I_TYPE)
726 pkt->flags |= PKT_FLAG_KEY;
727 }
728 }
729
730 void av_destruct_packet_nofree(AVPacket *pkt)
731 {
732 pkt->data = NULL; pkt->size = 0;
733 }
734
735 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
736 {
737 AVStream *st;
738 int len, ret, i;
739
740 av_init_packet(pkt);
741
742 for(;;) {
743 /* select current input stream component */
744 st = s->cur_st;
745 if (st) {
746 if (!st->need_parsing || !st->parser) {
747 /* no parsing needed: we just output the packet as is */
748 /* raw data support */
749 *pkt = s->cur_pkt;
750 compute_pkt_fields(s, st, NULL, pkt);
751 s->cur_st = NULL;
752 break;
753 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
754 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
755 s->cur_ptr, s->cur_len,
756 s->cur_pkt.pts, s->cur_pkt.dts);
757 s->cur_pkt.pts = AV_NOPTS_VALUE;
758 s->cur_pkt.dts = AV_NOPTS_VALUE;
759 /* increment read pointer */
760 s->cur_ptr += len;
761 s->cur_len -= len;
762
763 /* return packet if any */
764 if (pkt->size) {
765 got_packet:
766 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
767 pkt->duration = 0;
768 pkt->stream_index = st->index;
769 pkt->pts = st->parser->pts;
770 pkt->dts = st->parser->dts;
771 pkt->destruct = av_destruct_packet_nofree;
772 compute_pkt_fields(s, st, st->parser, pkt);
773
774 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
775 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
776 0, 0, AVINDEX_KEYFRAME);
777 }
778
779 break;
780 }
781 } else {
782 /* free packet */
783 av_free_packet(&s->cur_pkt);
784 s->cur_st = NULL;
785 }
786 } else {
787 /* read next packet */
788 ret = av_read_packet(s, &s->cur_pkt);
789 if (ret < 0) {
790 if (ret == AVERROR(EAGAIN))
791 return ret;
792 /* return the last frames, if any */
793 for(i = 0; i < s->nb_streams; i++) {
794 st = s->streams[i];
795 if (st->parser && st->need_parsing) {
796 av_parser_parse(st->parser, st->codec,
797 &pkt->data, &pkt->size,
798 NULL, 0,
799 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
800 if (pkt->size)
801 goto got_packet;
802 }
803 }
804 /* no more packets: really terminates parsing */
805 return ret;
806 }
807
808 st = s->streams[s->cur_pkt.stream_index];
809 if(st->codec->debug & FF_DEBUG_PTS)
810 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
811 s->cur_pkt.stream_index,
812 s->cur_pkt.pts,
813 s->cur_pkt.dts,
814 s->cur_pkt.size);
815
816 s->cur_st = st;
817 s->cur_ptr = s->cur_pkt.data;
818 s->cur_len = s->cur_pkt.size;
819 if (st->need_parsing && !st->parser) {
820 st->parser = av_parser_init(st->codec->codec_id);
821 if (!st->parser) {
822 /* no parser available: just output the raw packets */
823 st->need_parsing = AVSTREAM_PARSE_NONE;
824 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
825 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
826 }
827 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
828 st->parser->last_frame_offset=
829 st->parser->cur_offset= s->cur_pkt.pos;
830 }
831 }
832 }
833 }
834 if(st->codec->debug & FF_DEBUG_PTS)
835 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
836 pkt->stream_index,
837 pkt->pts,
838 pkt->dts,
839 pkt->size);
840
841 return 0;
842 }
843
844 static AVPacket *add_to_pktbuf(AVFormatContext *s, AVPacket *pkt){
845 AVPacketList *pktl= s->packet_buffer;
846 AVPacketList **plast_pktl= &s->packet_buffer;
847
848 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
849
850 pktl = av_mallocz(sizeof(AVPacketList));
851 if (!pktl)
852 return NULL;
853
854 /* add the packet in the buffered packet list */
855 *plast_pktl = pktl;
856 pktl->pkt= *pkt;
857 return &pktl->pkt;
858 }
859
860 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
861 {
862 AVPacketList *pktl;
863 int eof=0;
864 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
865
866 for(;;){
867 pktl = s->packet_buffer;
868 if (pktl) {
869 AVPacket *next_pkt= &pktl->pkt;
870
871 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
872 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
873 if( pktl->pkt.stream_index == next_pkt->stream_index
874 && next_pkt->dts < pktl->pkt.dts
875 && pktl->pkt.pts != pktl->pkt.dts //not b frame
876 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
877 next_pkt->pts= pktl->pkt.dts;
878 }
879 pktl= pktl->next;
880 }
881 pktl = s->packet_buffer;
882 }
883
884 if( next_pkt->pts != AV_NOPTS_VALUE
885 || next_pkt->dts == AV_NOPTS_VALUE
886 || !genpts || eof){
887 /* read packet from packet buffer, if there is data */
888 *pkt = *next_pkt;
889 s->packet_buffer = pktl->next;
890 av_free(pktl);
891 return 0;
892 }
893 }
894 if(genpts){
895 int ret= av_read_frame_internal(s, pkt);
896 if(ret<0){
897 if(pktl && ret != AVERROR(EAGAIN)){
898 eof=1;
899 continue;
900 }else
901 return ret;
902 }
903
904 if(av_dup_packet(add_to_pktbuf(s, pkt)) < 0)
905 return AVERROR(ENOMEM);
906 }else{
907 assert(!s->packet_buffer);
908 return av_read_frame_internal(s, pkt);
909 }
910 }
911 }
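
/* Hedged note on AVFMT_FLAG_GENPTS (the "genpts" fflag in the option table
 * above): when set, av_read_frame() buffers packets and fills missing PTS
 * values from the DTS of later packets of the same stream instead of
 * returning AV_NOPTS_VALUE. A minimal sketch, assuming an already opened
 * context "ic":
 *
 *     ic->flags |= AVFMT_FLAG_GENPTS;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // pkt.pts is now filled in whenever a later DTS allows it
 *         av_free_packet(&pkt);
 *     }
 */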
912
913 /* XXX: suppress the packet queue */
914 static void flush_packet_queue(AVFormatContext *s)
915 {
916 AVPacketList *pktl;
917
918 for(;;) {
919 pktl = s->packet_buffer;
920 if (!pktl)
921 break;
922 s->packet_buffer = pktl->next;
923 av_free_packet(&pktl->pkt);
924 av_free(pktl);
925 }
926 }
927
928 /*******************************************************/
929 /* seek support */
930
931 int av_find_default_stream_index(AVFormatContext *s)
932 {
933 int i;
934 AVStream *st;
935
936 if (s->nb_streams <= 0)
937 return -1;
938 for(i = 0; i < s->nb_streams; i++) {
939 st = s->streams[i];
940 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
941 return i;
942 }
943 }
944 return 0;
945 }
946
947 /**
948 * Flush the frame reader.
949 */
950 static void av_read_frame_flush(AVFormatContext *s)
951 {
952 AVStream *st;
953 int i;
954
955 flush_packet_queue(s);
956
957 /* free previous packet */
958 if (s->cur_st) {
959 if (s->cur_st->parser)
960 av_free_packet(&s->cur_pkt);
961 s->cur_st = NULL;
962 }
963 /* fail safe */
964 s->cur_ptr = NULL;
965 s->cur_len = 0;
966
967 /* for each stream, reset read state */
968 for(i = 0; i < s->nb_streams; i++) {
969 st = s->streams[i];
970
971 if (st->parser) {
972 av_parser_close(st->parser);
973 st->parser = NULL;
974 }
975 st->last_IP_pts = AV_NOPTS_VALUE;
976 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
977 }
978 }
979
980 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
981 int i;
982
983 for(i = 0; i < s->nb_streams; i++) {
984 AVStream *st = s->streams[i];
985
986 st->cur_dts = av_rescale(timestamp,
987 st->time_base.den * (int64_t)ref_st->time_base.num,
988 st->time_base.num * (int64_t)ref_st->time_base.den);
989 }
990 }
991
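/**
 * Add an entry to the sorted index of a stream.
 * If an entry with exactly the same timestamp already exists it is updated in
 * place; otherwise the new entry is inserted so that the list stays ordered by
 * timestamp and remains searchable with av_index_search_timestamp().
 *
 * @return the index of the entry, or -1 on failure
 */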
992 int av_add_index_entry(AVStream *st,
993 int64_t pos, int64_t timestamp, int size, int distance, int flags)
994 {
995 AVIndexEntry *entries, *ie;
996 int index;
997
998 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
999 return -1;
1000
1001 entries = av_fast_realloc(st->index_entries,
1002 &st->index_entries_allocated_size,
1003 (st->nb_index_entries + 1) *
1004 sizeof(AVIndexEntry));
1005 if(!entries)
1006 return -1;
1007
1008 st->index_entries= entries;
1009
1010 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1011
1012 if(index<0){
1013 index= st->nb_index_entries++;
1014 ie= &entries[index];
1015 assert(index==0 || ie[-1].timestamp < timestamp);
1016 }else{
1017 ie= &entries[index];
1018 if(ie->timestamp != timestamp){
1019 if(ie->timestamp <= timestamp)
1020 return -1;
1021 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1022 st->nb_index_entries++;
1023 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1024 distance= ie->min_distance;
1025 }
1026
1027 ie->pos = pos;
1028 ie->timestamp = timestamp;
1029 ie->min_distance= distance;
1030 ie->size= size;
1031 ie->flags = flags;
1032
1033 return index;
1034 }
1035
1036 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1037 int flags)
1038 {
1039 AVIndexEntry *entries= st->index_entries;
1040 int nb_entries= st->nb_index_entries;
1041 int a, b, m;
1042 int64_t timestamp;
1043
1044 a = - 1;
1045 b = nb_entries;
1046
1047 while (b - a > 1) {
1048 m = (a + b) >> 1;
1049 timestamp = entries[m].timestamp;
1050 if(timestamp >= wanted_timestamp)
1051 b = m;
1052 if(timestamp <= wanted_timestamp)
1053 a = m;
1054 }
1055 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1056
1057 if(!(flags & AVSEEK_FLAG_ANY)){
1058 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1059 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1060 }
1061 }
1062
1063 if(m == nb_entries)
1064 return -1;
1065 return m;
1066 }
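
/* Summary of the search semantics above:
 * - default:              return the first keyframe entry with
 *                         timestamp >= wanted_timestamp;
 * - AVSEEK_FLAG_BACKWARD: return the last keyframe entry with
 *                         timestamp <= wanted_timestamp;
 * - AVSEEK_FLAG_ANY:      accept non-keyframe entries as well.
 * Returns -1 if no suitable entry exists.
 */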
1067
1068 #define DEBUG_SEEK
1069
1070 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1071 AVInputFormat *avif= s->iformat;
1072 int64_t pos_min, pos_max, pos, pos_limit;
1073 int64_t ts_min, ts_max, ts;
1074 int index;
1075 AVStream *st;
1076
1077 if (stream_index < 0)
1078 return -1;
1079
1080 #ifdef DEBUG_SEEK
1081 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1082 #endif
1083
1084 ts_max=
1085 ts_min= AV_NOPTS_VALUE;
1086 pos_limit= -1; //gcc falsely says it may be uninitialized
1087
1088 st= s->streams[stream_index];
1089 if(st->index_entries){
1090 AVIndexEntry *e;
1091
1092 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1093 index= FFMAX(index, 0);
1094 e= &st->index_entries[index];
1095
1096 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1097 pos_min= e->pos;
1098 ts_min= e->timestamp;
1099 #ifdef DEBUG_SEEK
1100 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1101 pos_min,ts_min);
1102 #endif
1103 }else{
1104 assert(index==0);
1105 }
1106
1107 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1108 assert(index < st->nb_index_entries);
1109 if(index >= 0){
1110 e= &st->index_entries[index];
1111 assert(e->timestamp >= target_ts);
1112 pos_max= e->pos;
1113 ts_max= e->timestamp;
1114 pos_limit= pos_max - e->min_distance;
1115 #ifdef DEBUG_SEEK
1116 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1117 pos_max,pos_limit, ts_max);
1118 #endif
1119 }
1120 }
1121
1122 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1123 if(pos<0)
1124 return -1;
1125
1126 /* do the seek */
1127 url_fseek(s->pb, pos, SEEK_SET);
1128
1129 av_update_cur_dts(s, st, ts);
1130
1131 return 0;
1132 }
1133
1134 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1135 int64_t pos, ts;
1136 int64_t start_pos, filesize;
1137 int no_change;
1138
1139 #ifdef DEBUG_SEEK
1140 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1141 #endif
1142
1143 if(ts_min == AV_NOPTS_VALUE){
1144 pos_min = s->data_offset;
1145 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1146 if (ts_min == AV_NOPTS_VALUE)
1147 return -1;
1148 }
1149
1150 if(ts_max == AV_NOPTS_VALUE){
1151 int step= 1024;
1152 filesize = url_fsize(s->pb);
1153 pos_max = filesize - 1;
1154 do{
1155 pos_max -= step;
1156 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1157 step += step;
1158 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1159 if (ts_max == AV_NOPTS_VALUE)
1160 return -1;
1161
1162 for(;;){
1163 int64_t tmp_pos= pos_max + 1;
1164 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1165 if(tmp_ts == AV_NOPTS_VALUE)
1166 break;
1167 ts_max= tmp_ts;
1168 pos_max= tmp_pos;
1169 if(tmp_pos >= filesize)
1170 break;
1171 }
1172 pos_limit= pos_max;
1173 }
1174
1175 if(ts_min > ts_max){
1176 return -1;
1177 }else if(ts_min == ts_max){
1178 pos_limit= pos_min;
1179 }
1180
1181 no_change=0;
1182 while (pos_min < pos_limit) {
1183 #ifdef DEBUG_SEEK
1184 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1185 pos_min, pos_max,
1186 ts_min, ts_max);
1187 #endif
1188 assert(pos_limit <= pos_max);
1189
1190 if(no_change==0){
1191 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1192 // interpolate position (better than dichotomy)
1193 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1194 + pos_min - approximate_keyframe_distance;
1195 }else if(no_change==1){
1196 // bisection, if interpolation failed to change min or max pos last time
1197 pos = (pos_min + pos_limit)>>1;
1198 }else{
1199 // linear search if bisection failed; can only happen if there are very few or no keyframes between min/max
1200 pos=pos_min;
1201 }
1202 if(pos <= pos_min)
1203 pos= pos_min + 1;
1204 else if(pos > pos_limit)
1205 pos= pos_limit;
1206 start_pos= pos;
1207
1208 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1209 if(pos == pos_max)
1210 no_change++;
1211 else
1212 no_change=0;
1213 #ifdef DEBUG_SEEK
1214 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1215 #endif
1216 if(ts == AV_NOPTS_VALUE){
1217 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1218 return -1;
1219 }
1220 assert(ts != AV_NOPTS_VALUE);
1221 if (target_ts <= ts) {
1222 pos_limit = start_pos - 1;
1223 pos_max = pos;
1224 ts_max = ts;
1225 }
1226 if (target_ts >= ts) {
1227 pos_min = pos;
1228 ts_min = ts;
1229 }
1230 }
1231
1232 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1233 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1234 #ifdef DEBUG_SEEK
1235 pos_min = pos;
1236 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1237 pos_min++;
1238 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1239 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1240 pos, ts_min, target_ts, ts_max);
1241 #endif
1242 *ts_ret= ts;
1243 return pos;
1244 }
1245
1246 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1247 int64_t pos_min, pos_max;
1248 #if 0
1249 AVStream *st;
1250
1251 if (stream_index < 0)
1252 return -1;
1253
1254 st= s->streams[stream_index];
1255 #endif
1256
1257 pos_min = s->data_offset;
1258 pos_max = url_fsize(s->pb) - 1;
1259
1260 if (pos < pos_min) pos= pos_min;
1261 else if(pos > pos_max) pos= pos_max;
1262
1263 url_fseek(s->pb, pos, SEEK_SET);
1264
1265 #if 0
1266 av_update_cur_dts(s, st, ts);
1267 #endif
1268 return 0;
1269 }
1270
1271 static int av_seek_frame_generic(AVFormatContext *s,
1272 int stream_index, int64_t timestamp, int flags)
1273 {
1274 int index;
1275 AVStream *st;
1276 AVIndexEntry *ie;
1277
1278 st = s->streams[stream_index];
1279
1280 index = av_index_search_timestamp(st, timestamp, flags);
1281
1282 if(index < 0 || index==st->nb_index_entries-1){
1283 int i;
1284 AVPacket pkt;
1285
1286 if(st->index_entries && st->nb_index_entries){
1287 ie= &st->index_entries[st->nb_index_entries-1];
1288 url_fseek(s->pb, ie->pos, SEEK_SET);
1289 av_update_cur_dts(s, st, ie->timestamp);
1290 }else
1291 url_fseek(s->pb, 0, SEEK_SET);
1292
1293 for(i=0;; i++) {
1294 int ret = av_read_frame(s, &pkt);
1295 if(ret<0)
1296 break;
1297 av_free_packet(&pkt);
1298 if(stream_index == pkt.stream_index){
1299 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1300 break;
1301 }
1302 }
1303 index = av_index_search_timestamp(st, timestamp, flags);
1304 }
1305 if (index < 0)
1306 return -1;
1307
1308 av_read_frame_flush(s);
1309 if (s->iformat->read_seek){
1310 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1311 return 0;
1312 }
1313 ie = &st->index_entries[index];
1314 url_fseek(s->pb, ie->pos, SEEK_SET);
1315
1316 av_update_cur_dts(s, st, ie->timestamp);
1317
1318 return 0;
1319 }
1320
1321 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1322 {
1323 int ret;
1324 AVStream *st;
1325
1326 av_read_frame_flush(s);
1327
1328 if(flags & AVSEEK_FLAG_BYTE)
1329 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1330
1331 if(stream_index < 0){
1332 stream_index= av_find_default_stream_index(s);
1333 if(stream_index < 0)
1334 return -1;
1335
1336 st= s->streams[stream_index];
1337 /* timestamp for default must be expressed in AV_TIME_BASE units */
1338 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1339 }
1340 st= s->streams[stream_index];
1341
1342 /* first, we try the format specific seek */
1343 if (s->iformat->read_seek)
1344 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1345 else
1346 ret = -1;
1347 if (ret >= 0) {
1348 return 0;
1349 }
1350
1351 if(s->iformat->read_timestamp)
1352 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1353 else
1354 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1355 }
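
/* Hedged seeking sketch: with stream_index = -1 the timestamp is interpreted
 * in AV_TIME_BASE units and a default stream is chosen; with an explicit
 * stream index it must be in that stream's time_base. AVSEEK_FLAG_BACKWARD
 * requests the closest seek point at or before the target.
 *
 *     // seek to the 10 second mark
 *     if (av_seek_frame(ic, -1, 10 * AV_TIME_BASE, AVSEEK_FLAG_BACKWARD) < 0)
 *         av_log(ic, AV_LOG_ERROR, "seek failed\n");
 *
 *     // the same target expressed in stream 0's own time base
 *     int64_t ts = av_rescale_q(10 * AV_TIME_BASE, AV_TIME_BASE_Q,
 *                               ic->streams[0]->time_base);
 *     av_seek_frame(ic, 0, ts, AVSEEK_FLAG_BACKWARD);
 */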
1356
1357 /*******************************************************/
1358
1359 /**
1360 * Return TRUE if the file has an accurate duration for at least one stream.
1361 *
1362 * @return TRUE if at least one stream component has a known duration.
1363 */
1364 static int av_has_duration(AVFormatContext *ic)
1365 {
1366 int i;
1367 AVStream *st;
1368
1369 for(i = 0;i < ic->nb_streams; i++) {
1370 st = ic->streams[i];
1371 if (st->duration != AV_NOPTS_VALUE)
1372 return 1;
1373 }
1374 return 0;
1375 }
1376
1377 /**
1378 * Estimate the global stream timings from those of each component.
1379 *
1380 * Also computes the global bitrate if possible.
1381 */
1382 static void av_update_stream_timings(AVFormatContext *ic)
1383 {
1384 int64_t start_time, start_time1, end_time, end_time1;
1385 int64_t duration, duration1;
1386 int i;
1387 AVStream *st;
1388
1389 start_time = INT64_MAX;
1390 end_time = INT64_MIN;
1391 duration = INT64_MIN;
1392 for(i = 0;i < ic->nb_streams; i++) {
1393 st = ic->streams[i];
1394 if (st->start_time != AV_NOPTS_VALUE) {
1395 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1396 if (start_time1 < start_time)
1397 start_time = start_time1;
1398 if (st->duration != AV_NOPTS_VALUE) {
1399 end_time1 = start_time1
1400 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1401 if (end_time1 > end_time)
1402 end_time = end_time1;
1403 }
1404 }
1405 if (st->duration != AV_NOPTS_VALUE) {
1406 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1407 if (duration1 > duration)
1408 duration = duration1;
1409 }
1410 }
1411 if (start_time != INT64_MAX) {
1412 ic->start_time = start_time;
1413 if (end_time != INT64_MIN) {
1414 if (end_time - start_time > duration)
1415 duration = end_time - start_time;
1416 }
1417 }
1418 if (duration != INT64_MIN) {
1419 ic->duration = duration;
1420 if (ic->file_size > 0) {
1421 /* compute the bit rate */
1422 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1423 (double)ic->duration;
1424 }
1425 }
1426 }
1427
1428 static void fill_all_stream_timings(AVFormatContext *ic)
1429 {
1430 int i;
1431 AVStream *st;
1432
1433 av_update_stream_timings(ic);
1434 for(i = 0;i < ic->nb_streams; i++) {
1435 st = ic->streams[i];
1436 if (st->start_time == AV_NOPTS_VALUE) {
1437 if(ic->start_time != AV_NOPTS_VALUE)
1438 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1439 if(ic->duration != AV_NOPTS_VALUE)
1440 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1441 }
1442 }
1443 }
1444
1445 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1446 {
1447 int64_t filesize, duration;
1448 int bit_rate, i;
1449 AVStream *st;
1450
1451 /* if bit_rate is already set, we believe it */
1452 if (ic->bit_rate == 0) {
1453 bit_rate = 0;
1454 for(i=0;i<ic->nb_streams;i++) {
1455 st = ic->streams[i];
1456 bit_rate += st->codec->bit_rate;
1457 }
1458 ic->bit_rate = bit_rate;
1459 }
1460
1461 /* if duration is already set, we believe it */
1462 if (ic->duration == AV_NOPTS_VALUE &&
1463 ic->bit_rate != 0 &&
1464 ic->file_size != 0) {
1465 filesize = ic->file_size;
1466 if (filesize > 0) {
1467 for(i = 0; i < ic->nb_streams; i++) {
1468 st = ic->streams[i];
1469 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1470 if (st->duration == AV_NOPTS_VALUE)
1471 st->duration = duration;
1472 }
1473 }
1474 }
1475 }
1476
1477 #define DURATION_MAX_READ_SIZE 250000
1478
1479 /* only usable for MPEG-PS streams */
1480 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1481 {
1482 AVPacket pkt1, *pkt = &pkt1;
1483 AVStream *st;
1484 int read_size, i, ret;
1485 int64_t end_time;
1486 int64_t filesize, offset, duration;
1487
1488 /* free previous packet */
1489 if (ic->cur_st && ic->cur_st->parser)
1490 av_free_packet(&ic->cur_pkt);
1491 ic->cur_st = NULL;
1492
1493 /* flush packet queue */
1494 flush_packet_queue(ic);
1495
1496 for(i=0;i<ic->nb_streams;i++) {
1497 st = ic->streams[i];
1498 if (st->parser) {
1499 av_parser_close(st->parser);
1500 st->parser= NULL;
1501 }
1502 }
1503
1504 /* we read the first packets to get the first PTS (not fully
1505 accurate, but it is enough for now) */
1506 url_fseek(ic->pb, 0, SEEK_SET);
1507 read_size = 0;
1508 for(;;) {
1509 if (read_size >= DURATION_MAX_READ_SIZE)
1510 break;
1511 /* if all info is available, we can stop */
1512 for(i = 0;i < ic->nb_streams; i++) {
1513 st = ic->streams[i];
1514 if (st->start_time == AV_NOPTS_VALUE)
1515 break;
1516 }
1517 if (i == ic->nb_streams)
1518 break;
1519
1520 ret = av_read_packet(ic, pkt);
1521 if (ret != 0)
1522 break;
1523 read_size += pkt->size;
1524 st = ic->streams[pkt->stream_index];
1525 if (pkt->pts != AV_NOPTS_VALUE) {
1526 if (st->start_time == AV_NOPTS_VALUE)
1527 st->start_time = pkt->pts;
1528 }
1529 av_free_packet(pkt);
1530 }
1531
1532 /* estimate the end time (duration) */
1533 /* XXX: may need to support wrapping */
1534 filesize = ic->file_size;
1535 offset = filesize - DURATION_MAX_READ_SIZE;
1536 if (offset < 0)
1537 offset = 0;
1538
1539 url_fseek(ic->pb, offset, SEEK_SET);
1540 read_size = 0;
1541 for(;;) {
1542 if (read_size >= DURATION_MAX_READ_SIZE)
1543 break;
1544
1545 ret = av_read_packet(ic, pkt);
1546 if (ret != 0)
1547 break;
1548 read_size += pkt->size;
1549 st = ic->streams[pkt->stream_index];
1550 if (pkt->pts != AV_NOPTS_VALUE &&
1551 st->start_time != AV_NOPTS_VALUE) {
1552 end_time = pkt->pts;
1553 duration = end_time - st->start_time;
1554 if (duration > 0) {
1555 if (st->duration == AV_NOPTS_VALUE ||
1556 st->duration < duration)
1557 st->duration = duration;
1558 }
1559 }
1560 av_free_packet(pkt);
1561 }
1562
1563 fill_all_stream_timings(ic);
1564
1565 url_fseek(ic->pb, old_offset, SEEK_SET);
1566 for(i=0; i<ic->nb_streams; i++){
1567 st= ic->streams[i];
1568 st->cur_dts= st->first_dts;
1569 st->last_IP_pts = AV_NOPTS_VALUE;
1570 }
1571 }
1572
1573 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1574 {
1575 int64_t file_size;
1576
1577 /* get the file size, if possible */
1578 if (ic->iformat->flags & AVFMT_NOFILE) {
1579 file_size = 0;
1580 } else {
1581 file_size = url_fsize(ic->pb);
1582 if (file_size < 0)
1583 file_size = 0;
1584 }
1585 ic->file_size = file_size;
1586
1587 if ((!strcmp(ic->iformat->name, "mpeg") ||
1588 !strcmp(ic->iformat->name, "mpegts")) &&
1589 file_size && !ic->pb->is_streamed) {
1590 /* get accurate estimate from the PTSes */
1591 av_estimate_timings_from_pts(ic, old_offset);
1592 } else if (av_has_duration(ic)) {
1593 /* at least one component has timings - we use them for all
1594 the components */
1595 fill_all_stream_timings(ic);
1596 } else {
1597 /* less precise: use bit rate info */
1598 av_estimate_timings_from_bit_rate(ic);
1599 }
1600 av_update_stream_timings(ic);
1601
1602 #if 0
1603 {
1604 int i;
1605 AVStream *st;
1606 for(i = 0;i < ic->nb_streams; i++) {
1607 st = ic->streams[i];
1608 printf("%d: start_time: %0.3f duration: %0.3f\n",
1609 i, (double)st->start_time / AV_TIME_BASE,
1610 (double)st->duration / AV_TIME_BASE);
1611 }
1612 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1613 (double)ic->start_time / AV_TIME_BASE,
1614 (double)ic->duration / AV_TIME_BASE,
1615 ic->bit_rate / 1000);
1616 }
1617 #endif
1618 }
1619
1620 static int has_codec_parameters(AVCodecContext *enc)
1621 {
1622 int val;
1623 switch(enc->codec_type) {
1624 case CODEC_TYPE_AUDIO:
1625 val = enc->sample_rate;
1626 break;
1627 case CODEC_TYPE_VIDEO:
1628 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1629 break;
1630 default:
1631 val = 1;
1632 break;
1633 }
1634 return (enc->codec_id != CODEC_ID_NONE && val != 0);
1635 }
1636
1637 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1638 {
1639 int16_t *samples;
1640 AVCodec *codec;
1641 int got_picture, data_size, ret=0;
1642 AVFrame picture;
1643
1644 if(!st->codec->codec){
1645 codec = avcodec_find_decoder(st->codec->codec_id);
1646 if (!codec)
1647 return -1;
1648 ret = avcodec_open(st->codec, codec);
1649 if (ret < 0)
1650 return ret;
1651 }
1652
1653 if(!has_codec_parameters(st->codec)){
1654 switch(st->codec->codec_type) {
1655 case CODEC_TYPE_VIDEO:
1656 ret = avcodec_decode_video(st->codec, &picture,
1657 &got_picture, (uint8_t *)data, size);
1658 break;
1659 case CODEC_TYPE_AUDIO:
1660 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1661 samples = av_malloc(data_size);
1662 if (!samples)
1663 goto fail;
1664 ret = avcodec_decode_audio2(st->codec, samples,
1665 &data_size, (uint8_t *)data, size);
1666 av_free(samples);
1667 break;
1668 default:
1669 break;
1670 }
1671 }
1672 fail:
1673 return ret;
1674 }
1675
1676 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
1677 {
1678 AVInputFormat *fmt;
1679 fmt = av_probe_input_format2(pd, 1, &score);
1680
1681 if (fmt) {
1682 if (strncmp(fmt->name, "mp3", 3) == 0)
1683 st->codec->codec_id = CODEC_ID_MP3;
1684 else if (strncmp(fmt->name, "ac3", 3) == 0)
1685 st->codec->codec_id = CODEC_ID_AC3;
1686 }
1687 return !!fmt;
1688 }
1689
1690 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1691 {
1692 while (tags->id != CODEC_ID_NONE) {
1693 if (tags->id == id)
1694 return tags->tag;
1695 tags++;
1696 }
1697 return 0;
1698 }
1699
1700 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1701 {
1702 int i;
1703 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1704 if(tag == tags[i].tag)
1705 return tags[i].id;
1706 }
1707 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1708 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1709 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1710 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1711 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1712 return tags[i].id;
1713 }
1714 return CODEC_ID_NONE;
1715 }
1716
1717 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1718 {
1719 int i;
1720 for(i=0; tags && tags[i]; i++){
1721 int tag= codec_get_tag(tags[i], id);
1722 if(tag) return tag;
1723 }
1724 return 0;
1725 }
1726
1727 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1728 {
1729 int i;
1730 for(i=0; tags && tags[i]; i++){
1731 enum CodecID id= codec_get_id(tags[i], tag);
1732 if(id!=CODEC_ID_NONE) return id;
1733 }
1734 return CODEC_ID_NONE;
1735 }
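
/* Hedged sketch of the tag <-> codec id helpers above, using a small local
 * table (real (de)muxers use shared tables such as those declared in riff.h).
 * The reverse lookup in codec_get_id() is case insensitive on each byte of
 * the fourcc, so 'divx' matches 'DIVX':
 *
 *     static const AVCodecTag my_tags[] = {
 *         { CODEC_ID_MPEG4,     MKTAG('D', 'I', 'V', 'X') },
 *         { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '3') },
 *         { CODEC_ID_NONE,      0 },
 *     };
 *
 *     enum CodecID id  = codec_get_id(my_tags, MKTAG('d', 'i', 'v', 'x')); // CODEC_ID_MPEG4
 *     unsigned int tag = codec_get_tag(my_tags, CODEC_ID_MSMPEG4V3);       // 'DIV3'
 */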
1736
1737 /* absolute maximum size we read until we abort */
1738 #define MAX_READ_SIZE 5000000
1739
1740 #define MAX_STD_TIMEBASES (60*12+5)
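/* get_std_framerate() enumerates the candidate frame rates tested by
 * av_find_stream_info() below: for i < 60*12 it represents i/12 fps (every
 * multiple of 1/12 fps up to 60 fps), and the last five entries are the
 * NTSC-style rates 24000/1001, 30000/1001, 60000/1001, 12000/1001 and
 * 15000/1001. The returned value is the rate scaled by 12*1001, matching the
 * "ticks" computation in the duration_error loop. */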
1741 static int get_std_framerate(int i){
1742 if(i<60*12) return i*1001;
1743 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1744 }
1745
1746 int av_find_stream_info(AVFormatContext *ic)
1747 {
1748 int i, count, ret, read_size, j;
1749 AVStream *st;
1750 AVPacket pkt1, *pkt;
1751 int64_t last_dts[MAX_STREAMS];
1752 int duration_count[MAX_STREAMS]={0};
1753 double (*duration_error)[MAX_STD_TIMEBASES];
1754 offset_t old_offset = url_ftell(ic->pb);
1755 int64_t codec_info_duration[MAX_STREAMS]={0};
1756 int codec_info_nb_frames[MAX_STREAMS]={0};
1757 AVProbeData probe_data[MAX_STREAMS];
1758 int codec_identified[MAX_STREAMS]={0};
1759
1760 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1761 if (!duration_error) return AVERROR(ENOMEM);
1762
1763 for(i=0;i<ic->nb_streams;i++) {
1764 st = ic->streams[i];
1765 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1766 /* if(!st->time_base.num)
1767 st->time_base= */
1768 if(!st->codec->time_base.num)
1769 st->codec->time_base= st->time_base;
1770 }
1771 //only for the split stuff
1772 if (!st->parser) {
1773 st->parser = av_parser_init(st->codec->codec_id);
1774 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
1775 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1776 }
1777 }
1778 }
1779
1780 for(i=0;i<MAX_STREAMS;i++){
1781 last_dts[i]= AV_NOPTS_VALUE;
1782 }
1783
1784 memset(probe_data, 0, sizeof(probe_data));
1785 count = 0;
1786 read_size = 0;
1787 for(;;) {
1788 /* check if one codec still needs to be handled */
1789 for(i=0;i<ic->nb_streams;i++) {
1790 st = ic->streams[i];
1791 if (!has_codec_parameters(st->codec))
1792 break;
1793 /* variable fps and no guess at the real fps */
1794 if( (st->codec->time_base.den >= 101LL*st->codec->time_base.num || st->codec->codec_id == CODEC_ID_MPEG2VIDEO)
1795 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1796 break;
1797 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1798 break;
1799 if(st->first_dts == AV_NOPTS_VALUE)
1800 break;
1801 }
1802 if (i == ic->nb_streams) {
1803 /* NOTE: if the format has no header, then we need to read
1804 some packets to get most of the streams, so we cannot
1805 stop here */
1806 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1807 /* if we found the info for all the codecs, we can stop */
1808 ret = count;
1809 break;
1810 }
1811 }
1812 /* we did not get all the codec info, but we read too much data */
1813 if (read_size >= MAX_READ_SIZE) {
1814 ret = count;
1815 break;
1816 }
1817
1818 /* NOTE: a new stream can be added here if the file has no header
1819 (AVFMTCTX_NOHEADER) */
1820 ret = av_read_frame_internal(ic, &pkt1);
1821 if (ret < 0) {
1822 /* EOF or error */
1823 ret = -1; /* we could not get all the codec parameters before EOF */
1824 for(i=0;i<ic->nb_streams;i++) {
1825 st = ic->streams[i];
1826 if (!has_codec_parameters(st->codec)){
1827 char buf[256];
1828 avcodec_string(buf, sizeof(buf), st->codec, 0);
1829 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1830 } else {
1831 ret = 0;
1832 }
1833 }
1834 break;
1835 }
1836
1837 pkt= add_to_pktbuf(ic, &pkt1);
1838 if(av_dup_packet(pkt) < 0)
1839 return AVERROR(ENOMEM);
1840
1841 read_size += pkt->size;
1842
1843 st = ic->streams[pkt->stream_index];
1844 if(codec_info_nb_frames[st->index]>1)
1845 codec_info_duration[st->index] += pkt->duration;
1846 if (pkt->duration != 0)
1847 codec_info_nb_frames[st->index]++;
1848
1849 {
1850 int index= pkt->stream_index;
1851 int64_t last= last_dts[index];
1852 int64_t duration= pkt->dts - last;
1853
1854 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1855 double dur= duration * av_q2d(st->time_base);
1856
1857 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1858 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
1859 if(duration_count[index] < 2)
1860 memset(duration_error, 0, MAX_STREAMS * sizeof(*duration_error));
1861 for(i=1; i<MAX_STD_TIMEBASES; i++){
1862 int framerate= get_std_framerate(i);
1863 int ticks= lrintf(dur*framerate/(1001*12));
1864 double error= dur - ticks*1001*12/(double)framerate;
1865 duration_error[index][i] += error*error;
1866 }
1867 duration_count[index]++;
1868 }
1869 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
1870 last_dts[pkt->stream_index]= pkt->dts;
1871
1872 if (st->codec->codec_id == CODEC_ID_NONE) {
1873 AVProbeData *pd = &(probe_data[st->index]);
1874 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
1875 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
1876 pd->buf_size += pkt->size;
1877 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
1878 }
1879 }
1880 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1881 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1882 if(i){
1883 st->codec->extradata_size= i;
1884 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1885 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1886 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1887 }
1888 }
1889
1890 /* if still no information, we try to open the codec and to
1891 decompress the frame. We try to avoid that in most cases as
1892 it takes longer and uses more memory. For MPEG-4, we need to
1893 decompress for QuickTime. */
1894 if (!has_codec_parameters(st->codec) /*&&
1895 (st->codec->codec_id == CODEC_ID_FLV1 ||
1896 st->codec->codec_id == CODEC_ID_H264 ||
1897 st->codec->codec_id == CODEC_ID_H263 ||
1898 st->codec->codec_id == CODEC_ID_H261 ||
1899 st->codec->codec_id == CODEC_ID_VORBIS ||
1900 st->codec->codec_id == CODEC_ID_MJPEG ||
1901 st->codec->codec_id == CODEC_ID_PNG ||
1902 st->codec->codec_id == CODEC_ID_PAM ||
1903 st->codec->codec_id == CODEC_ID_PGM ||
1904 st->codec->codec_id == CODEC_ID_PGMYUV ||
1905 st->codec->codec_id == CODEC_ID_PBM ||
1906 st->codec->codec_id == CODEC_ID_PPM ||
1907 st->codec->codec_id == CODEC_ID_SHORTEN ||
1908 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1909 try_decode_frame(st, pkt->data, pkt->size);
1910
1911 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
1912 break;
1913 }
1914 count++;
1915 }
1916
1917 // close codecs which were opened in try_decode_frame()
1918 for(i=0;i<ic->nb_streams;i++) {
1919 st = ic->streams[i];
1920 if(st->codec->codec)
1921 avcodec_close(st->codec);
1922 }
1923 for(i=0;i<ic->nb_streams;i++) {
1924 st = ic->streams[i];
1925 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1926 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
1927 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
1928
1929 if(duration_count[i]
1930 && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) /*&&
1931 //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
1932 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
1933 double best_error= 2*av_q2d(st->time_base);
1934 best_error= best_error*best_error*duration_count[i]*1000*12*30;
1935
1936 for(j=1; j<MAX_STD_TIMEBASES; j++){
1937 double error= duration_error[i][j] * get_std_framerate(j);
1938 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1939 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
1940 if(error < best_error){
1941 best_error= error;
1942 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
1943 }
1944 }
1945 }
1946
1947 if (!st->r_frame_rate.num){
1948 if( st->codec->time_base.den * (int64_t)st->time_base.num
1949 <= st->codec->time_base.num * (int64_t)st->time_base.den){
1950 st->r_frame_rate.num = st->codec->time_base.den;
1951 st->r_frame_rate.den = st->codec->time_base.num;
1952 }else{
1953 st->r_frame_rate.num = st->time_base.den;
1954 st->r_frame_rate.den = st->time_base.num;
1955 }
1956 }
1957 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
1958 if (st->codec->codec_id == CODEC_ID_NONE && probe_data[st->index].buf_size > 0) {
1959 codec_identified[st->index] = set_codec_from_probe_data(st, &(probe_data[st->index]), 1);
1960 if (codec_identified[st->index]) {
1961 st->need_parsing = AVSTREAM_PARSE_FULL;
1962 }
1963 }
1964 if(!st->codec->bits_per_sample)
1965 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
1966 }
1967 }
1968
1969 av_estimate_timings(ic, old_offset);
1970
1971 for(i=0;i<ic->nb_streams;i++) {
1972 st = ic->streams[i];
1973 if (codec_identified[st->index])
1974 break;
1975 }
1976 //FIXME this is a mess
1977 if(i!=ic->nb_streams){
1978 av_read_frame_flush(ic);
1979 for(i=0;i<ic->nb_streams;i++) {
1980 st = ic->streams[i];
1981 if (codec_identified[st->index]) {
1982 av_seek_frame(ic, st->index, 0.0, 0);
1983 }
1984 st->cur_dts= st->first_dts;
1985 }
1986 url_fseek(ic->pb, ic->data_offset, SEEK_SET);
1987 }
1988
1989 #if 0
1990 /* correct DTS for b frame streams with no timestamps */
1991 for(i=0;i<ic->nb_streams;i++) {
1992 st = ic->streams[i];
1993 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1994 if(b-frames){
1995 ppktl = &ic->packet_buffer;
1996 while(ppkt1){
1997 if(ppkt1->stream_index != i)
1998 continue;
1999 if(ppkt1->pkt->dts < 0)
2000 break;
2001 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2002 break;
2003 ppkt1->pkt->dts -= delta;
2004 ppkt1= ppkt1->next;
2005 }
2006 if(ppkt1)
2007 continue;
2008 st->cur_dts -= delta;
2009 }
2010 }
2011 }
2012 #endif
2013
2014 av_free(duration_error);
2015 for(i=0;i<MAX_STREAMS;i++){
2016 av_freep(&(probe_data[i].buf));
2017 }
2018
2019 return ret;
2020 }
2021
2022 /*******************************************************/
2023
2024 int av_read_play(AVFormatContext *s)
2025 {
2026 if (s->iformat->read_play)
2027 return s->iformat->read_play(s);
2028 if (s->pb && s->pb->read_play)
2029 return av_url_read_fplay(s->pb);
2030 return AVERROR(ENOSYS);
2031 }
2032
2033 int av_read_pause(AVFormatContext *s)
2034 {
2035 if (s->iformat->read_pause)
2036 return s->iformat->read_pause(s);
2037 if (s->pb && s->pb->read_pause)
2038 return av_url_read_fpause(s->pb);
2039 return AVERROR(ENOSYS);
2040 }
2041
2042 void av_close_input_file(AVFormatContext *s)
2043 {
2044 int i, must_open_file;
2045 AVStream *st;
2046
2047 /* free previous packet */
2048 if (s->cur_st && s->cur_st->parser)
2049 av_free_packet(&s->cur_pkt);
2050
2051 if (s->iformat->read_close)
2052 s->iformat->read_close(s);
2053 for(i=0;i<s->nb_streams;i++) {
2054 /* free all data in a stream component */
2055 st = s->streams[i];
2056 if (st->parser) {
2057 av_parser_close(st->parser);
2058 }
2059 av_free(st->index_entries);
2060 av_free(st->codec->extradata);
2061 av_free(st->codec);
2062 av_free(st);
2063 }
2064 for(i=s->nb_programs-1; i>=0; i--) {
2065 av_freep(&s->programs[i]->provider_name);
2066 av_freep(&s->programs[i]->name);
2067 av_freep(&s->programs[i]->stream_index);
2068 av_freep(&s->programs[i]);
2069 }
2070 flush_packet_queue(s);
2071 must_open_file = 1;
2072 if (s->iformat->flags & AVFMT_NOFILE) {
2073 must_open_file = 0;
2074 }
2075 if (must_open_file) {
2076 url_fclose(s->pb);
2077 }
2078 av_freep(&s->priv_data);
2079 av_free(s);
2080 }
2081
2082 AVStream *av_new_stream(AVFormatContext *s, int id)
2083 {
2084 AVStream *st;
2085 int i;
2086
2087 if (s->nb_streams >= MAX_STREAMS)
2088 return NULL;
2089
2090 st = av_mallocz(sizeof(AVStream));
2091 if (!st)
2092 return NULL;
2093
2094 st->codec= avcodec_alloc_context();
2095 if (s->iformat) {
2096 /* no default bitrate if decoding */
2097 st->codec->bit_rate = 0;
2098 }
2099 st->index = s->nb_streams;
2100 st->id = id;
2101 st->start_time = AV_NOPTS_VALUE;
2102 st->duration = AV_NOPTS_VALUE;
2103 st->cur_dts = AV_NOPTS_VALUE;
2104 st->first_dts = AV_NOPTS_VALUE;
2105
2106 /* default pts settings is MPEG like */
2107 av_set_pts_info(st, 33, 1, 90000);
2108 st->last_IP_pts = AV_NOPTS_VALUE;
2109 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2110 st->pts_buffer[i]= AV_NOPTS_VALUE;
2111
2112 s->streams[s->nb_streams++] = st;
2113 return st;
2114 }
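/* Illustrative sketch (not part of the library): a demuxer would typically
 * create its streams from read_header() roughly like this. The function name,
 * the codec id and the millisecond time base below are assumptions chosen for
 * the example only. */
#if 0
static int example_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVStream *st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_MPEG4;
    av_set_pts_info(st, 33, 1, 1000); /* timestamps in milliseconds instead of the 90 kHz default */
    return 0;
}
#endif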
2115
2116 AVProgram *av_new_program(AVFormatContext *ac, int id)
2117 {
2118 AVProgram *program=NULL;
2119 int i;
2120
2121 #ifdef DEBUG_SI
2122 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2123 #endif
2124
2125 for(i=0; i<ac->nb_programs; i++)
2126 if(ac->programs[i]->id == id)
2127 program = ac->programs[i];
2128
2129 if(!program){
2130 program = av_mallocz(sizeof(AVProgram));
2131 if (!program)
2132 return NULL;
2133 dynarray_add(&ac->programs, &ac->nb_programs, program);
2134 program->discard = AVDISCARD_NONE;
2135 }
2136 program->id = id;
2137
2138 return program;
2139 }
2140
2141 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2142 {
2143 assert(!provider_name == !name);
2144 if(name){
2145 av_free(program->provider_name);
2146 av_free(program-> name);
2147 program->provider_name = av_strdup(provider_name);
2148 program-> name = av_strdup( name);
2149 }
2150 }
2151
2152
2153 /************************************************************/
2154 /* output media file */
2155
2156 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2157 {
2158 int ret;
2159
2160 if (s->oformat->priv_data_size > 0) {
2161 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2162 if (!s->priv_data)
2163 return AVERROR(ENOMEM);
2164 } else
2165 s->priv_data = NULL;
2166
2167 if (s->oformat->set_parameters) {
2168 ret = s->oformat->set_parameters(s, ap);
2169 if (ret < 0)
2170 return ret;
2171 }
2172 return 0;
2173 }
2174
2175 int av_write_header(AVFormatContext *s)
2176 {
2177 int ret, i;
2178 AVStream *st;
2179
2180 // some sanity checks
2181 for(i=0;i<s->nb_streams;i++) {
2182 st = s->streams[i];
2183
2184 switch (st->codec->codec_type) {
2185 case CODEC_TYPE_AUDIO:
2186 if(st->codec->sample_rate<=0){
2187 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2188 return -1;
2189 }
2190 break;
2191 case CODEC_TYPE_VIDEO:
2192 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2193 av_log(s, AV_LOG_ERROR, "time base not set\n");
2194 return -1;
2195 }
2196 if(st->codec->width<=0 || st->codec->height<=0){
2197 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2198 return -1;
2199 }
2200 break;
2201 }
2202
2203 if(s->oformat->codec_tag){
2204 if(st->codec->codec_tag){
2205 //FIXME
2206 //check that tag + id is in the table
2207 //if neither is in the table -> ok
2208 //if tag is in the table with another id -> FAIL
2209 //if id is in the table with another tag -> FAIL unless strict < ?
2210 }else
2211 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2212 }
2213 }
2214
2215 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2216 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2217 if (!s->priv_data)
2218 return AVERROR(ENOMEM);
2219 }
2220
2221 if(s->oformat->write_header){
2222 ret = s->oformat->write_header(s);
2223 if (ret < 0)
2224 return ret;
2225 }
2226
2227 /* init PTS generation */
2228 for(i=0;i<s->nb_streams;i++) {
2229 int64_t den = AV_NOPTS_VALUE;
2230 st = s->streams[i];
2231
2232 switch (st->codec->codec_type) {
2233 case CODEC_TYPE_AUDIO:
2234 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2235 break;
2236 case CODEC_TYPE_VIDEO:
2237 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2238 break;
2239 default:
2240 break;
2241 }
2242 if (den != AV_NOPTS_VALUE) {
2243 if (den <= 0)
2244 return AVERROR_INVALIDDATA;
2245 av_frac_init(&st->pts, 0, 0, den);
2246 }
2247 }
2248 return 0;
2249 }
2250
2251 //FIXME merge with compute_pkt_fields
2252 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2253 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2254 int num, den, frame_size, i;
2255
2256 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2257
2258 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2259 return -1;*/
2260
2261 /* duration field */
2262 if (pkt->duration == 0) {
2263 compute_frame_duration(&num, &den, st, NULL, pkt);
2264 if (den && num) {
2265 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2266 }
2267 }
2268
2269 //XXX/FIXME this is a temporary hack until all encoders output pts
2270 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2271 pkt->dts=
2272 // pkt->pts= st->cur_dts;
2273 pkt->pts= st->pts.val;
2274 }
2275
2276 //calculate dts from pts
2277 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2278 st->pts_buffer[0]= pkt->pts;
2279 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2280 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2281 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2282 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2283
2284 pkt->dts= st->pts_buffer[0];
2285 }
2286
2287 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2288 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2289 return -1;
2290 }
2291 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2292 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2293 return -1;
2294 }
2295
2296 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2297 st->cur_dts= pkt->dts;
2298 st->pts.val= pkt->dts;
2299
2300 /* update pts */
2301 switch (st->codec->codec_type) {
2302 case CODEC_TYPE_AUDIO:
2303 frame_size = get_audio_frame_size(st->codec, pkt->size);
2304
2305 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2306 but it would be better if we had the real timestamps from the encoder */
2307 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2308 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2309 }
2310 break;
2311 case CODEC_TYPE_VIDEO:
2312 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2313 break;
2314 default:
2315 break;
2316 }
2317 return 0;
2318 }
2319
2320 static void truncate_ts(AVStream *st, AVPacket *pkt){
2321 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2322
2323 // if(pkt->dts < 0)
2324 // pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further investigation about what we should do here
2325
2326 if (pkt->pts != AV_NOPTS_VALUE)
2327 pkt->pts &= pts_mask;
2328 if (pkt->dts != AV_NOPTS_VALUE)
2329 pkt->dts &= pts_mask;
2330 }
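/* In truncate_ts() above, the MPEG-like default of pts_wrap_bits == 33 gives
 * pts_mask = (2LL << 32) - 1 = 0x1FFFFFFFF, i.e. timestamps are reduced
 * modulo 2^33 before being written out. */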
2331
2332 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2333 {
2334 int ret;
2335
2336 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2337 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2338 return ret;
2339
2340 truncate_ts(s->streams[pkt->stream_index], pkt);
2341
2342 ret= s->oformat->write_packet(s, pkt);
2343 if(!ret)
2344 ret= url_ferror(s->pb);
2345 return ret;
2346 }
2347
2348 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2349 AVPacketList *pktl, **next_point, *this_pktl;
2350 int stream_count=0;
2351 int streams[MAX_STREAMS];
2352
2353 if(pkt){
2354 AVStream *st= s->streams[ pkt->stream_index];
2355
2356 // assert(pkt->destruct != av_destruct_packet); //FIXME
2357
2358 this_pktl = av_mallocz(sizeof(AVPacketList));
2359 this_pktl->pkt= *pkt;
2360 if(pkt->destruct == av_destruct_packet)
2361 pkt->destruct= NULL; // non shared -> must keep original from being freed
2362 else
2363 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2364
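/* Insert the new packet so the buffer stays sorted by dts. Streams may use
 * different time bases, so each dts is effectively converted to seconds by
 * multiplying with its own time base; both sides are scaled by the common
 * denominator tb1.den * tb2.den to keep the comparison in integers. */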
2365 next_point = &s->packet_buffer;
2366 while(*next_point){
2367 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2368 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2369 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2370 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2371 break;
2372 next_point= &(*next_point)->next;
2373 }
2374 this_pktl->next= *next_point;
2375 *next_point= this_pktl;
2376 }
2377
2378 memset(streams, 0, sizeof(streams));
2379 pktl= s->packet_buffer;
2380 while(pktl){
2381 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2382 if(streams[ pktl->pkt.stream_index ] == 0)
2383 stream_count++;
2384 streams[ pktl->pkt.stream_index ]++;
2385 pktl= pktl->next;
2386 }
2387
2388 if(s->nb_streams == stream_count || (flush && stream_count)){
2389 pktl= s->packet_buffer;
2390 *out= pktl->pkt;
2391
2392 s->packet_buffer= pktl->next;
2393 av_freep(&pktl);
2394 return 1;
2395 }else{
2396 av_init_packet(out);
2397 return 0;
2398 }
2399 }
2400
2401 /**
2402 * Interleaves an AVPacket correctly so it can be muxed.
2403 * @param out the interleaved packet will be output here
2404 * @param in the input packet
2405 * @param flush 1 if no further packets are available as input and all
2406 * remaining packets should be output
2407 * @return 1 if a packet was output, 0 if no packet could be output,
2408 * < 0 if an error occurred
2409 */
2410 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2411 if(s->oformat->interleave_packet)
2412 return s->oformat->interleave_packet(s, out, in, flush);
2413 else
2414 return av_interleave_packet_per_dts(s, out, in, flush);
2415 }
2416
2417 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2418 AVStream *st= s->streams[ pkt->stream_index];
2419
2420 //FIXME/XXX/HACK drop zero sized packets
2421 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2422 return 0;
2423
2424 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2425 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2426 return -1;
2427
2428 if(pkt->dts == AV_NOPTS_VALUE)
2429 return -1;
2430
2431 for(;;){
2432 AVPacket opkt;
2433 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2434 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2435 return ret;
2436
2437 truncate_ts(s->streams[opkt.stream_index], &opkt);
2438 ret= s->oformat->write_packet(s, &opkt);
2439
2440 av_free_packet(&opkt);
2441 pkt= NULL;
2442
2443 if(ret<0)
2444 return ret;
2445 if(url_ferror(s->pb))
2446 return url_ferror(s->pb);
2447 }
2448 }
2449
2450 int av_write_trailer(AVFormatContext *s)
2451 {
2452 int ret, i;
2453
2454 for(;;){
2455 AVPacket pkt;
2456 ret= av_interleave_packet(s, &pkt, NULL, 1);
2457 if(ret<0) //FIXME cleanup needed for ret<0 ?
2458 goto fail;
2459 if(!ret)
2460 break;
2461
2462 truncate_ts(s->streams[pkt.stream_index], &pkt);
2463 ret= s->oformat->write_packet(s, &pkt);
2464
2465 av_free_packet(&pkt);
2466
2467 if(ret<0)
2468 goto fail;
2469 if(url_ferror(s->pb))
2470 goto fail;
2471 }
2472
2473 if(s->oformat->write_trailer)
2474 ret = s->oformat->write_trailer(s);
2475 fail:
2476 if(ret == 0)
2477 ret=url_ferror(s->pb);
2478 for(i=0;i<s->nb_streams;i++)
2479 av_freep(&s->streams[i]->priv_data);
2480 av_freep(&s->priv_data);
2481 return ret;
2482 }
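/* Illustrative muxing sketch (error paths and packet production are elided;
 * the function name is hypothetical and the context "oc" and packet array are
 * assumed to have been prepared by the caller): the usual call order around
 * the functions above is av_set_parameters() -> av_write_header() ->
 * av_interleaved_write_frame() ... -> av_write_trailer(). */
#if 0
static int example_mux(AVFormatContext *oc, AVPacket *pkts, int nb_pkts)
{
    int i;
    if (av_set_parameters(oc, NULL) < 0)
        return -1;
    if (av_write_header(oc) < 0)
        return -1;
    for (i = 0; i < nb_pkts; i++)
        if (av_interleaved_write_frame(oc, &pkts[i]) < 0)
            return -1;
    return av_write_trailer(oc);
}
#endif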
2483
2484 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2485 {
2486 int i, j;
2487 AVProgram *program=NULL;
2488 void *tmp;
2489
2490 for(i=0; i<ac->nb_programs; i++){
2491 if(ac->programs[i]->id != progid)
2492 continue;
2493 program = ac->programs[i];
2494 for(j=0; j<program->nb_stream_indexes; j++)
2495 if(program->stream_index[j] == idx)
2496 return;
2497
2498 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2499 if(!tmp)
2500 return;
2501 program->stream_index = tmp;
2502 program->stream_index[program->nb_stream_indexes++] = idx;
2503 return;
2504 }
2505 }
2506
2507 /* "user interface" functions */
2508 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2509 {
2510 char buf[256];
2511 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2512 AVStream *st = ic->streams[i];
2513 int g = ff_gcd(st->time_base.num, st->time_base.den);
2514 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2515 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2516 /* the pid is important information, so we display it */
2517 /* XXX: add a generic system */
2518 if (flags & AVFMT_SHOW_IDS)
2519 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2520 if (strlen(st->language) > 0)
2521 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2522 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2523 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2524 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2525 if(st->r_frame_rate.den && st->r_frame_rate.num)
2526 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2527 /* else if(st->time_base.den && st->time_base.num)
2528 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2529 else
2530 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2531 }
2532 av_log(NULL, AV_LOG_INFO, "\n");
2533 }
2534
2535 void dump_format(AVFormatContext *ic,
2536 int index,
2537 const char *url,
2538 int is_output)
2539 {
2540 int i;
2541
2542 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2543 is_output ? "Output" : "Input",
2544 index,
2545 is_output ? ic->oformat->name : ic->iformat->name,
2546 is_output ? "to" : "from", url);
2547 if (!is_output) {
2548 av_log(NULL, AV_LOG_INFO, " Duration: ");
2549 if (ic->duration != AV_NOPTS_VALUE) {
2550 int hours, mins, secs, us;
2551 secs = ic->duration / AV_TIME_BASE;
2552 us = ic->duration % AV_TIME_BASE;
2553 mins = secs / 60;
2554 secs %= 60;
2555 hours = mins / 60;
2556 mins %= 60;
2557 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2558 (10 * us) / AV_TIME_BASE);
2559 } else {
2560 av_log(NULL, AV_LOG_INFO, "N/A");
2561 }
2562 if (ic->start_time != AV_NOPTS_VALUE) {
2563 int secs, us;
2564 av_log(NULL, AV_LOG_INFO, ", start: ");
2565 secs = ic->start_time / AV_TIME_BASE;
2566 us = ic->start_time % AV_TIME_BASE;
2567 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2568 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2569 }
2570 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2571 if (ic->bit_rate) {
2572 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2573 } else {
2574 av_log(NULL, AV_LOG_INFO, "N/A");
2575 }
2576 av_log(NULL, AV_LOG_INFO, "\n");
2577 }
2578 if(ic->nb_programs) {
2579 int j, k;
2580 for(j=0; j<ic->nb_programs; j++) {
2581 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2582 ic->programs[j]->name ? ic->programs[j]->name : "");
2583 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2584 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2585 }
2586 } else
2587 for(i=0;i<ic->nb_streams;i++)
2588 dump_stream_format(ic, i, index, is_output);
2589 }
2590
2591 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2592 {
2593 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2594 }
2595
2596 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2597 {
2598 AVRational frame_rate;
2599 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2600 *frame_rate_num= frame_rate.num;
2601 *frame_rate_den= frame_rate.den;
2602 return ret;
2603 }
2604
2605 /**
2606 * Gets the current time in microseconds.
2607 */
2608 int64_t av_gettime(void)
2609 {
2610 struct timeval tv;
2611 gettimeofday(&tv,NULL);
2612 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2613 }
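/* Example (illustrative): measuring elapsed wall-clock time in microseconds.
 *   int64_t t0 = av_gettime();
 *   ... do some work ...
 *   av_log(NULL, AV_LOG_INFO, "took %"PRId64" us\n", av_gettime() - t0);
 */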
2614
2615 int64_t parse_date(const char *datestr, int duration)
2616 {
2617 const char *p;
2618 int64_t t;
2619 struct tm dt;
2620 int i;
2621 static const char *date_fmt[] = {
2622 "%Y-%m-%d",
2623 "%Y%m%d",
2624 };
2625 static const char *time_fmt[] = {
2626 "%H:%M:%S",
2627 "%H%M%S",
2628 };
2629 const char *q;
2630 int is_utc, len;
2631 char lastch;
2632 int negative = 0;
2633
2634 #undef time
2635 time_t now = time(0);
2636
2637 len = strlen(datestr);
2638 if (len > 0)
2639 lastch = datestr[len - 1];
2640 else
2641 lastch = '\0';
2642 is_utc = (lastch == 'z' || lastch == 'Z');
2643
2644 memset(&dt, 0, sizeof(dt));
2645
2646 p = datestr;
2647 q = NULL;
2648 if (!duration) {
2649 /* parse the year-month-day part */
2650 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2651 q = small_strptime(p, date_fmt[i], &dt);
2652 if (q) {
2653 break;
2654 }
2655 }
2656
2657 /* if the year-month-day part is missing, then take the
2658 * current year-month-day time */
2659 if (!q) {
2660 if (is_utc) {
2661 dt = *gmtime(&now);
2662 } else {
2663 dt = *localtime(&now);
2664 }
2665 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2666 } else {
2667 p = q;
2668 }
2669
2670 if (*p == 'T' || *p == 't' || *p == ' ')
2671 p++;
2672
2673 /* parse the hour-minute-second part */
2674 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2675 q = small_strptime(p, time_fmt[i], &dt);
2676 if (q) {
2677 break;
2678 }
2679 }
2680 } else {
2681 /* parse datestr as a duration */
2682 if (p[0] == '-') {
2683 negative = 1;
2684 ++p;
2685 }
2686 /* parse datestr as HH:MM:SS */
2687 q = small_strptime(p, time_fmt[0], &dt);
2688 if (!q) {
2689 /* parse datestr as S+ */
2690 dt.tm_sec = strtol(p, (char **)&q, 10);
2691 if (q == p)
2692 /* the parsing didn't succeed */
2693 return INT64_MIN;
2694 dt.tm_min = 0;
2695 dt.tm_hour = 0;
2696 }
2697 }
2698
2699 /* Now we have all the fields that we can get */
2700 if (!q) {
2701 return INT64_MIN;
2702 }
2703
2704 if (duration) {
2705 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2706 } else {
2707 dt.tm_isdst = -1; /* unknown */
2708 if (is_utc) {
2709 t = mktimegm(&dt);
2710 } else {
2711 t = mktime(&dt);
2712 }
2713 }
2714
2715 t *= 1000000;
2716
2717 /* parse the .m... part */
2718 if (*q == '.') {
2719 int val, n;
2720 q++;
2721 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2722 if (!isdigit(*q))
2723 break;
2724 val += n * (*q - '0');
2725 }
2726 t += val;
2727 }
2728 return negative ? -t : t;
2729 }
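/* Examples (illustrative):
 *   parse_date("12:34:56", 1) returns the duration 45296000000
 *   (12h34m56s expressed in microseconds);
 *   parse_date("2002-04-01T11:30:00Z", 0) returns the corresponding UTC time
 *   in microseconds since the Unix epoch;
 *   unparsable input yields INT64_MIN. */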
2730
2731 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2732 {
2733 const char *p;
2734 char tag[128], *q;
2735
2736 p = info;
2737 if (*p == '?')
2738 p++;
2739 for(;;) {
2740 q = tag;
2741 while (*p != '\0' && *p != '=' && *p != '&') {
2742 if ((q - tag) < sizeof(tag) - 1)
2743 *q++ = *p;
2744 p++;
2745 }
2746 *q = '\0';
2747 q = arg;
2748 if (*p == '=') {
2749 p++;
2750 while (*p != '&' && *p != '\0') {
2751 if ((q - arg) < arg_size - 1) {
2752 if (*p == '+')
2753 *q++ = ' ';
2754 else
2755 *q++ = *p;
2756 }
2757 p++;
2758 }
2759 *q = '\0';
2760 }
2761 if (!strcmp(tag, tag1))
2762 return 1;
2763 if (*p != '&')
2764 break;
2765 p++;
2766 }
2767 return 0;
2768 }
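/* Example (illustrative): with info = "?multicast=1&ttl=5",
 * find_info_tag(buf, sizeof(buf), "ttl", info) returns 1 and stores "5" in
 * buf; a tag that is not present in the string makes it return 0. */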
2769
2770 int av_get_frame_filename(char *buf, int buf_size,
2771 const char *path, int number)
2772 {
2773 const char *p;
2774 char *q, buf1[20], c;
2775 int nd, len, percentd_found;
2776
2777 q = buf;
2778 p = path;
2779 percentd_found = 0;
2780 for(;;) {
2781 c = *p++;
2782 if (c == '\0')
2783 break;
2784 if (c == '%') {
2785 do {
2786 nd = 0;
2787 while (isdigit(*p)) {
2788 nd = nd * 10 + *p++ - '0';
2789 }
2790 c = *p++;
2791 } while (isdigit(c));
2792
2793 switch(c) {
2794 case '%':
2795 goto addchar;
2796 case 'd':
2797 if (percentd_found)
2798 goto fail;
2799 percentd_found = 1;
2800 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2801 len = strlen(buf1);
2802 if ((q - buf + len) > buf_size - 1)
2803 goto fail;
2804 memcpy(q, buf1, len);
2805 q += len;
2806 break;
2807 default:
2808 goto fail;
2809 }
2810 } else {
2811 addchar:
2812 if ((q - buf) < buf_size - 1)
2813 *q++ = c;
2814 }
2815 }
2816 if (!percentd_found)
2817 goto fail;
2818 *q = '\0';
2819 return 0;
2820 fail:
2821 *q = '\0';
2822 return -1;
2823 }
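/* Example (illustrative):
 *   av_get_frame_filename(buf, sizeof(buf), "img%03d.jpg", 7)
 * stores "img007.jpg" in buf and returns 0; a path containing no %d
 * (or more than one) makes it fail and return -1. */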
2824
2825 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
2826 {
2827 int len, i, j, c;
2828 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2829
2830 for(i=0;i<size;i+=16) {
2831 len = size - i;
2832 if (len > 16)
2833 len = 16;
2834 PRINT("%08x ", i);
2835 for(j=0;j<16;j++) {
2836 if (j < len)
2837 PRINT(" %02x", buf[i+j]);
2838 else
2839 PRINT(" ");
2840 }
2841 PRINT(" ");
2842 for(j=0;j<len;j++) {
2843 c = buf[i+j];
2844 if (c < ' ' || c > '~')
2845 c = '.';
2846 PRINT("%c", c);
2847 }
2848 PRINT("\n");
2849 }
2850 #undef PRINT
2851 }
2852
2853 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2854 {
2855 hex_dump_internal(NULL, f, 0, buf, size);
2856 }
2857
2858 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
2859 {
2860 hex_dump_internal(avcl, NULL, level, buf, size);
2861 }
2862
2863 //FIXME needs to know the time_base
2864 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
2865 {
2866 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2867 PRINT("stream #%d:\n", pkt->stream_index);
2868 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2869 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2870 /* DTS is _always_ valid after av_read_frame() */
2871 PRINT(" dts=");
2872 if (pkt->dts == AV_NOPTS_VALUE)
2873 PRINT("N/A");
2874 else
2875 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
2876 /* PTS may not be known if B-frames are present */
2877 PRINT(" pts=");
2878 if (pkt->pts == AV_NOPTS_VALUE)
2879 PRINT("N/A");
2880 else
2881 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
2882 PRINT("\n");
2883 PRINT(" size=%d\n", pkt->size);
2884 #undef PRINT
2885 if (dump_payload)
2886 av_hex_dump(f, pkt->data, pkt->size);
2887 }
2888
2889 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2890 {
2891 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
2892 }
2893
2894 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
2895 {
2896 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
2897 }
2898
2899 void url_split(char *proto, int proto_size,
2900 char *authorization, int authorization_size,
2901 char *hostname, int hostname_size,
2902 int *port_ptr,
2903 char *path, int path_size,
2904 const char *url)
2905 {
2906 const char *p, *ls, *at, *col, *brk, *q;
2907
2908 if (port_ptr) *port_ptr = -1;
2909 if (proto_size > 0) proto[0] = 0;
2910 if (authorization_size > 0) authorization[0] = 0;
2911 if (hostname_size > 0) hostname[0] = 0;
2912 if (path_size > 0) path[0] = 0;
2913
2914 /* parse protocol */
2915 if ((p = strchr(url, ':'))) {
2916 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
2917 p++; /* skip ':' */
2918 if (*p == '/') p++;
2919 if (*p == '/') p++;
2920 } else {
2921 /* no protocol means plain filename */
2922 av_strlcpy(path, url, path_size);
2923 return;
2924 }
2925
2926 /* separate path from hostname */
2927 if ((ls = strchr(p, '/'))) {
2928 if ((q = strchr(ls, '?')))
2929 av_strlcpy(path, ls, FFMIN(path_size, q - ls + 1));
2930 else
2931 av_strlcpy(path, ls, path_size);
2932 } else if (!(ls = strchr(p, '?')))
2933 ls = &p[strlen(p)]; // XXX
2934
2935 /* the rest is hostname, use that to parse auth/port */
2936 if (ls != p) {
2937 /* authorization (user[:pass]@hostname) */
2938 if ((at = strchr(p, '@')) && at < ls) {
2939 av_strlcpy(authorization, p,
2940 FFMIN(authorization_size, at + 1 - p));
2941 p = at + 1; /* skip '@' */
2942 }
2943
2944 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
2945 /* [host]:port */
2946 av_strlcpy(hostname, p + 1,
2947 FFMIN(hostname_size, brk - p));
2948 if (brk[1] == ':' && port_ptr)
2949 *port_ptr = atoi(brk + 2);
2950 } else if ((col = strchr(p, ':')) && col < ls) {
2951 av_strlcpy(hostname, p,
2952 FFMIN(col + 1 - p, hostname_size));
2953 if (port_ptr) *port_ptr = atoi(col + 1);
2954 } else
2955 av_strlcpy(hostname, p,
2956 FFMIN(ls + 1 - p, hostname_size));
2957 }
2958 }
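/* Example (illustrative): splitting "http://user:pass@example.com:8080/index.html?q=1"
 * yields proto="http", authorization="user:pass", hostname="example.com",
 * *port_ptr=8080 and path="/index.html" (the query string is dropped here). */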
2959
2960 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2961 int pts_num, int pts_den)
2962 {
2963 s->pts_wrap_bits = pts_wrap_bits;
2964 s->time_base.num = pts_num;
2965 s->time_base.den = pts_den;
2966 }
2967
2968 /* fraction handling */
2969
2970 /**
2971 * f = val + (num / den) + 0.5.
2972 *
2973 * 'num' is normalized so that 0 <= num < den.
2974 *
2975 * @param f fractional number
2976 * @param val integer value
2977 * @param num must be >= 0
2978 * @param den must be >= 1
2979 */
2980 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2981 {
2982 num += (den >> 1);
2983 if (num >= den) {
2984 val += num / den;
2985 num = num % den;
2986 }
2987 f->val = val;
2988 f->num = num;
2989 f->den = den;
2990 }
2991
2992 /**
2993 * Fractional addition to f: f = f + (incr / f->den).
2994 *
2995 * @param f fractional number
2996 * @param incr increment, can be positive or negative
2997 */
2998 static void av_frac_add(AVFrac *f, int64_t incr)
2999 {
3000 int64_t num, den;
3001
3002 num = f->num + incr;
3003 den = f->den;
3004 if (num < 0) {
3005 f->val += num / den;
3006 num = num % den;
3007 if (num < 0) {
3008 num += den;
3009 f->val--;
3010 }
3011 } else if (num >= den) {
3012 f->val += num / den;
3013 num = num % den;
3014 }
3015 f->num = num;
3016 }
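/* Worked example (illustrative): for a 25 fps video stream with a 1/90000
 * stream time base, av_write_header() calls av_frac_init(&st->pts, 0, 0, 25),
 * since den = time_base.num * codec->time_base.den = 1 * 25. Each written
 * frame then calls av_frac_add() with incr = time_base.den *
 * codec->time_base.num = 90000, so st->pts.val advances by exactly
 * 90000 / 25 = 3600 ticks (40 ms) per frame. */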