1 /*
2 * Various utilities for the FFmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "opt.h"
23 #include "avstring.h"
24 #include "riff.h"
25 #include <sys/time.h>
26 #include <time.h>
27
28 #undef NDEBUG
29 #include <assert.h>
30
31 /**
32 * @file libavformat/utils.c
33 * Various utility functions for using the FFmpeg libraries.
34 */
35
36 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
37 static void av_frac_add(AVFrac *f, int64_t incr);
38
39 /** Head of the linked list of registered input formats. */
40 AVInputFormat *first_iformat = NULL;
41 /** Head of the linked list of registered output formats. */
42 AVOutputFormat *first_oformat = NULL;
43
44 void av_register_input_format(AVInputFormat *format)
45 {
46 AVInputFormat **p;
47 p = &first_iformat;
48 while (*p != NULL) p = &(*p)->next;
49 *p = format;
50 format->next = NULL;
51 }
52
53 void av_register_output_format(AVOutputFormat *format)
54 {
55 AVOutputFormat **p;
56 p = &first_oformat;
57 while (*p != NULL) p = &(*p)->next;
58 *p = format;
59 format->next = NULL;
60 }
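
/* Illustrative sketch (not part of the upstream file): registering a
 * hypothetical demuxer. Both register functions simply append to the
 * first_iformat / first_oformat singly linked lists above, so formats are
 * enumerated in registration order. */
#if 0
static AVInputFormat example_demuxer; /* callbacks filled in elsewhere */

static void register_example_demuxer(void)
{
    av_register_input_format(&example_demuxer);
}
#endif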
61
62 int match_ext(const char *filename, const char *extensions)
63 {
64 const char *ext, *p;
65 char ext1[32], *q;
66
67 if(!filename)
68 return 0;
69
70 ext = strrchr(filename, '.');
71 if (ext) {
72 ext++;
73 p = extensions;
74 for(;;) {
75 q = ext1;
76 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
77 *q++ = *p++;
78 *q = '\0';
79 if (!strcasecmp(ext1, ext))
80 return 1;
81 if (*p == '\0')
82 break;
83 p++;
84 }
85 }
86 return 0;
87 }
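
/* Illustrative sketch: match_ext() compares the part after the last '.'
 * case-insensitively against every entry of a comma separated list. */
#if 0
static void match_ext_example(void)
{
    assert(match_ext("movie.MKV", "mkv,mka") == 1); /* case-insensitive hit  */
    assert(match_ext("movie.avi", "mkv,mka") == 0); /* no entry matches      */
    assert(match_ext(NULL,        "mkv")     == 0); /* NULL filename is safe */
}
#endif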
88
89 AVOutputFormat *guess_format(const char *short_name, const char *filename,
90 const char *mime_type)
91 {
92 AVOutputFormat *fmt, *fmt_found;
93 int score_max, score;
94
95 /* specific test for image sequences */
96 #ifdef CONFIG_IMAGE2_MUXER
97 if (!short_name && filename &&
98 av_filename_number_test(filename) &&
99 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
100 return guess_format("image2", NULL, NULL);
101 }
102 #endif
103 /* find the proper file type */
104 fmt_found = NULL;
105 score_max = 0;
106 fmt = first_oformat;
107 while (fmt != NULL) {
108 score = 0;
109 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
110 score += 100;
111 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
112 score += 10;
113 if (filename && fmt->extensions &&
114 match_ext(filename, fmt->extensions)) {
115 score += 5;
116 }
117 if (score > score_max) {
118 score_max = score;
119 fmt_found = fmt;
120 }
121 fmt = fmt->next;
122 }
123 return fmt_found;
124 }
125
126 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
127 const char *mime_type)
128 {
129 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
130
131 if (fmt) {
132 AVOutputFormat *stream_fmt;
133 char stream_format_name[64];
134
135 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
136 stream_fmt = guess_format(stream_format_name, NULL, NULL);
137
138 if (stream_fmt)
139 fmt = stream_fmt;
140 }
141
142 return fmt;
143 }
144
145 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
146 const char *filename, const char *mime_type, enum CodecType type){
147 if(type == CODEC_TYPE_VIDEO){
148 enum CodecID codec_id= CODEC_ID_NONE;
149
150 #ifdef CONFIG_IMAGE2_MUXER
151 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
152 codec_id= av_guess_image2_codec(filename);
153 }
154 #endif
155 if(codec_id == CODEC_ID_NONE)
156 codec_id= fmt->video_codec;
157 return codec_id;
158 }else if(type == CODEC_TYPE_AUDIO)
159 return fmt->audio_codec;
160 else
161 return CODEC_ID_NONE;
162 }
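
/* Illustrative sketch: choosing an output format and its default codecs for
 * a file name. "out.mpg" is only an example; error handling is omitted. */
#if 0
static void guess_format_example(void)
{
    AVOutputFormat *ofmt = guess_format(NULL, "out.mpg", NULL);
    if (ofmt) {
        enum CodecID vc = av_guess_codec(ofmt, NULL, "out.mpg", NULL, CODEC_TYPE_VIDEO);
        enum CodecID ac = av_guess_codec(ofmt, NULL, "out.mpg", NULL, CODEC_TYPE_AUDIO);
        /* vc and ac now hold the container defaults, or CODEC_ID_NONE */
    }
}
#endif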
163
164 AVInputFormat *av_find_input_format(const char *short_name)
165 {
166 AVInputFormat *fmt;
167 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
168 if (!strcmp(fmt->name, short_name))
169 return fmt;
170 }
171 return NULL;
172 }
173
174 /* memory handling */
175
176 void av_destruct_packet(AVPacket *pkt)
177 {
178 av_free(pkt->data);
179 pkt->data = NULL; pkt->size = 0;
180 }
181
182 void av_init_packet(AVPacket *pkt)
183 {
184 pkt->pts = AV_NOPTS_VALUE;
185 pkt->dts = AV_NOPTS_VALUE;
186 pkt->pos = -1;
187 pkt->duration = 0;
188 pkt->flags = 0;
189 pkt->stream_index = 0;
190 pkt->destruct= av_destruct_packet_nofree;
191 }
192
193 int av_new_packet(AVPacket *pkt, int size)
194 {
195 uint8_t *data;
196 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
197 return AVERROR(ENOMEM);
198 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
199 if (!data)
200 return AVERROR(ENOMEM);
201 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
202
203 av_init_packet(pkt);
204 pkt->data = data;
205 pkt->size = size;
206 pkt->destruct = av_destruct_packet;
207 return 0;
208 }
209
210 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
211 {
212 int ret= av_new_packet(pkt, size);
213
214 if(ret<0)
215 return ret;
216
217 pkt->pos= url_ftell(s);
218
219 ret= get_buffer(s, pkt->data, size);
220 if(ret<=0)
221 av_free_packet(pkt);
222 else
223 pkt->size= ret;
224
225 return ret;
226 }
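
/* Illustrative sketch: av_get_packet() combines av_new_packet() with a
 * get_buffer() read and records the byte position; the chunk size of 4096
 * is an arbitrary example value. */
#if 0
static int read_raw_chunk_example(ByteIOContext *pb, AVPacket *pkt)
{
    int ret = av_get_packet(pb, pkt, 4096);
    if (ret <= 0)
        return ret;              /* the packet is already freed on failure */
    /* ... consume pkt->data / pkt->size ... */
    av_free_packet(pkt);
    return 0;
}
#endif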
227
228 int av_dup_packet(AVPacket *pkt)
229 {
230 if (pkt->destruct != av_destruct_packet) {
231 uint8_t *data;
232 /* we duplicate the packet data and must not forget to restore the
233 padding after it */
234 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
235 return AVERROR(ENOMEM);
236 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
237 if (!data) {
238 return AVERROR(ENOMEM);
239 }
240 memcpy(data, pkt->data, pkt->size);
241 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
242 pkt->data = data;
243 pkt->destruct = av_destruct_packet;
244 }
245 return 0;
246 }
247
248 int av_filename_number_test(const char *filename)
249 {
250 char buf[1024];
251 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
252 }
253
254 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
255 {
256 AVInputFormat *fmt1, *fmt;
257 int score;
258
259 fmt = NULL;
260 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
261 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
262 continue;
263 score = 0;
264 if (fmt1->read_probe) {
265 score = fmt1->read_probe(pd);
266 } else if (fmt1->extensions) {
267 if (match_ext(pd->filename, fmt1->extensions)) {
268 score = 50;
269 }
270 }
271 if (score > *score_max) {
272 *score_max = score;
273 fmt = fmt1;
274 }
275 }
276 return fmt;
277 }
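
/* Illustrative sketch: probing an already opened stream from a buffer that
 * the caller has padded with AVPROBE_PADDING_SIZE zero bytes. */
#if 0
static AVInputFormat *probe_buffer_example(const char *filename,
                                           uint8_t *buf, int buf_size)
{
    AVProbeData pd;
    pd.filename = filename;
    pd.buf      = buf;
    pd.buf_size = buf_size;
    return av_probe_input_format(&pd, 1);
}
#endif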
278
279 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
280 int score=0;
281 return av_probe_input_format2(pd, is_opened, &score);
282 }
283
284 /************************************************************/
285 /* input media file */
286
287 /**
288 * Open a media file from an IO stream. 'fmt' must be specified.
289 */
290 static const char* format_to_name(void* ptr)
291 {
292 AVFormatContext* fc = (AVFormatContext*) ptr;
293 if(fc->iformat) return fc->iformat->name;
294 else if(fc->oformat) return fc->oformat->name;
295 else return "NULL";
296 }
297
298 #define OFFSET(x) offsetof(AVFormatContext,x)
299 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
300 //these names are too long to be readable
301 #define E AV_OPT_FLAG_ENCODING_PARAM
302 #define D AV_OPT_FLAG_DECODING_PARAM
303
304 static const AVOption options[]={
305 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
306 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
307 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
308 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
309 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
310 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
311 {"track", "set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
312 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
313 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
314 {NULL},
315 };
316
317 #undef E
318 #undef D
319 #undef DEFAULT
320
321 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
322
323 static void avformat_get_context_defaults(AVFormatContext *s)
324 {
325 memset(s, 0, sizeof(AVFormatContext));
326
327 s->av_class = &av_format_context_class;
328
329 av_opt_set_defaults(s);
330 }
331
332 AVFormatContext *av_alloc_format_context(void)
333 {
334 AVFormatContext *ic;
335 ic = av_malloc(sizeof(AVFormatContext));
336 if (!ic) return ic;
337 avformat_get_context_defaults(ic);
338 ic->av_class = &av_format_context_class;
339 return ic;
340 }
341
342 int av_open_input_stream(AVFormatContext **ic_ptr,
343 ByteIOContext *pb, const char *filename,
344 AVInputFormat *fmt, AVFormatParameters *ap)
345 {
346 int err;
347 AVFormatContext *ic;
348 AVFormatParameters default_ap;
349
350 if(!ap){
351 ap=&default_ap;
352 memset(ap, 0, sizeof(default_ap));
353 }
354
355 if(!ap->prealloced_context)
356 ic = av_alloc_format_context();
357 else
358 ic = *ic_ptr;
359 if (!ic) {
360 err = AVERROR(ENOMEM);
361 goto fail;
362 }
363 ic->iformat = fmt;
364 if (pb)
365 ic->pb = *pb;
366 ic->duration = AV_NOPTS_VALUE;
367 ic->start_time = AV_NOPTS_VALUE;
368 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
369
370 /* allocate private data */
371 if (fmt->priv_data_size > 0) {
372 ic->priv_data = av_mallocz(fmt->priv_data_size);
373 if (!ic->priv_data) {
374 err = AVERROR(ENOMEM);
375 goto fail;
376 }
377 } else {
378 ic->priv_data = NULL;
379 }
380
381 err = ic->iformat->read_header(ic, ap);
382 if (err < 0)
383 goto fail;
384
385 if (pb && !ic->data_offset)
386 ic->data_offset = url_ftell(&ic->pb);
387
388 *ic_ptr = ic;
389 return 0;
390 fail:
391 if (ic) {
392 av_freep(&ic->priv_data);
393 }
394 av_free(ic);
395 *ic_ptr = NULL;
396 return err;
397 }
398
399 /** Size of probe buffer, for guessing file type from file contents. */
400 #define PROBE_BUF_MIN 2048
401 #define PROBE_BUF_MAX (1<<20)
402
403 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
404 AVInputFormat *fmt,
405 int buf_size,
406 AVFormatParameters *ap)
407 {
408 int err, must_open_file, file_opened, probe_size;
409 AVProbeData probe_data, *pd = &probe_data;
410 ByteIOContext pb1, *pb = &pb1;
411
412 file_opened = 0;
413 pd->filename = "";
414 if (filename)
415 pd->filename = filename;
416 pd->buf = NULL;
417 pd->buf_size = 0;
418
419 if (!fmt) {
420 /* guess the format from the filename alone (only AVFMT_NOFILE formats can match here) */
421 fmt = av_probe_input_format(pd, 0);
422 }
423
424 /* do not open file if the format does not need it. XXX: specific
425 hack needed to handle RTSP/TCP */
426 must_open_file = 1;
427 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
428 must_open_file = 0;
429 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it is uninitialized
430 }
431
432 if (!fmt || must_open_file) {
433 /* open the file: the format either needs one or is still unknown */
434 if ((err=url_fopen(pb, filename, URL_RDONLY)) < 0) {
435 goto fail;
436 }
437 file_opened = 1;
438 if (buf_size > 0) {
439 url_setbufsize(pb, buf_size);
440 }
441
442 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
443 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
444 /* read probe data */
445 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
446 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
447 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
448 if (url_fseek(pb, 0, SEEK_SET) < 0) {
449 url_fclose(pb);
450 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
451 file_opened = 0;
452 err = AVERROR(EIO);
453 goto fail;
454 }
455 }
456 /* guess file format */
457 fmt = av_probe_input_format2(pd, 1, &score);
458 }
459 av_freep(&pd->buf);
460 }
461
462 /* if still no format found, error */
463 if (!fmt) {
464 err = AVERROR_NOFMT;
465 goto fail;
466 }
467
468 /* XXX: suppress this hack for redirectors */
469 #ifdef CONFIG_REDIR_DEMUXER
470 if (!strcmp(fmt->name, "redir")) {
471 int redir_open(AVFormatContext **ic_ptr, ByteIOContext *f);
472 err = redir_open(ic_ptr, pb);
473 url_fclose(pb);
474 return err;
475 }
476 #endif
477
478 /* check the filename if an image sequence number is expected */
479 if (fmt->flags & AVFMT_NEEDNUMBER) {
480 if (!av_filename_number_test(filename)) {
481 err = AVERROR_NUMEXPECTED;
482 goto fail;
483 }
484 }
485 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
486 if (err)
487 goto fail;
488 return 0;
489 fail:
490 av_freep(&pd->buf);
491 if (file_opened)
492 url_fclose(pb);
493 *ic_ptr = NULL;
494 return err;
495
496 }
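
/* Illustrative sketch: the usual open/inspect/close sequence built on top of
 * av_open_input_file(). Error handling is reduced to the bare minimum. */
#if 0
static int open_example(const char *filename)
{
    AVFormatContext *ic;

    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return -1;
    if (av_find_stream_info(ic) < 0) {
        av_close_input_file(ic);
        return -1;
    }
    dump_format(ic, 0, filename, 0);  /* print stream/codec information */
    av_close_input_file(ic);
    return 0;
}
#endif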
497
498 /*******************************************************/
499
500 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
501 {
502 av_init_packet(pkt);
503 return s->iformat->read_packet(s, pkt);
504 }
505
506 /**********************************************************/
507
508 /**
509 * Get the number of samples in an audio frame. Return -1 on error.
510 */
511 static int get_audio_frame_size(AVCodecContext *enc, int size)
512 {
513 int frame_size;
514
515 if (enc->frame_size <= 1) {
516 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
517
518 if (bits_per_sample) {
519 if (enc->channels == 0)
520 return -1;
521 frame_size = (size << 3) / (bits_per_sample * enc->channels);
522 } else {
523 /* used for example by ADPCM codecs */
524 if (enc->bit_rate == 0)
525 return -1;
526 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
527 }
528 } else {
529 frame_size = enc->frame_size;
530 }
531 return frame_size;
532 }
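
/* Worked example (illustrative): for 16-bit stereo PCM a 4096 byte packet
 * yields (4096*8)/(16*2) = 1024 samples; for a CBR codec at 128000 bit/s and
 * 48000 Hz a 640 byte packet covers (640*8*48000)/128000 = 1920 samples. */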
533
534
535 /**
536 * Compute the frame duration in seconds as the fraction *pnum / *pden; both are set to 0 if it is not available.
537 */
538 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
539 AVCodecParserContext *pc, AVPacket *pkt)
540 {
541 int frame_size;
542
543 *pnum = 0;
544 *pden = 0;
545 switch(st->codec->codec_type) {
546 case CODEC_TYPE_VIDEO:
547 if(st->time_base.num*1000LL > st->time_base.den){
548 *pnum = st->time_base.num;
549 *pden = st->time_base.den;
550 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
551 *pnum = st->codec->time_base.num;
552 *pden = st->codec->time_base.den;
553 if (pc && pc->repeat_pict) {
554 *pden *= 2;
555 *pnum = (*pnum) * (2 + pc->repeat_pict);
556 }
557 }
558 break;
559 case CODEC_TYPE_AUDIO:
560 frame_size = get_audio_frame_size(st->codec, pkt->size);
561 if (frame_size < 0)
562 break;
563 *pnum = frame_size;
564 *pden = st->codec->sample_rate;
565 break;
566 default:
567 break;
568 }
569 }
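
/* Worked example (illustrative): with a codec time_base of 1001/30000
 * (29.97 fps) the result is *pnum=1001, *pden=30000; if the parser reports
 * repeat_pict=1 this becomes 3003/60000, i.e. 1.5 frame durations. */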
570
571 static int is_intra_only(AVCodecContext *enc){
572 if(enc->codec_type == CODEC_TYPE_AUDIO){
573 return 1;
574 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
575 switch(enc->codec_id){
576 case CODEC_ID_MJPEG:
577 case CODEC_ID_MJPEGB:
578 case CODEC_ID_LJPEG:
579 case CODEC_ID_RAWVIDEO:
580 case CODEC_ID_DVVIDEO:
581 case CODEC_ID_HUFFYUV:
582 case CODEC_ID_FFVHUFF:
583 case CODEC_ID_ASV1:
584 case CODEC_ID_ASV2:
585 case CODEC_ID_VCR1:
586 return 1;
587 default: break;
588 }
589 }
590 return 0;
591 }
592
593 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
594 int64_t dts, int64_t pts)
595 {
596 AVStream *st= s->streams[stream_index];
597 AVPacketList *pktl= s->packet_buffer;
598
599 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE)
600 return;
601
602 st->first_dts= dts - st->cur_dts;
603 st->cur_dts= dts;
604
605 for(; pktl; pktl= pktl->next){
606 if(pktl->pkt.stream_index != stream_index)
607 continue;
608 //FIXME think more about this check
609 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
610 pktl->pkt.pts += st->first_dts;
611
612 if(pktl->pkt.dts != AV_NOPTS_VALUE)
613 pktl->pkt.dts += st->first_dts;
614
615 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
616 st->start_time= pktl->pkt.pts;
617 }
618 if (st->start_time == AV_NOPTS_VALUE)
619 st->start_time = pts;
620 }
621
622 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
623 AVCodecParserContext *pc, AVPacket *pkt)
624 {
625 int num, den, presentation_delayed, delay, i;
626 int64_t offset;
627
628 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
629 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
630 pkt->dts -= 1LL<<st->pts_wrap_bits;
631 }
632
633 if (pkt->duration == 0) {
634 compute_frame_duration(&num, &den, st, pc, pkt);
635 if (den && num) {
636 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
637 }
638 }
639
640 /* correct timestamps with byte offset if demuxers only have timestamps on packet boundaries */
641 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
642 /* this will estimate bitrate based on this frame's duration and size */
643 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
644 if(pkt->pts != AV_NOPTS_VALUE)
645 pkt->pts += offset;
646 if(pkt->dts != AV_NOPTS_VALUE)
647 pkt->dts += offset;
648 }
649
650 if(is_intra_only(st->codec))
651 pkt->flags |= PKT_FLAG_KEY;
652
653 /* do we have a video B frame ? */
654 delay= st->codec->has_b_frames;
655 presentation_delayed = 0;
656 /* XXX: need has_b_frame, but cannot get it if the codec is
657 not initialized */
658 if (delay &&
659 pc && pc->pict_type != FF_B_TYPE)
660 presentation_delayed = 1;
661 /* This may be redundant, but it should not hurt. */
662 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
663 presentation_delayed = 1;
664
665 if(st->cur_dts == AV_NOPTS_VALUE){
666 st->cur_dts = 0; //FIXME maybe set it to 0 during init
667 }
668
669 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
670 /* interpolate PTS and DTS if they are not present */
671 if(delay <=1){
672 if (presentation_delayed) {
673 /* DTS = decompression time stamp */
674 /* PTS = presentation time stamp */
675 if (pkt->dts == AV_NOPTS_VALUE)
676 pkt->dts = st->last_IP_pts;
677 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
678 if (pkt->dts == AV_NOPTS_VALUE)
679 pkt->dts = st->cur_dts;
680
681 /* this is tricky: the dts must be incremented by the duration
682 of the frame we are displaying, i.e. the last I or P frame */
683 if (st->last_IP_duration == 0)
684 st->last_IP_duration = pkt->duration;
685 st->cur_dts = pkt->dts + st->last_IP_duration;
686 st->last_IP_duration = pkt->duration;
687 st->last_IP_pts= pkt->pts;
688 /* cannot compute PTS if not present (we could compute it only
689 by knowing the future) */
690 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
691 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
692 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
693 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
694 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
695 pkt->pts += pkt->duration;
696 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
697 }
698 }
699
700 /* presentation is not delayed : PTS and DTS are the same */
701 if(pkt->pts == AV_NOPTS_VALUE)
702 pkt->pts = pkt->dts;
703 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
704 if(pkt->pts == AV_NOPTS_VALUE)
705 pkt->pts = st->cur_dts;
706 pkt->dts = pkt->pts;
707 st->cur_dts = pkt->pts + pkt->duration;
708 }
709 }
710
711 if(pkt->pts != AV_NOPTS_VALUE){
712 st->pts_buffer[0]= pkt->pts;
713 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
714 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
715 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
716 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
717 if(pkt->dts == AV_NOPTS_VALUE)
718 pkt->dts= st->pts_buffer[0];
719 if(delay>1){
720 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
721 }
722 if(pkt->dts > st->cur_dts)
723 st->cur_dts = pkt->dts;
724 }
725
726 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
727
728 /* update flags */
729 if (pc) {
730 pkt->flags = 0;
731 /* key frame computation */
732 if (pc->pict_type == FF_I_TYPE)
733 pkt->flags |= PKT_FLAG_KEY;
734 }
735 }
736
737 void av_destruct_packet_nofree(AVPacket *pkt)
738 {
739 pkt->data = NULL; pkt->size = 0;
740 }
741
742 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
743 {
744 AVStream *st;
745 int len, ret, i;
746
747 av_init_packet(pkt);
748
749 for(;;) {
750 /* select current input stream component */
751 st = s->cur_st;
752 if (st) {
753 if (!st->need_parsing || !st->parser) {
754 /* no parsing needed: we just output the packet as is */
755 /* raw data support */
756 *pkt = s->cur_pkt;
757 compute_pkt_fields(s, st, NULL, pkt);
758 s->cur_st = NULL;
759 break;
760 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
761 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
762 s->cur_ptr, s->cur_len,
763 s->cur_pkt.pts, s->cur_pkt.dts);
764 s->cur_pkt.pts = AV_NOPTS_VALUE;
765 s->cur_pkt.dts = AV_NOPTS_VALUE;
766 /* increment read pointer */
767 s->cur_ptr += len;
768 s->cur_len -= len;
769
770 /* return packet if any */
771 if (pkt->size) {
772 got_packet:
773 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
774 pkt->duration = 0;
775 pkt->stream_index = st->index;
776 pkt->pts = st->parser->pts;
777 pkt->dts = st->parser->dts;
778 pkt->destruct = av_destruct_packet_nofree;
779 compute_pkt_fields(s, st, st->parser, pkt);
780
781 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
782 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
783 0, 0, AVINDEX_KEYFRAME);
784 }
785
786 break;
787 }
788 } else {
789 /* free packet */
790 av_free_packet(&s->cur_pkt);
791 s->cur_st = NULL;
792 }
793 } else {
794 /* read next packet */
795 ret = av_read_packet(s, &s->cur_pkt);
796 if (ret < 0) {
797 if (ret == AVERROR(EAGAIN))
798 return ret;
799 /* return the last frames, if any */
800 for(i = 0; i < s->nb_streams; i++) {
801 st = s->streams[i];
802 if (st->parser && st->need_parsing) {
803 av_parser_parse(st->parser, st->codec,
804 &pkt->data, &pkt->size,
805 NULL, 0,
806 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
807 if (pkt->size)
808 goto got_packet;
809 }
810 }
811 /* no more packets: really terminates parsing */
812 return ret;
813 }
814
815 st = s->streams[s->cur_pkt.stream_index];
816 if(st->codec->debug & FF_DEBUG_PTS)
817 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
818 s->cur_pkt.stream_index,
819 s->cur_pkt.pts,
820 s->cur_pkt.dts,
821 s->cur_pkt.size);
822
823 s->cur_st = st;
824 s->cur_ptr = s->cur_pkt.data;
825 s->cur_len = s->cur_pkt.size;
826 if (st->need_parsing && !st->parser) {
827 st->parser = av_parser_init(st->codec->codec_id);
828 if (!st->parser) {
829 /* no parser available : just output the raw packets */
830 st->need_parsing = AVSTREAM_PARSE_NONE;
831 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
832 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
833 }
834 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
835 st->parser->last_frame_offset=
836 st->parser->cur_offset= s->cur_pkt.pos;
837 }
838 }
839 }
840 }
841 if(st->codec->debug & FF_DEBUG_PTS)
842 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
843 pkt->stream_index,
844 pkt->pts,
845 pkt->dts,
846 pkt->size);
847
848 return 0;
849 }
850
851 static AVPacket *add_to_pktbuf(AVFormatContext *s, AVPacket *pkt){
852 AVPacketList *pktl= s->packet_buffer;
853 AVPacketList **plast_pktl= &s->packet_buffer;
854
855 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
856
857 pktl = av_mallocz(sizeof(AVPacketList));
858 if (!pktl)
859 return NULL;
860
861 /* add the packet in the buffered packet list */
862 *plast_pktl = pktl;
863 pktl->pkt= *pkt;
864 return &pktl->pkt;
865 }
866
867 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
868 {
869 AVPacketList *pktl;
870 int eof=0;
871 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
872
873 for(;;){
874 pktl = s->packet_buffer;
875 if (pktl) {
876 AVPacket *next_pkt= &pktl->pkt;
877
878 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
879 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
880 if( pktl->pkt.stream_index == next_pkt->stream_index
881 && next_pkt->dts < pktl->pkt.dts
882 && pktl->pkt.pts != pktl->pkt.dts //not b frame
883 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
884 next_pkt->pts= pktl->pkt.dts;
885 }
886 pktl= pktl->next;
887 }
888 pktl = s->packet_buffer;
889 }
890
891 if( next_pkt->pts != AV_NOPTS_VALUE
892 || next_pkt->dts == AV_NOPTS_VALUE
893 || !genpts || eof){
894 /* read packet from packet buffer, if there is data */
895 *pkt = *next_pkt;
896 s->packet_buffer = pktl->next;
897 av_free(pktl);
898 return 0;
899 }
900 }
901 if(genpts){
902 int ret= av_read_frame_internal(s, pkt);
903 if(ret<0){
904 if(pktl && ret != AVERROR(EAGAIN)){
905 eof=1;
906 continue;
907 }else
908 return ret;
909 }
910
911 if(av_dup_packet(add_to_pktbuf(s, pkt)) < 0)
912 return AVERROR(ENOMEM);
913 }else{
914 assert(!s->packet_buffer);
915 return av_read_frame_internal(s, pkt);
916 }
917 }
918 }
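
/* Illustrative sketch: the canonical demuxing loop around av_read_frame().
 * Timestamps are in the time base of the packet's stream. */
#if 0
static void read_loop_example(AVFormatContext *ic)
{
    AVPacket pkt;

    while (av_read_frame(ic, &pkt) >= 0) {
        AVStream *st = ic->streams[pkt.stream_index];
        /* pkt.pts and pkt.dts are expressed in st->time_base units */
        av_free_packet(&pkt);
    }
}
#endif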
919
920 /* XXX: suppress the packet queue */
921 static void flush_packet_queue(AVFormatContext *s)
922 {
923 AVPacketList *pktl;
924
925 for(;;) {
926 pktl = s->packet_buffer;
927 if (!pktl)
928 break;
929 s->packet_buffer = pktl->next;
930 av_free_packet(&pktl->pkt);
931 av_free(pktl);
932 }
933 }
934
935 /*******************************************************/
936 /* seek support */
937
938 int av_find_default_stream_index(AVFormatContext *s)
939 {
940 int i;
941 AVStream *st;
942
943 if (s->nb_streams <= 0)
944 return -1;
945 for(i = 0; i < s->nb_streams; i++) {
946 st = s->streams[i];
947 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
948 return i;
949 }
950 }
951 return 0;
952 }
953
954 /**
955 * Flush the frame reader.
956 */
957 static void av_read_frame_flush(AVFormatContext *s)
958 {
959 AVStream *st;
960 int i;
961
962 flush_packet_queue(s);
963
964 /* free previous packet */
965 if (s->cur_st) {
966 if (s->cur_st->parser)
967 av_free_packet(&s->cur_pkt);
968 s->cur_st = NULL;
969 }
970 /* fail safe */
971 s->cur_ptr = NULL;
972 s->cur_len = 0;
973
974 /* for each stream, reset read state */
975 for(i = 0; i < s->nb_streams; i++) {
976 st = s->streams[i];
977
978 if (st->parser) {
979 av_parser_close(st->parser);
980 st->parser = NULL;
981 }
982 st->last_IP_pts = AV_NOPTS_VALUE;
983 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
984 }
985 }
986
987 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
988 int i;
989
990 for(i = 0; i < s->nb_streams; i++) {
991 AVStream *st = s->streams[i];
992
993 st->cur_dts = av_rescale(timestamp,
994 st->time_base.den * (int64_t)ref_st->time_base.num,
995 st->time_base.num * (int64_t)ref_st->time_base.den);
996 }
997 }
998
999 int av_add_index_entry(AVStream *st,
1000 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1001 {
1002 AVIndexEntry *entries, *ie;
1003 int index;
1004
1005 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1006 return -1;
1007
1008 entries = av_fast_realloc(st->index_entries,
1009 &st->index_entries_allocated_size,
1010 (st->nb_index_entries + 1) *
1011 sizeof(AVIndexEntry));
1012 if(!entries)
1013 return -1;
1014
1015 st->index_entries= entries;
1016
1017 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1018
1019 if(index<0){
1020 index= st->nb_index_entries++;
1021 ie= &entries[index];
1022 assert(index==0 || ie[-1].timestamp < timestamp);
1023 }else{
1024 ie= &entries[index];
1025 if(ie->timestamp != timestamp){
1026 if(ie->timestamp <= timestamp)
1027 return -1;
1028 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1029 st->nb_index_entries++;
1030 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1031 distance= ie->min_distance;
1032 }
1033
1034 ie->pos = pos;
1035 ie->timestamp = timestamp;
1036 ie->min_distance= distance;
1037 ie->size= size;
1038 ie->flags = flags;
1039
1040 return index;
1041 }
1042
1043 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1044 int flags)
1045 {
1046 AVIndexEntry *entries= st->index_entries;
1047 int nb_entries= st->nb_index_entries;
1048 int a, b, m;
1049 int64_t timestamp;
1050
1051 a = - 1;
1052 b = nb_entries;
1053
1054 while (b - a > 1) {
1055 m = (a + b) >> 1;
1056 timestamp = entries[m].timestamp;
1057 if(timestamp >= wanted_timestamp)
1058 b = m;
1059 if(timestamp <= wanted_timestamp)
1060 a = m;
1061 }
1062 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1063
1064 if(!(flags & AVSEEK_FLAG_ANY)){
1065 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1066 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1067 }
1068 }
1069
1070 if(m == nb_entries)
1071 return -1;
1072 return m;
1073 }
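
/* Illustrative sketch: locating the last keyframe at or before a timestamp
 * with AVSEEK_FLAG_BACKWARD, as done by the seeking code below. */
#if 0
static int64_t keyframe_pos_example(AVStream *st, int64_t ts)
{
    int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
    if (idx < 0)
        return -1;
    return st->index_entries[idx].pos;
}
#endif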
1074
1075 #define DEBUG_SEEK
1076
1077 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1078 AVInputFormat *avif= s->iformat;
1079 int64_t pos_min, pos_max, pos, pos_limit;
1080 int64_t ts_min, ts_max, ts;
1081 int index;
1082 AVStream *st;
1083
1084 if (stream_index < 0)
1085 return -1;
1086
1087 #ifdef DEBUG_SEEK
1088 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1089 #endif
1090
1091 ts_max=
1092 ts_min= AV_NOPTS_VALUE;
1093 pos_limit= -1; //gcc falsely says it may be uninitialized
1094
1095 st= s->streams[stream_index];
1096 if(st->index_entries){
1097 AVIndexEntry *e;
1098
1099 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1100 index= FFMAX(index, 0);
1101 e= &st->index_entries[index];
1102
1103 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1104 pos_min= e->pos;
1105 ts_min= e->timestamp;
1106 #ifdef DEBUG_SEEK
1107 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1108 pos_min,ts_min);
1109 #endif
1110 }else{
1111 assert(index==0);
1112 }
1113
1114 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1115 assert(index < st->nb_index_entries);
1116 if(index >= 0){
1117 e= &st->index_entries[index];
1118 assert(e->timestamp >= target_ts);
1119 pos_max= e->pos;
1120 ts_max= e->timestamp;
1121 pos_limit= pos_max - e->min_distance;
1122 #ifdef DEBUG_SEEK
1123 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1124 pos_max,pos_limit, ts_max);
1125 #endif
1126 }
1127 }
1128
1129 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1130 if(pos<0)
1131 return -1;
1132
1133 /* do the seek */
1134 url_fseek(&s->pb, pos, SEEK_SET);
1135
1136 av_update_cur_dts(s, st, ts);
1137
1138 return 0;
1139 }
1140
1141 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1142 int64_t pos, ts;
1143 int64_t start_pos, filesize;
1144 int no_change;
1145
1146 #ifdef DEBUG_SEEK
1147 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1148 #endif
1149
1150 if(ts_min == AV_NOPTS_VALUE){
1151 pos_min = s->data_offset;
1152 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1153 if (ts_min == AV_NOPTS_VALUE)
1154 return -1;
1155 }
1156
1157 if(ts_max == AV_NOPTS_VALUE){
1158 int step= 1024;
1159 filesize = url_fsize(&s->pb);
1160 pos_max = filesize - 1;
1161 do{
1162 pos_max -= step;
1163 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1164 step += step;
1165 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1166 if (ts_max == AV_NOPTS_VALUE)
1167 return -1;
1168
1169 for(;;){
1170 int64_t tmp_pos= pos_max + 1;
1171 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1172 if(tmp_ts == AV_NOPTS_VALUE)
1173 break;
1174 ts_max= tmp_ts;
1175 pos_max= tmp_pos;
1176 if(tmp_pos >= filesize)
1177 break;
1178 }
1179 pos_limit= pos_max;
1180 }
1181
1182 if(ts_min > ts_max){
1183 return -1;
1184 }else if(ts_min == ts_max){
1185 pos_limit= pos_min;
1186 }
1187
1188 no_change=0;
1189 while (pos_min < pos_limit) {
1190 #ifdef DEBUG_SEEK
1191 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1192 pos_min, pos_max,
1193 ts_min, ts_max);
1194 #endif
1195 assert(pos_limit <= pos_max);
1196
1197 if(no_change==0){
1198 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1199 // interpolate position (better than dichotomy)
1200 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1201 + pos_min - approximate_keyframe_distance;
1202 }else if(no_change==1){
1203 // bisection, if interpolation failed to change min or max pos last time
1204 pos = (pos_min + pos_limit)>>1;
1205 }else{
1206 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1207 pos=pos_min;
1208 }
1209 if(pos <= pos_min)
1210 pos= pos_min + 1;
1211 else if(pos > pos_limit)
1212 pos= pos_limit;
1213 start_pos= pos;
1214
1215 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1216 if(pos == pos_max)
1217 no_change++;
1218 else
1219 no_change=0;
1220 #ifdef DEBUG_SEEK
1221 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1222 #endif
1223 if(ts == AV_NOPTS_VALUE){
1224 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1225 return -1;
1226 }
1227 assert(ts != AV_NOPTS_VALUE);
1228 if (target_ts <= ts) {
1229 pos_limit = start_pos - 1;
1230 pos_max = pos;
1231 ts_max = ts;
1232 }
1233 if (target_ts >= ts) {
1234 pos_min = pos;
1235 ts_min = ts;
1236 }
1237 }
1238
1239 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1240 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1241 #ifdef DEBUG_SEEK
1242 pos_min = pos;
1243 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1244 pos_min++;
1245 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1246 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1247 pos, ts_min, target_ts, ts_max);
1248 #endif
1249 *ts_ret= ts;
1250 return pos;
1251 }
1252
1253 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1254 int64_t pos_min, pos_max;
1255 #if 0
1256 AVStream *st;
1257
1258 if (stream_index < 0)
1259 return -1;
1260
1261 st= s->streams[stream_index];
1262 #endif
1263
1264 pos_min = s->data_offset;
1265 pos_max = url_fsize(&s->pb) - 1;
1266
1267 if (pos < pos_min) pos= pos_min;
1268 else if(pos > pos_max) pos= pos_max;
1269
1270 url_fseek(&s->pb, pos, SEEK_SET);
1271
1272 #if 0
1273 av_update_cur_dts(s, st, ts);
1274 #endif
1275 return 0;
1276 }
1277
1278 static int av_seek_frame_generic(AVFormatContext *s,
1279 int stream_index, int64_t timestamp, int flags)
1280 {
1281 int index;
1282 AVStream *st;
1283 AVIndexEntry *ie;
1284
1285 st = s->streams[stream_index];
1286
1287 index = av_index_search_timestamp(st, timestamp, flags);
1288
1289 if(index < 0 || index==st->nb_index_entries-1){
1290 int i;
1291 AVPacket pkt;
1292
1293 if(st->index_entries && st->nb_index_entries){
1294 ie= &st->index_entries[st->nb_index_entries-1];
1295 url_fseek(&s->pb, ie->pos, SEEK_SET);
1296 av_update_cur_dts(s, st, ie->timestamp);
1297 }else
1298 url_fseek(&s->pb, 0, SEEK_SET);
1299
1300 for(i=0;; i++) {
1301 int ret = av_read_frame(s, &pkt);
1302 if(ret<0)
1303 break;
1304 av_free_packet(&pkt);
1305 if(stream_index == pkt.stream_index){
1306 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1307 break;
1308 }
1309 }
1310 index = av_index_search_timestamp(st, timestamp, flags);
1311 }
1312 if (index < 0)
1313 return -1;
1314
1315 av_read_frame_flush(s);
1316 if (s->iformat->read_seek){
1317 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1318 return 0;
1319 }
1320 ie = &st->index_entries[index];
1321 url_fseek(&s->pb, ie->pos, SEEK_SET);
1322
1323 av_update_cur_dts(s, st, ie->timestamp);
1324
1325 return 0;
1326 }
1327
1328 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1329 {
1330 int ret;
1331 AVStream *st;
1332
1333 av_read_frame_flush(s);
1334
1335 if(flags & AVSEEK_FLAG_BYTE)
1336 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1337
1338 if(stream_index < 0){
1339 stream_index= av_find_default_stream_index(s);
1340 if(stream_index < 0)
1341 return -1;
1342
1343 st= s->streams[stream_index];
1344 /* timestamp for default must be expressed in AV_TIME_BASE units */
1345 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1346 }
1347 st= s->streams[stream_index];
1348
1349 /* first, we try the format specific seek */
1350 if (s->iformat->read_seek)
1351 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1352 else
1353 ret = -1;
1354 if (ret >= 0) {
1355 return 0;
1356 }
1357
1358 if(s->iformat->read_timestamp)
1359 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1360 else
1361 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1362 }
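
/* Illustrative sketch: seeking to a position given in seconds by converting
 * to the stream time base first. stream_index must be a valid stream. */
#if 0
static int seek_seconds_example(AVFormatContext *ic, int stream_index,
                                double seconds)
{
    AVStream *st = ic->streams[stream_index];
    int64_t ts = av_rescale_q((int64_t)(seconds * AV_TIME_BASE),
                              AV_TIME_BASE_Q, st->time_base);
    return av_seek_frame(ic, stream_index, ts, AVSEEK_FLAG_BACKWARD);
}
#endif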
1363
1364 /*******************************************************/
1365
1366 /**
1367 * Return TRUE if the format context has an accurate duration in any of its streams.
1368 *
1369 * @return TRUE if at least one stream has an accurate duration.
1370 */
1371 static int av_has_duration(AVFormatContext *ic)
1372 {
1373 int i;
1374 AVStream *st;
1375
1376 for(i = 0;i < ic->nb_streams; i++) {
1377 st = ic->streams[i];
1378 if (st->duration != AV_NOPTS_VALUE)
1379 return 1;
1380 }
1381 return 0;
1382 }
1383
1384 /**
1385 * Estimate the global stream timings from the timings of the component streams.
1386 *
1387 * Also computes the global bitrate if possible.
1388 */
1389 static void av_update_stream_timings(AVFormatContext *ic)
1390 {
1391 int64_t start_time, start_time1, end_time, end_time1;
1392 int64_t duration, duration1;
1393 int i;
1394 AVStream *st;
1395
1396 start_time = INT64_MAX;
1397 end_time = INT64_MIN;
1398 duration = INT64_MIN;
1399 for(i = 0;i < ic->nb_streams; i++) {
1400 st = ic->streams[i];
1401 if (st->start_time != AV_NOPTS_VALUE) {
1402 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1403 if (start_time1 < start_time)
1404 start_time = start_time1;
1405 if (st->duration != AV_NOPTS_VALUE) {
1406 end_time1 = start_time1
1407 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1408 if (end_time1 > end_time)
1409 end_time = end_time1;
1410 }
1411 }
1412 if (st->duration != AV_NOPTS_VALUE) {
1413 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1414 if (duration1 > duration)
1415 duration = duration1;
1416 }
1417 }
1418 if (start_time != INT64_MAX) {
1419 ic->start_time = start_time;
1420 if (end_time != INT64_MIN) {
1421 if (end_time - start_time > duration)
1422 duration = end_time - start_time;
1423 }
1424 }
1425 if (duration != INT64_MIN) {
1426 ic->duration = duration;
1427 if (ic->file_size > 0) {
1428 /* compute the bit rate */
1429 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1430 (double)ic->duration;
1431 }
1432 }
1433 }
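
/* Worked example (illustrative): a 7500000 byte file with a duration of
 * 60*AV_TIME_BASE gives a bit rate of 7500000*8*AV_TIME_BASE/(60*AV_TIME_BASE)
 * = 1000000 bit/s. */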
1434
1435 static void fill_all_stream_timings(AVFormatContext *ic)
1436 {
1437 int i;
1438 AVStream *st;
1439
1440 av_update_stream_timings(ic);
1441 for(i = 0;i < ic->nb_streams; i++) {
1442 st = ic->streams[i];
1443 if (st->start_time == AV_NOPTS_VALUE) {
1444 if(ic->start_time != AV_NOPTS_VALUE)
1445 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1446 if(ic->duration != AV_NOPTS_VALUE)
1447 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1448 }
1449 }
1450 }
1451
1452 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1453 {
1454 int64_t filesize, duration;
1455 int bit_rate, i;
1456 AVStream *st;
1457
1458 /* if bit_rate is already set, we believe it */
1459 if (ic->bit_rate == 0) {
1460 bit_rate = 0;
1461 for(i=0;i<ic->nb_streams;i++) {
1462 st = ic->streams[i];
1463 bit_rate += st->codec->bit_rate;
1464 }
1465 ic->bit_rate = bit_rate;
1466 }
1467
1468 /* if duration is already set, we believe it */
1469 if (ic->duration == AV_NOPTS_VALUE &&
1470 ic->bit_rate != 0 &&
1471 ic->file_size != 0) {
1472 filesize = ic->file_size;
1473 if (filesize > 0) {
1474 for(i = 0; i < ic->nb_streams; i++) {
1475 st = ic->streams[i];
1476 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1477 if (st->duration == AV_NOPTS_VALUE)
1478 st->duration = duration;
1479 }
1480 }
1481 }
1482 }
1483
1484 #define DURATION_MAX_READ_SIZE 250000
1485
1486 /* only usable for MPEG-PS streams */
1487 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1488 {
1489 AVPacket pkt1, *pkt = &pkt1;
1490 AVStream *st;
1491 int read_size, i, ret;
1492 int64_t end_time;
1493 int64_t filesize, offset, duration;
1494
1495 /* free previous packet */
1496 if (ic->cur_st && ic->cur_st->parser)
1497 av_free_packet(&ic->cur_pkt);
1498 ic->cur_st = NULL;
1499
1500 /* flush packet queue */
1501 flush_packet_queue(ic);
1502
1503 for(i=0;i<ic->nb_streams;i++) {
1504 st = ic->streams[i];
1505 if (st->parser) {
1506 av_parser_close(st->parser);
1507 st->parser= NULL;
1508 }
1509 }
1510
1511 /* we read the first packets to get the first PTS (not fully
1512 accurate, but it is enough for now) */
1513 url_fseek(&ic->pb, 0, SEEK_SET);
1514 read_size = 0;
1515 for(;;) {
1516 if (read_size >= DURATION_MAX_READ_SIZE)
1517 break;
1518 /* if all info is available, we can stop */
1519 for(i = 0;i < ic->nb_streams; i++) {
1520 st = ic->streams[i];
1521 if (st->start_time == AV_NOPTS_VALUE)
1522 break;
1523 }
1524 if (i == ic->nb_streams)
1525 break;
1526
1527 ret = av_read_packet(ic, pkt);
1528 if (ret != 0)
1529 break;
1530 read_size += pkt->size;
1531 st = ic->streams[pkt->stream_index];
1532 if (pkt->pts != AV_NOPTS_VALUE) {
1533 if (st->start_time == AV_NOPTS_VALUE)
1534 st->start_time = pkt->pts;
1535 }
1536 av_free_packet(pkt);
1537 }
1538
1539 /* estimate the end time (duration) */
1540 /* XXX: may need to support wrapping */
1541 filesize = ic->file_size;
1542 offset = filesize - DURATION_MAX_READ_SIZE;
1543 if (offset < 0)
1544 offset = 0;
1545
1546 url_fseek(&ic->pb, offset, SEEK_SET);
1547 read_size = 0;
1548 for(;;) {
1549 if (read_size >= DURATION_MAX_READ_SIZE)
1550 break;
1551
1552 ret = av_read_packet(ic, pkt);
1553 if (ret != 0)
1554 break;
1555 read_size += pkt->size;
1556 st = ic->streams[pkt->stream_index];
1557 if (pkt->pts != AV_NOPTS_VALUE &&
1558 st->start_time != AV_NOPTS_VALUE) {
1559 end_time = pkt->pts;
1560 duration = end_time - st->start_time;
1561 if (duration > 0) {
1562 if (st->duration == AV_NOPTS_VALUE ||
1563 st->duration < duration)
1564 st->duration = duration;
1565 }
1566 }
1567 av_free_packet(pkt);
1568 }
1569
1570 fill_all_stream_timings(ic);
1571
1572 url_fseek(&ic->pb, old_offset, SEEK_SET);
1573 for(i=0; i<ic->nb_streams; i++){
1574 st= ic->streams[i];
1575 st->cur_dts= st->first_dts;
1576 st->last_IP_pts = AV_NOPTS_VALUE;
1577 }
1578 }
1579
1580 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1581 {
1582 int64_t file_size;
1583
1584 /* get the file size, if possible */
1585 if (ic->iformat->flags & AVFMT_NOFILE) {
1586 file_size = 0;
1587 } else {
1588 file_size = url_fsize(&ic->pb);
1589 if (file_size < 0)
1590 file_size = 0;
1591 }
1592 ic->file_size = file_size;
1593
1594 if ((!strcmp(ic->iformat->name, "mpeg") ||
1595 !strcmp(ic->iformat->name, "mpegts")) &&
1596 file_size && !ic->pb.is_streamed) {
1597 /* get accurate estimate from the PTSes */
1598 av_estimate_timings_from_pts(ic, old_offset);
1599 } else if (av_has_duration(ic)) {
1600 /* at least one component has timings - we use them for all
1601 the components */
1602 fill_all_stream_timings(ic);
1603 } else {
1604 /* less precise: use bit rate info */
1605 av_estimate_timings_from_bit_rate(ic);
1606 }
1607 av_update_stream_timings(ic);
1608
1609 #if 0
1610 {
1611 int i;
1612 AVStream *st;
1613 for(i = 0;i < ic->nb_streams; i++) {
1614 st = ic->streams[i];
1615 printf("%d: start_time: %0.3f duration: %0.3f\n",
1616 i, (double)st->start_time / AV_TIME_BASE,
1617 (double)st->duration / AV_TIME_BASE);
1618 }
1619 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1620 (double)ic->start_time / AV_TIME_BASE,
1621 (double)ic->duration / AV_TIME_BASE,
1622 ic->bit_rate / 1000);
1623 }
1624 #endif
1625 }
1626
1627 static int has_codec_parameters(AVCodecContext *enc)
1628 {
1629 int val;
1630 switch(enc->codec_type) {
1631 case CODEC_TYPE_AUDIO:
1632 val = enc->sample_rate;
1633 break;
1634 case CODEC_TYPE_VIDEO:
1635 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1636 break;
1637 default:
1638 val = 1;
1639 break;
1640 }
1641 return (enc->codec_id != CODEC_ID_NONE && val != 0);
1642 }
1643
1644 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1645 {
1646 int16_t *samples;
1647 AVCodec *codec;
1648 int got_picture, data_size, ret=0;
1649 AVFrame picture;
1650
1651 if(!st->codec->codec){
1652 codec = avcodec_find_decoder(st->codec->codec_id);
1653 if (!codec)
1654 return -1;
1655 ret = avcodec_open(st->codec, codec);
1656 if (ret < 0)
1657 return ret;
1658 }
1659
1660 if(!has_codec_parameters(st->codec)){
1661 switch(st->codec->codec_type) {
1662 case CODEC_TYPE_VIDEO:
1663 ret = avcodec_decode_video(st->codec, &picture,
1664 &got_picture, (uint8_t *)data, size);
1665 break;
1666 case CODEC_TYPE_AUDIO:
1667 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1668 samples = av_malloc(data_size);
1669 if (!samples)
1670 goto fail;
1671 ret = avcodec_decode_audio2(st->codec, samples,
1672 &data_size, (uint8_t *)data, size);
1673 av_free(samples);
1674 break;
1675 default:
1676 break;
1677 }
1678 }
1679 fail:
1680 return ret;
1681 }
1682
1683 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
1684 {
1685 AVInputFormat *fmt;
1686 fmt = av_probe_input_format2(pd, 1, &score);
1687
1688 if (fmt) {
1689 if (strncmp(fmt->name, "mp3", 3) == 0)
1690 st->codec->codec_id = CODEC_ID_MP3;
1691 else if (strncmp(fmt->name, "ac3", 3) == 0)
1692 st->codec->codec_id = CODEC_ID_AC3;
1693 }
1694 return !!fmt;
1695 }
1696
1697 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1698 {
1699 while (tags->id != CODEC_ID_NONE) {
1700 if (tags->id == id)
1701 return tags->tag;
1702 tags++;
1703 }
1704 return 0;
1705 }
1706
1707 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1708 {
1709 int i;
1710 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1711 if(tag == tags[i].tag)
1712 return tags[i].id;
1713 }
1714 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1715 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1716 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1717 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1718 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1719 return tags[i].id;
1720 }
1721 return CODEC_ID_NONE;
1722 }
1723
1724 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1725 {
1726 int i;
1727 for(i=0; tags && tags[i]; i++){
1728 int tag= codec_get_tag(tags[i], id);
1729 if(tag) return tag;
1730 }
1731 return 0;
1732 }
1733
1734 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1735 {
1736 int i;
1737 for(i=0; tags && tags[i]; i++){
1738 enum CodecID id= codec_get_id(tags[i], tag);
1739 if(id!=CODEC_ID_NONE) return id;
1740 }
1741 return CODEC_ID_NONE;
1742 }
1743
1744 /* absolute maximum size we read until we abort */
1745 #define MAX_READ_SIZE 5000000
1746
1747 #define MAX_STD_TIMEBASES (60*12+5)
1748 static int get_std_framerate(int i){
1749 if(i<60*12) return i*1001;
1750 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1751 }
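
/* Illustrative note: for i < 60*12 the candidate frame rates are i/12 fps in
 * 1/12 fps steps (the returned value is fps*12*1001); the remaining entries
 * correspond to the NTSC-style rates 24000/1001, 30000/1001, 60000/1001,
 * 12000/1001 and 15000/1001. */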
1752
1753 int av_find_stream_info(AVFormatContext *ic)
1754 {
1755 int i, count, ret, read_size, j;
1756 AVStream *st;
1757 AVPacket pkt1, *pkt;
1758 int64_t last_dts[MAX_STREAMS];
1759 int duration_count[MAX_STREAMS]={0};
1760 double (*duration_error)[MAX_STD_TIMEBASES];
1761 offset_t old_offset = url_ftell(&ic->pb);
1762 int64_t codec_info_duration[MAX_STREAMS]={0};
1763 int codec_info_nb_frames[MAX_STREAMS]={0};
1764 AVProbeData probe_data[MAX_STREAMS];
1765 int codec_identified[MAX_STREAMS]={0};
1766
1767 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1768 if (!duration_error) return AVERROR(ENOMEM);
1769
1770 for(i=0;i<ic->nb_streams;i++) {
1771 st = ic->streams[i];
1772 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1773 /* if(!st->time_base.num)
1774 st->time_base= */
1775 if(!st->codec->time_base.num)
1776 st->codec->time_base= st->time_base;
1777 }
1778 //only for the split stuff
1779 if (!st->parser) {
1780 st->parser = av_parser_init(st->codec->codec_id);
1781 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
1782 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1783 }
1784 }
1785 }
1786
1787 for(i=0;i<MAX_STREAMS;i++){
1788 last_dts[i]= AV_NOPTS_VALUE;
1789 }
1790
1791 memset(probe_data, 0, sizeof(probe_data));
1792 count = 0;
1793 read_size = 0;
1794 for(;;) {
1795 /* check if one codec still needs to be handled */
1796 for(i=0;i<ic->nb_streams;i++) {
1797 st = ic->streams[i];
1798 if (!has_codec_parameters(st->codec))
1799 break;
1800 /* variable fps and no guess at the real fps */
1801 if( (st->codec->time_base.den >= 101LL*st->codec->time_base.num || st->codec->codec_id == CODEC_ID_MPEG2VIDEO)
1802 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1803 break;
1804 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1805 break;
1806 if(st->first_dts == AV_NOPTS_VALUE)
1807 break;
1808 }
1809 if (i == ic->nb_streams) {
1810 /* NOTE: if the format has no header, then we need to read
1811 some packets to get most of the streams, so we cannot
1812 stop here */
1813 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1814 /* if we found the info for all the codecs, we can stop */
1815 ret = count;
1816 break;
1817 }
1818 }
1819 /* we did not get all the codec info, but we read too much data */
1820 if (read_size >= MAX_READ_SIZE) {
1821 ret = count;
1822 break;
1823 }
1824
1825 /* NOTE: a new stream can be added there if no header in file
1826 (AVFMTCTX_NOHEADER) */
1827 ret = av_read_frame_internal(ic, &pkt1);
1828 if (ret < 0) {
1829 /* EOF or error */
1830 ret = -1; /* we could not get all the codec parameters before EOF */
1831 for(i=0;i<ic->nb_streams;i++) {
1832 st = ic->streams[i];
1833 if (!has_codec_parameters(st->codec)){
1834 char buf[256];
1835 avcodec_string(buf, sizeof(buf), st->codec, 0);
1836 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1837 } else {
1838 ret = 0;
1839 }
1840 }
1841 break;
1842 }
1843
1844 pkt= add_to_pktbuf(ic, &pkt1);
1845 if(av_dup_packet(pkt) < 0)
1846 return AVERROR(ENOMEM);
1847
1848 read_size += pkt->size;
1849
1850 st = ic->streams[pkt->stream_index];
1851 if(codec_info_nb_frames[st->index]>1)
1852 codec_info_duration[st->index] += pkt->duration;
1853 if (pkt->duration != 0)
1854 codec_info_nb_frames[st->index]++;
1855
1856 {
1857 int index= pkt->stream_index;
1858 int64_t last= last_dts[index];
1859 int64_t duration= pkt->dts - last;
1860
1861 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1862 double dur= duration * av_q2d(st->time_base);
1863
1864 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1865 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
1866 if(duration_count[index] < 2)
1867 memset(duration_error, 0, MAX_STREAMS * sizeof(*duration_error));
1868 for(i=1; i<MAX_STD_TIMEBASES; i++){
1869 int framerate= get_std_framerate(i);
1870 int ticks= lrintf(dur*framerate/(1001*12));
1871 double error= dur - ticks*1001*12/(double)framerate;
1872 duration_error[index][i] += error*error;
1873 }
1874 duration_count[index]++;
1875 }
1876 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
1877 last_dts[pkt->stream_index]= pkt->dts;
1878
1879 if (st->codec->codec_id == CODEC_ID_NONE) {
1880 AVProbeData *pd = &(probe_data[st->index]);
1881 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
1882 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
1883 pd->buf_size += pkt->size;
1884 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
1885 }
1886 }
1887 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1888 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1889 if(i){
1890 st->codec->extradata_size= i;
1891 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1892 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1893 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1894 }
1895 }
1896
1897 /* if still no information, we try to open the codec and to
1898 decompress the frame. We try to avoid that in most cases as
1899 it takes longer and uses more memory. For MPEG-4 in QuickTime files we
1900 do need to decompress to obtain the parameters. */
1901 if (!has_codec_parameters(st->codec) /*&&
1902 (st->codec->codec_id == CODEC_ID_FLV1 ||
1903 st->codec->codec_id == CODEC_ID_H264 ||
1904 st->codec->codec_id == CODEC_ID_H263 ||
1905 st->codec->codec_id == CODEC_ID_H261 ||
1906 st->codec->codec_id == CODEC_ID_VORBIS ||
1907 st->codec->codec_id == CODEC_ID_MJPEG ||
1908 st->codec->codec_id == CODEC_ID_PNG ||
1909 st->codec->codec_id == CODEC_ID_PAM ||
1910 st->codec->codec_id == CODEC_ID_PGM ||
1911 st->codec->codec_id == CODEC_ID_PGMYUV ||
1912 st->codec->codec_id == CODEC_ID_PBM ||
1913 st->codec->codec_id == CODEC_ID_PPM ||
1914 st->codec->codec_id == CODEC_ID_SHORTEN ||
1915 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1916 try_decode_frame(st, pkt->data, pkt->size);
1917
1918 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
1919 break;
1920 }
1921 count++;
1922 }
1923
1924 // close codecs which were opened in try_decode_frame()
1925 for(i=0;i<ic->nb_streams;i++) {
1926 st = ic->streams[i];
1927 if(st->codec->codec)
1928 avcodec_close(st->codec);
1929 }
1930 for(i=0;i<ic->nb_streams;i++) {
1931 st = ic->streams[i];
1932 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1933 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
1934 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
1935
1936 if(duration_count[i]
1937 && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) /*&&
1938 //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
1939 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
1940 double best_error= 2*av_q2d(st->time_base);
1941 best_error= best_error*best_error*duration_count[i]*1000*12*30;
1942
1943 for(j=1; j<MAX_STD_TIMEBASES; j++){
1944 double error= duration_error[i][j] * get_std_framerate(j);
1945 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1946 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
1947 if(error < best_error){
1948 best_error= error;
1949 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
1950 }
1951 }
1952 }
1953
1954 if (!st->r_frame_rate.num){
1955 if( st->codec->time_base.den * (int64_t)st->time_base.num
1956 <= st->codec->time_base.num * (int64_t)st->time_base.den){
1957 st->r_frame_rate.num = st->codec->time_base.den;
1958 st->r_frame_rate.den = st->codec->time_base.num;
1959 }else{
1960 st->r_frame_rate.num = st->time_base.den;
1961 st->r_frame_rate.den = st->time_base.num;
1962 }
1963 }
1964 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
1965 if (st->codec->codec_id == CODEC_ID_NONE && probe_data[st->index].buf_size > 0) {
1966 codec_identified[st->index] = set_codec_from_probe_data(st, &(probe_data[st->index]), 1);
1967 if (codec_identified[st->index]) {
1968 st->need_parsing = AVSTREAM_PARSE_FULL;
1969 }
1970 }
1971 if(!st->codec->bits_per_sample)
1972 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
1973 }
1974 }
1975
1976 av_estimate_timings(ic, old_offset);
1977
1978 for(i=0;i<ic->nb_streams;i++) {
1979 st = ic->streams[i];
1980 if (codec_identified[st->index])
1981 break;
1982 }
1983 //FIXME this is a mess
1984 if(i!=ic->nb_streams){
1985 av_read_frame_flush(ic);
1986 for(i=0;i<ic->nb_streams;i++) {
1987 st = ic->streams[i];
1988 if (codec_identified[st->index]) {
1989 av_seek_frame(ic, st->index, 0.0, 0);
1990 }
1991 st->cur_dts= st->first_dts;
1992 }
1993 url_fseek(&ic->pb, ic->data_offset, SEEK_SET);
1994 }
1995
1996 #if 0
1997 /* correct DTS for B-frame streams with no timestamps */
1998 for(i=0;i<ic->nb_streams;i++) {
1999 st = ic->streams[i];
2000 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2001 if(b-frames){
2002 ppktl = &ic->packet_buffer;
2003 while(ppkt1){
2004 if(ppkt1->stream_index != i)
2005 continue;
2006 if(ppkt1->pkt->dts < 0)
2007 break;
2008 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2009 break;
2010 ppkt1->pkt->dts -= delta;
2011 ppkt1= ppkt1->next;
2012 }
2013 if(ppkt1)
2014 continue;
2015 st->cur_dts -= delta;
2016 }
2017 }
2018 }
2019 #endif
2020
2021 av_free(duration_error);
2022 for(i=0;i<MAX_STREAMS;i++){
2023 av_freep(&(probe_data[i].buf));
2024 }
2025
2026 return ret;
2027 }
2028
2029 /*******************************************************/
2030
2031 int av_read_play(AVFormatContext *s)
2032 {
2033 if (!s->iformat->read_play)
2034 return AVERROR(ENOSYS);
2035 return s->iformat->read_play(s);
2036 }
2037
2038 int av_read_pause(AVFormatContext *s)
2039 {
2040 if (!s->iformat->read_pause)
2041 return AVERROR(ENOSYS);
2042 return s->iformat->read_pause(s);
2043 }
2044
2045 void av_close_input_file(AVFormatContext *s)
2046 {
2047 int i, must_open_file;
2048 AVStream *st;
2049
2050 /* free previous packet */
2051 if (s->cur_st && s->cur_st->parser)
2052 av_free_packet(&s->cur_pkt);
2053
2054 if (s->iformat->read_close)
2055 s->iformat->read_close(s);
2056 for(i=0;i<s->nb_streams;i++) {
2057 /* free all data in a stream component */
2058 st = s->streams[i];
2059 if (st->parser) {
2060 av_parser_close(st->parser);
2061 }
2062 av_free(st->index_entries);
2063 av_free(st->codec->extradata);
2064 av_free(st->codec);
2065 av_free(st);
2066 }
2067 for(i=s->nb_programs-1; i>=0; i--) {
2068 av_freep(&s->programs[i]->provider_name);
2069 av_freep(&s->programs[i]->name);
2070 av_freep(&s->programs[i]);
2071 }
2072 flush_packet_queue(s);
2073 must_open_file = 1;
2074 if (s->iformat->flags & AVFMT_NOFILE) {
2075 must_open_file = 0;
2076 }
2077 if (must_open_file) {
2078 url_fclose(&s->pb);
2079 }
2080 av_freep(&s->priv_data);
2081 av_free(s);
2082 }
2083
2084 AVStream *av_new_stream(AVFormatContext *s, int id)
2085 {
2086 AVStream *st;
2087 int i;
2088
2089 if (s->nb_streams >= MAX_STREAMS)
2090 return NULL;
2091
2092 st = av_mallocz(sizeof(AVStream));
2093 if (!st)
2094 return NULL;
2095
2096 st->codec= avcodec_alloc_context();
2097 if (s->iformat) {
2098 /* no default bitrate if decoding */
2099 st->codec->bit_rate = 0;
2100 }
2101 st->index = s->nb_streams;
2102 st->id = id;
2103 st->start_time = AV_NOPTS_VALUE;
2104 st->duration = AV_NOPTS_VALUE;
2105 st->cur_dts = AV_NOPTS_VALUE;
2106 st->first_dts = AV_NOPTS_VALUE;
2107
2108 /* default pts settings are MPEG-like */
2109 av_set_pts_info(st, 33, 1, 90000);
2110 st->last_IP_pts = AV_NOPTS_VALUE;
2111 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2112 st->pts_buffer[i]= AV_NOPTS_VALUE;
2113
2114 s->streams[s->nb_streams++] = st;
2115 return st;
2116 }
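/* Illustrative usage when building an output file (a sketch only; the output
 * context "oc" and the parameter values are assumptions, not taken from this
 * file):
 *
 *     AVStream *vst = av_new_stream(oc, 0);
 *     if (!vst)
 *         return AVERROR(ENOMEM);            // MAX_STREAMS reached or OOM
 *     vst->codec->codec_type = CODEC_TYPE_VIDEO;
 *     vst->codec->codec_id   = CODEC_ID_MPEG4;
 *     vst->codec->width      = 352;
 *     vst->codec->height     = 288;
 *     vst->codec->time_base  = (AVRational){1, 25};
 */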
2117
2118 AVProgram *av_new_program(AVFormatContext *ac, int id)
2119 {
2120 AVProgram *program=NULL;
2121 int i;
2122
2123 #ifdef DEBUG_SI
2124 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2125 #endif
2126
2127 for(i=0; i<ac->nb_programs; i++)
2128 if(ac->programs[i]->id == id)
2129 program = ac->programs[i];
2130
2131 if(!program){
2132 program = av_mallocz(sizeof(AVProgram));
2133 if (!program)
2134 return NULL;
2135 dynarray_add(&ac->programs, &ac->nb_programs, program);
2136 program->discard = AVDISCARD_NONE;
2137 }
2138 program->id = id;
2139
2140 return program;
2141 }
2142
2143 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2144 {
2145 assert(!provider_name == !name);
2146 if(name){
2147 av_free(program->provider_name);
2148 av_free(program-> name);
2149 program->provider_name = av_strdup(provider_name);
2150 program-> name = av_strdup( name);
2151 }
2152 }
2153
2154
2155 /************************************************************/
2156 /* output media file */
2157
2158 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2159 {
2160 int ret;
2161
2162 if (s->oformat->priv_data_size > 0) {
2163 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2164 if (!s->priv_data)
2165 return AVERROR(ENOMEM);
2166 } else
2167 s->priv_data = NULL;
2168
2169 if (s->oformat->set_parameters) {
2170 ret = s->oformat->set_parameters(s, ap);
2171 if (ret < 0)
2172 return ret;
2173 }
2174 return 0;
2175 }
2176
2177 int av_write_header(AVFormatContext *s)
2178 {
2179 int ret, i;
2180 AVStream *st;
2181
2182 // some sanity checks
2183 for(i=0;i<s->nb_streams;i++) {
2184 st = s->streams[i];
2185
2186 switch (st->codec->codec_type) {
2187 case CODEC_TYPE_AUDIO:
2188 if(st->codec->sample_rate<=0){
2189 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2190 return -1;
2191 }
2192 break;
2193 case CODEC_TYPE_VIDEO:
2194 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2195 av_log(s, AV_LOG_ERROR, "time base not set\n");
2196 return -1;
2197 }
2198 if(st->codec->width<=0 || st->codec->height<=0){
2199 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2200 return -1;
2201 }
2202 break;
2203 }
2204
2205 if(s->oformat->codec_tag){
2206 if(st->codec->codec_tag){
2207 //FIXME
2208 //check that tag + id is in the table
2209 //if neither is in the table -> ok
2210 //if tag is in the table with another id -> FAIL
2211 //if id is in the table with another tag -> FAIL unless strict < ?
2212 }else
2213 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2214 }
2215 }
2216
2217 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2218 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2219 if (!s->priv_data)
2220 return AVERROR(ENOMEM);
2221 }
2222
2223 if(s->oformat->write_header){
2224 ret = s->oformat->write_header(s);
2225 if (ret < 0)
2226 return ret;
2227 }
2228
2229 /* init PTS generation */
2230 for(i=0;i<s->nb_streams;i++) {
2231 int64_t den = AV_NOPTS_VALUE;
2232 st = s->streams[i];
2233
2234 switch (st->codec->codec_type) {
2235 case CODEC_TYPE_AUDIO:
2236 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2237 break;
2238 case CODEC_TYPE_VIDEO:
2239 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2240 break;
2241 default:
2242 break;
2243 }
2244 if (den != AV_NOPTS_VALUE) {
2245 if (den <= 0)
2246 return AVERROR_INVALIDDATA;
2247 av_frac_init(&st->pts, 0, 0, den);
2248 }
2249 }
2250 return 0;
2251 }
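/* Worked example of the PTS denominator above (values are illustrative): for
 * an audio stream with time_base 1/44100 and sample_rate 44100, den becomes
 * 1 * 44100 = 44100; for a video stream with time_base 1/25 and codec
 * time_base 1/25, den becomes 1 * 25 = 25. av_frac_init(&st->pts, 0, 0, den)
 * then seeds the fractional PTS counter that compute_pkt_fields2() advances
 * per packet. */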
2252
2253 //FIXME merge with compute_pkt_fields
2254 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2255 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2256 int num, den, frame_size, i;
2257
2258 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2259
2260 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2261 return -1;*/
2262
2263 /* duration field */
2264 if (pkt->duration == 0) {
2265 compute_frame_duration(&num, &den, st, NULL, pkt);
2266 if (den && num) {
2267 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2268 }
2269 }
2270
2271 //XXX/FIXME this is a temporary hack until all encoders output pts
2272 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2273 pkt->dts=
2274 // pkt->pts= st->cur_dts;
2275 pkt->pts= st->pts.val;
2276 }
2277
2278 //calculate dts from pts
2279 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2280 st->pts_buffer[0]= pkt->pts;
2281 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2282 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2283 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2284 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2285
2286 pkt->dts= st->pts_buffer[0];
2287 }
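/* Worked example of the reordering above (illustrative numbers): with
 * delay=1, duration=1 and pts arriving in coded order 0, 2, 1, 4, 3
 * (I P B P B), pts_buffer keeps the smallest pending pts first and the
 * emitted dts sequence is -1, 0, 1, 2, 3 -- monotonically increasing and
 * never larger than the corresponding pts. */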
2288
2289 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2290 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2291 return -1;
2292 }
2293 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2294 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2295 return -1;
2296 }
2297
2298 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2299 st->cur_dts= pkt->dts;
2300 st->pts.val= pkt->dts;
2301
2302 /* update pts */
2303 switch (st->codec->codec_type) {
2304 case CODEC_TYPE_AUDIO:
2305 frame_size = get_audio_frame_size(st->codec, pkt->size);
2306
2307 /* HACK/FIXME: we skip the initial zero-sized packets as they most likely correspond to the encoder delay,
2308 but it would be better if we had the real timestamps from the encoder */
2309 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2310 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2311 }
2312 break;
2313 case CODEC_TYPE_VIDEO:
2314 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2315 break;
2316 default:
2317 break;
2318 }
2319 return 0;
2320 }
2321
2322 static void truncate_ts(AVStream *st, AVPacket *pkt){
2323 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2324
2325 // if(pkt->dts < 0)
2326 // pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2327
2328 if (pkt->pts != AV_NOPTS_VALUE)
2329 pkt->pts &= pts_mask;
2330 if (pkt->dts != AV_NOPTS_VALUE)
2331 pkt->dts &= pts_mask;
2332 }
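/* Example of the mask above: with the MPEG-style default pts_wrap_bits=33,
 * pts_mask = (2LL << 32) - 1 = 0x1FFFFFFFF, so timestamps are reduced
 * modulo 2^33. */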
2333
2334 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2335 {
2336 int ret;
2337
2338 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2339 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2340 return ret;
2341
2342 truncate_ts(s->streams[pkt->stream_index], pkt);
2343
2344 ret= s->oformat->write_packet(s, pkt);
2345 if(!ret)
2346 ret= url_ferror(&s->pb);
2347 return ret;
2348 }
2349
2350 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2351 AVPacketList *pktl, **next_point, *this_pktl;
2352 int stream_count=0;
2353 int streams[MAX_STREAMS];
2354
2355 if(pkt){
2356 AVStream *st= s->streams[ pkt->stream_index];
2357
2358 // assert(pkt->destruct != av_destruct_packet); //FIXME
2359
2360 this_pktl = av_mallocz(sizeof(AVPacketList));
2361 this_pktl->pkt= *pkt;
2362 if(pkt->destruct == av_destruct_packet)
2363 pkt->destruct= NULL; // non shared -> must keep original from being freed
2364 else
2365 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2366
2367 next_point = &s->packet_buffer;
2368 while(*next_point){
2369 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2370 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2371 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2372 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2373 break;
2374 next_point= &(*next_point)->next;
2375 }
2376 this_pktl->next= *next_point;
2377 *next_point= this_pktl;
2378 }
2379
2380 memset(streams, 0, sizeof(streams));
2381 pktl= s->packet_buffer;
2382 while(pktl){
2383 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2384 if(streams[ pktl->pkt.stream_index ] == 0)
2385 stream_count++;
2386 streams[ pktl->pkt.stream_index ]++;
2387 pktl= pktl->next;
2388 }
2389
2390 if(s->nb_streams == stream_count || (flush && stream_count)){
2391 pktl= s->packet_buffer;
2392 *out= pktl->pkt;
2393
2394 s->packet_buffer= pktl->next;
2395 av_freep(&pktl);
2396 return 1;
2397 }else{
2398 av_init_packet(out);
2399 return 0;
2400 }
2401 }
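/* The insertion test above compares timestamps across streams in a common
 * unit: buffered_dts * tb2 is weighed against new_dts * tb1 after both sides
 * are multiplied by the two time_base denominators. Illustrative numbers: a
 * buffered packet at dts=500 with time_base 1/1000 (0.5 s) sorts before a
 * new packet at dts=90000 with time_base 1/90000 (1.0 s). */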
2402
2403 /**
2404 * Interleaves an AVPacket correctly so that it can be muxed.
2405 * @param out the interleaved packet will be output here
2406 * @param in the input packet
2407 * @param flush 1 if no further packets are available as input and all
2408 * remaining packets should be output
2409 * @return 1 if a packet was output, 0 if no packet could be output,
2410 * < 0 if an error occurred
2411 */
2412 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2413 if(s->oformat->interleave_packet)
2414 return s->oformat->interleave_packet(s, out, in, flush);
2415 else
2416 return av_interleave_packet_per_dts(s, out, in, flush);
2417 }
2418
2419 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2420 AVStream *st= s->streams[ pkt->stream_index];
2421
2422 //FIXME/XXX/HACK drop zero sized packets
2423 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2424 return 0;
2425
2426 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2427 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2428 return -1;
2429
2430 if(pkt->dts == AV_NOPTS_VALUE)
2431 return -1;
2432
2433 for(;;){
2434 AVPacket opkt;
2435 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2436 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2437 return ret;
2438
2439 truncate_ts(s->streams[opkt.stream_index], &opkt);
2440 ret= s->oformat->write_packet(s, &opkt);
2441
2442 av_free_packet(&opkt);
2443 pkt= NULL;
2444
2445 if(ret<0)
2446 return ret;
2447 if(url_ferror(&s->pb))
2448 return url_ferror(&s->pb);
2449 }
2450 }
2451
2452 int av_write_trailer(AVFormatContext *s)
2453 {
2454 int ret, i;
2455
2456 for(;;){
2457 AVPacket pkt;
2458 ret= av_interleave_packet(s, &pkt, NULL, 1);
2459 if(ret<0) //FIXME cleanup needed for ret<0 ?
2460 goto fail;
2461 if(!ret)
2462 break;
2463
2464 truncate_ts(s->streams[pkt.stream_index], &pkt);
2465 ret= s->oformat->write_packet(s, &pkt);
2466
2467 av_free_packet(&pkt);
2468
2469 if(ret<0)
2470 goto fail;
2471 if(url_ferror(&s->pb))
2472 goto fail;
2473 }
2474
2475 if(s->oformat->write_trailer)
2476 ret = s->oformat->write_trailer(s);
2477 fail:
2478 if(ret == 0)
2479 ret=url_ferror(&s->pb);
2480 for(i=0;i<s->nb_streams;i++)
2481 av_freep(&s->streams[i]->priv_data);
2482 av_freep(&s->priv_data);
2483 return ret;
2484 }
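/* Typical muxing call order using the functions above (a hedged sketch; the
 * output context "oc" and the packet source are assumptions):
 *
 *     av_set_parameters(oc, NULL);
 *     av_write_header(oc);
 *     while (get_next_packet(&pkt) == 0)      // hypothetical packet source
 *         av_interleaved_write_frame(oc, &pkt);
 *     av_write_trailer(oc);
 */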
2485
2486 /* "user interface" functions */
2487
2488 void dump_format(AVFormatContext *ic,
2489 int index,
2490 const char *url,
2491 int is_output)
2492 {
2493 int i, flags;
2494 char buf[256];
2495
2496 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2497 is_output ? "Output" : "Input",
2498 index,
2499 is_output ? ic->oformat->name : ic->iformat->name,
2500 is_output ? "to" : "from", url);
2501 if (!is_output) {
2502 av_log(NULL, AV_LOG_INFO, " Duration: ");
2503 if (ic->duration != AV_NOPTS_VALUE) {
2504 int hours, mins, secs, us;
2505 secs = ic->duration / AV_TIME_BASE;
2506 us = ic->duration % AV_TIME_BASE;
2507 mins = secs / 60;
2508 secs %= 60;
2509 hours = mins / 60;
2510 mins %= 60;
2511 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2512 (10 * us) / AV_TIME_BASE);
2513 } else {
2514 av_log(NULL, AV_LOG_INFO, "N/A");
2515 }
2516 if (ic->start_time != AV_NOPTS_VALUE) {
2517 int secs, us;
2518 av_log(NULL, AV_LOG_INFO, ", start: ");
2519 secs = ic->start_time / AV_TIME_BASE;
2520 us = ic->start_time % AV_TIME_BASE;
2521 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2522 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2523 }
2524 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2525 if (ic->bit_rate) {
2526 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2527 } else {
2528 av_log(NULL, AV_LOG_INFO, "N/A");
2529 }
2530 av_log(NULL, AV_LOG_INFO, "\n");
2531 }
2532 for(i=0;i<ic->nb_streams;i++) {
2533 AVStream *st = ic->streams[i];
2534 int g= ff_gcd(st->time_base.num, st->time_base.den);
2535 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2536 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2537 /* the pid is important information, so we display it */
2538 /* XXX: add a generic system */
2539 if (is_output)
2540 flags = ic->oformat->flags;
2541 else
2542 flags = ic->iformat->flags;
2543 if (flags & AVFMT_SHOW_IDS) {
2544 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2545 }
2546 if (strlen(st->language) > 0) {
2547 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2548 }
2549 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2550 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2551 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2552 if(st->r_frame_rate.den && st->r_frame_rate.num)
2553 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
2554 /* else if(st->time_base.den && st->time_base.num)
2555 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
2556 else
2557 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
2558 }
2559 av_log(NULL, AV_LOG_INFO, "\n");
2560 }
2561 }
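/* Illustrative call ("filename" is an assumption): after av_open_input_file()
 * and av_find_stream_info() a demuxing application would typically run
 *
 *     dump_format(ic, 0, filename, 0);
 *
 * to print the duration, bitrate and per-stream summary produced above. */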
2562
2563 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2564 {
2565 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2566 }
2567
2568 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2569 {
2570 AVRational frame_rate;
2571 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2572 *frame_rate_num= frame_rate.num;
2573 *frame_rate_den= frame_rate.den;
2574 return ret;
2575 }
2576
2577 /**
2578 * Gets the current time in microseconds.
2579 */
2580 int64_t av_gettime(void)
2581 {
2582 struct timeval tv;
2583 gettimeofday(&tv,NULL);
2584 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2585 }
2586
2587 int64_t parse_date(const char *datestr, int duration)
2588 {
2589 const char *p;
2590 int64_t t;
2591 struct tm dt;
2592 int i;
2593 static const char *date_fmt[] = {
2594 "%Y-%m-%d",
2595 "%Y%m%d",
2596 };
2597 static const char *time_fmt[] = {
2598 "%H:%M:%S",
2599 "%H%M%S",
2600 };
2601 const char *q;
2602 int is_utc, len;
2603 char lastch;
2604 int negative = 0;
2605
2606 #undef time
2607 time_t now = time(0);
2608
2609 len = strlen(datestr);
2610 if (len > 0)
2611 lastch = datestr[len - 1];
2612 else
2613 lastch = '\0';
2614 is_utc = (lastch == 'z' || lastch == 'Z');
2615
2616 memset(&dt, 0, sizeof(dt));
2617
2618 p = datestr;
2619 q = NULL;
2620 if (!duration) {
2621 /* parse the year-month-day part */
2622 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2623 q = small_strptime(p, date_fmt[i], &dt);
2624 if (q) {
2625 break;
2626 }
2627 }
2628
2629 /* if the year-month-day part is missing, then use the
2630 * current year, month and day */
2631 if (!q) {
2632 if (is_utc) {
2633 dt = *gmtime(&now);
2634 } else {
2635 dt = *localtime(&now);
2636 }
2637 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2638 } else {
2639 p = q;
2640 }
2641
2642 if (*p == 'T' || *p == 't' || *p == ' ')
2643 p++;
2644
2645 /* parse the hour-minute-second part */
2646 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2647 q = small_strptime(p, time_fmt[i], &dt);
2648 if (q) {
2649 break;
2650 }
2651 }
2652 } else {
2653 /* parse datestr as a duration */
2654 if (p[0] == '-') {
2655 negative = 1;
2656 ++p;
2657 }
2658 /* parse datestr as HH:MM:SS */
2659 q = small_strptime(p, time_fmt[0], &dt);
2660 if (!q) {
2661 /* parse datestr as S+ */
2662 dt.tm_sec = strtol(p, (char **)&q, 10);
2663 if (q == p)
2664 /* the parsing didn't succeed */
2665 return INT64_MIN;
2666 dt.tm_min = 0;
2667 dt.tm_hour = 0;
2668 }
2669 }
2670
2671 /* Now we have all the fields that we can get */
2672 if (!q) {
2673 return INT64_MIN;
2674 }
2675
2676 if (duration) {
2677 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2678 } else {
2679 dt.tm_isdst = -1; /* unknown */
2680 if (is_utc) {
2681 t = mktimegm(&dt);
2682 } else {
2683 t = mktime(&dt);
2684 }
2685 }
2686
2687 t *= 1000000;
2688
2689 /* parse the .m... part */
2690 if (*q == '.') {
2691 int val, n;
2692 q++;
2693 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2694 if (!isdigit(*q))
2695 break;
2696 val += n * (*q - '0');
2697 }
2698 t += val;
2699 }
2700 return negative ? -t : t;
2701 }
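/* Examples of accepted input (editor's sketch of the grammar implemented
 * above): with duration=0, "2007-03-04T12:30:45Z" is parsed as a UTC date,
 * and "12:30:45" alone uses today's date; with duration=1, "45.5" yields
 * 45500000 (microseconds) and "-1:00:00" yields -3600000000. */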
2702
2703 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2704 {
2705 const char *p;
2706 char tag[128], *q;
2707
2708 p = info;
2709 if (*p == '?')
2710 p++;
2711 for(;;) {
2712 q = tag;
2713 while (*p != '\0' && *p != '=' && *p != '&') {
2714 if ((q - tag) < sizeof(tag) - 1)
2715 *q++ = *p;
2716 p++;
2717 }
2718 *q = '\0';
2719 q = arg;
2720 if (*p == '=') {
2721 p++;
2722 while (*p != '&' && *p != '\0') {
2723 if ((q - arg) < arg_size - 1) {
2724 if (*p == '+')
2725 *q++ = ' ';
2726 else
2727 *q++ = *p;
2728 }
2729 p++;
2730 }
2731 *q = '\0';
2732 }
2733 if (!strcmp(tag, tag1))
2734 return 1;
2735 if (*p != '&')
2736 break;
2737 p++;
2738 }
2739 return 0;
2740 }
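/* Example (hypothetical query string): with info = "?codec=mp3&rate=44100",
 * find_info_tag(buf, sizeof(buf), "codec", info) returns 1 and stores "mp3"
 * in buf; '+' characters in a value are turned into spaces. */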
2741
2742 int av_get_frame_filename(char *buf, int buf_size,
2743 const char *path, int number)
2744 {
2745 const char *p;
2746 char *q, buf1[20], c;
2747 int nd, len, percentd_found;
2748
2749 q = buf;
2750 p = path;
2751 percentd_found = 0;
2752 for(;;) {
2753 c = *p++;
2754 if (c == '\0')
2755 break;
2756 if (c == '%') {
2757 do {
2758 nd = 0;
2759 while (isdigit(*p)) {
2760 nd = nd * 10 + *p++ - '0';
2761 }
2762 c = *p++;
2763 } while (isdigit(c));
2764
2765 switch(c) {
2766 case '%':
2767 goto addchar;
2768 case 'd':
2769 if (percentd_found)
2770 goto fail;
2771 percentd_found = 1;
2772 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2773 len = strlen(buf1);
2774 if ((q - buf + len) > buf_size - 1)
2775 goto fail;
2776 memcpy(q, buf1, len);
2777 q += len;
2778 break;
2779 default:
2780 goto fail;
2781 }
2782 } else {
2783 addchar:
2784 if ((q - buf) < buf_size - 1)
2785 *q++ = c;
2786 }
2787 }
2788 if (!percentd_found)
2789 goto fail;
2790 *q = '\0';
2791 return 0;
2792 fail:
2793 *q = '\0';
2794 return -1;
2795 }
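/* Example: av_get_frame_filename(buf, sizeof(buf), "img%03d.jpg", 7)
 * produces "img007.jpg"; paths containing no %d, or more than one, make the
 * function fail and return -1. */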
2796
2797 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
2798 {
2799 int len, i, j, c;
2800 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2801
2802 for(i=0;i<size;i+=16) {
2803 len = size - i;
2804 if (len > 16)
2805 len = 16;
2806 PRINT("%08x ", i);
2807 for(j=0;j<16;j++) {
2808 if (j < len)
2809 PRINT(" %02x", buf[i+j]);
2810 else
2811 PRINT(" ");
2812 }
2813 PRINT(" ");
2814 for(j=0;j<len;j++) {
2815 c = buf[i+j];
2816 if (c < ' ' || c > '~')
2817 c = '.';
2818 PRINT("%c", c);
2819 }
2820 PRINT("\n");
2821 }
2822 #undef PRINT
2823 }
2824
2825 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2826 {
2827 hex_dump_internal(NULL, f, 0, buf, size);
2828 }
2829
2830 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
2831 {
2832 hex_dump_internal(avcl, NULL, level, buf, size);
2833 }
2834
2835 //FIXME needs to know the time_base
2836 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
2837 {
2838 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2839 PRINT("stream #%d:\n", pkt->stream_index);
2840 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2841 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2842 /* DTS is _always_ valid after av_read_frame() */
2843 PRINT(" dts=");
2844 if (pkt->dts == AV_NOPTS_VALUE)
2845 PRINT("N/A");
2846 else
2847 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
2848 /* PTS may not be known if B-frames are present */
2849 PRINT(" pts=");
2850 if (pkt->pts == AV_NOPTS_VALUE)
2851 PRINT("N/A");
2852 else
2853 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
2854 PRINT("\n");
2855 PRINT(" size=%d\n", pkt->size);
2856 #undef PRINT
2857 if (dump_payload)
2858 av_hex_dump(f, pkt->data, pkt->size);
2859 }
2860
2861 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2862 {
2863 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
2864 }
2865
2866 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
2867 {
2868 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
2869 }
2870
2871 void url_split(char *proto, int proto_size,
2872 char *authorization, int authorization_size,
2873 char *hostname, int hostname_size,
2874 int *port_ptr,
2875 char *path, int path_size,
2876 const char *url)
2877 {
2878 const char *p, *ls, *at, *col, *brk;
2879
2880 if (port_ptr) *port_ptr = -1;
2881 if (proto_size > 0) proto[0] = 0;
2882 if (authorization_size > 0) authorization[0] = 0;
2883 if (hostname_size > 0) hostname[0] = 0;
2884 if (path_size > 0) path[0] = 0;
2885
2886 /* parse protocol */
2887 if ((p = strchr(url, ':'))) {
2888 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
2889 p++; /* skip ':' */
2890 if (*p == '/') p++;
2891 if (*p == '/') p++;
2892 } else {
2893 /* no protocol means plain filename */
2894 av_strlcpy(path, url, path_size);
2895 return;
2896 }
2897
2898 /* separate path from hostname */
2899 if ((ls = strchr(p, '/')))
2900 av_strlcpy(path, ls, path_size);
2901 else
2902 ls = &p[strlen(p)]; // XXX
2903
2904 /* the rest is hostname, use that to parse auth/port */
2905 if (ls != p) {
2906 /* authorization (user[:pass]@hostname) */
2907 if ((at = strchr(p, '@')) && at < ls) {
2908 av_strlcpy(authorization, p,
2909 FFMIN(authorization_size, at + 1 - p));
2910 p = at + 1; /* skip '@' */
2911 }
2912
2913 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
2914 /* [host]:port */
2915 av_strlcpy(hostname, p + 1,
2916 FFMIN(hostname_size, brk - p));
2917 if (brk[1] == ':' && port_ptr)
2918 *port_ptr = atoi(brk + 2);
2919 } else if ((col = strchr(p, ':')) && col < ls) {
2920 av_strlcpy(hostname, p,
2921 FFMIN(col + 1 - p, hostname_size));
2922 if (port_ptr) *port_ptr = atoi(col + 1);
2923 } else
2924 av_strlcpy(hostname, p,
2925 FFMIN(ls + 1 - p, hostname_size));
2926 }
2927 }
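/* Example (illustrative URL): url_split() applied to
 * "http://user:pass@example.com:8080/dir/file.mov" yields proto "http",
 * authorization "user:pass", hostname "example.com", port 8080 and path
 * "/dir/file.mov"; a plain filename with no "proto:" prefix goes entirely
 * into path. */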
2928
2929 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2930 int pts_num, int pts_den)
2931 {
2932 s->pts_wrap_bits = pts_wrap_bits;
2933 s->time_base.num = pts_num;
2934 s->time_base.den = pts_den;
2935 }
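/* Example: av_set_pts_info(st, 33, 1, 90000) selects the MPEG-style default
 * used in av_new_stream() above: a 1/90000 s time base with timestamps that
 * wrap after 33 bits. */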
2936
2937 /* fraction handling */
2938
2939 /**
2940 * f = val + (num / den) + 0.5.
2941 *
2942 * 'num' is normalized so that 0 <= num < den.
2943 *
2944 * @param f fractional number
2945 * @param val integer value
2946 * @param num must be >= 0
2947 * @param den must be >= 1
2948 */
2949 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2950 {
2951 num += (den >> 1);
2952 if (num >= den) {
2953 val += num / den;
2954 num = num % den;
2955 }
2956 f->val = val;
2957 f->num = num;
2958 f->den = den;
2959 }
2960
2961 /**
2962 * Fractional addition to f: f = f + (incr / f->den).
2963 *
2964 * @param f fractional number
2965 * @param incr increment, can be positive or negative
2966 */
2967 static void av_frac_add(AVFrac *f, int64_t incr)
2968 {
2969 int64_t num, den;
2970
2971 num = f->num + incr;
2972 den = f->den;
2973 if (num < 0) {
2974 f->val += num / den;
2975 num = num % den;
2976 if (num < 0) {
2977 num += den;
2978 f->val--;
2979 }
2980 } else if (num >= den) {
2981 f->val += num / den;
2982 num = num % den;
2983 }
2984 f->num = num;
2985 }