Remove unused variable, fixes warning:
[libav.git] / libavformat / utils.c
1 /*
2 * Various utilities for the FFmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "opt.h"
23 #include "avstring.h"
24 #include "riff.h"
25 #include <sys/time.h>
26 #include <time.h>
27
28 #undef NDEBUG
29 #include <assert.h>
30
31 /**
32 * @file libavformat/utils.c
33 * Various utility functions for using the FFmpeg library.
34 */
35
36 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
37 static void av_frac_add(AVFrac *f, int64_t incr);
38
39 /** head of registered input format linked list. */
40 AVInputFormat *first_iformat = NULL;
41 /** head of registered output format linked list. */
42 AVOutputFormat *first_oformat = NULL;
43
44 void av_register_input_format(AVInputFormat *format)
45 {
46 AVInputFormat **p;
47 p = &first_iformat;
48 while (*p != NULL) p = &(*p)->next;
49 *p = format;
50 format->next = NULL;
51 }
52
53 void av_register_output_format(AVOutputFormat *format)
54 {
55 AVOutputFormat **p;
56 p = &first_oformat;
57 while (*p != NULL) p = &(*p)->next;
58 *p = format;
59 format->next = NULL;
60 }
61
62 int match_ext(const char *filename, const char *extensions)
63 {
64 const char *ext, *p;
65 char ext1[32], *q;
66
67 if(!filename)
68 return 0;
69
70 ext = strrchr(filename, '.');
71 if (ext) {
72 ext++;
73 p = extensions;
74 for(;;) {
75 q = ext1;
76 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
77 *q++ = *p++;
78 *q = '\0';
79 if (!strcasecmp(ext1, ext))
80 return 1;
81 if (*p == '\0')
82 break;
83 p++;
84 }
85 }
86 return 0;
87 }
88
89 AVOutputFormat *guess_format(const char *short_name, const char *filename,
90 const char *mime_type)
91 {
92 AVOutputFormat *fmt, *fmt_found;
93 int score_max, score;
94
95 /* specific test for image sequences */
96 #ifdef CONFIG_IMAGE2_MUXER
97 if (!short_name && filename &&
98 av_filename_number_test(filename) &&
99 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
100 return guess_format("image2", NULL, NULL);
101 }
102 #endif
103 /* find the proper file type */
104 fmt_found = NULL;
105 score_max = 0;
106 fmt = first_oformat;
107 while (fmt != NULL) {
108 score = 0;
109 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
110 score += 100;
111 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
112 score += 10;
113 if (filename && fmt->extensions &&
114 match_ext(filename, fmt->extensions)) {
115 score += 5;
116 }
117 if (score > score_max) {
118 score_max = score;
119 fmt_found = fmt;
120 }
121 fmt = fmt->next;
122 }
123 return fmt_found;
124 }
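/*
 * A minimal usage sketch (illustrative, assuming the desired muxer has been
 * registered via av_register_output_format()): the caller passes whatever
 * hints it has and lets the scoring above pick the best match.
 *
 *     AVOutputFormat *ofmt = guess_format(NULL, "out.avi", NULL);
 *     if (!ofmt)
 *         return -1; // no registered muxer matched the filename extension
 *
 * A short name scores higher than a MIME type, which in turn scores higher
 * than a filename extension, so conflicting hints resolve in that order.
 */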
125
126 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
127 const char *mime_type)
128 {
129 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
130
131 if (fmt) {
132 AVOutputFormat *stream_fmt;
133 char stream_format_name[64];
134
135 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
136 stream_fmt = guess_format(stream_format_name, NULL, NULL);
137
138 if (stream_fmt)
139 fmt = stream_fmt;
140 }
141
142 return fmt;
143 }
144
145 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
146 const char *filename, const char *mime_type, enum CodecType type){
147 if(type == CODEC_TYPE_VIDEO){
148 enum CodecID codec_id= CODEC_ID_NONE;
149
150 #ifdef CONFIG_IMAGE2_MUXER
151 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
152 codec_id= av_guess_image2_codec(filename);
153 }
154 #endif
155 if(codec_id == CODEC_ID_NONE)
156 codec_id= fmt->video_codec;
157 return codec_id;
158 }else if(type == CODEC_TYPE_AUDIO)
159 return fmt->audio_codec;
160 else
161 return CODEC_ID_NONE;
162 }
163
164 AVInputFormat *av_find_input_format(const char *short_name)
165 {
166 AVInputFormat *fmt;
167 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
168 if (!strcmp(fmt->name, short_name))
169 return fmt;
170 }
171 return NULL;
172 }
173
174 /* memory handling */
175
176 void av_destruct_packet(AVPacket *pkt)
177 {
178 av_free(pkt->data);
179 pkt->data = NULL; pkt->size = 0;
180 }
181
182 void av_init_packet(AVPacket *pkt)
183 {
184 pkt->pts = AV_NOPTS_VALUE;
185 pkt->dts = AV_NOPTS_VALUE;
186 pkt->pos = -1;
187 pkt->duration = 0;
188 pkt->flags = 0;
189 pkt->stream_index = 0;
190 pkt->destruct= av_destruct_packet_nofree;
191 }
192
193 int av_new_packet(AVPacket *pkt, int size)
194 {
195 uint8_t *data;
196 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
197 return AVERROR(ENOMEM);
198 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
199 if (!data)
200 return AVERROR(ENOMEM);
201 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
202
203 av_init_packet(pkt);
204 pkt->data = data;
205 pkt->size = size;
206 pkt->destruct = av_destruct_packet;
207 return 0;
208 }
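/*
 * Note on the (unsigned) comparison in av_new_packet() above: it is an
 * overflow guard. If size is within FF_INPUT_BUFFER_PADDING_SIZE of the
 * maximum unsigned value, the addition wraps around and the sum becomes
 * smaller than size itself, so the request is refused instead of
 * under-allocating the buffer. av_dup_packet() below uses the same check.
 */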
209
210 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
211 {
212 int ret= av_new_packet(pkt, size);
213
214 if(ret<0)
215 return ret;
216
217 pkt->pos= url_ftell(s);
218
219 ret= get_buffer(s, pkt->data, size);
220 if(ret<=0)
221 av_free_packet(pkt);
222 else
223 pkt->size= ret;
224
225 return ret;
226 }
227
228 int av_dup_packet(AVPacket *pkt)
229 {
230 if (pkt->destruct != av_destruct_packet) {
231 uint8_t *data;
232 /* we duplicate the packet and don't forget to put the padding
233 again */
234 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
235 return AVERROR(ENOMEM);
236 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
237 if (!data) {
238 return AVERROR(ENOMEM);
239 }
240 memcpy(data, pkt->data, pkt->size);
241 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
242 pkt->data = data;
243 pkt->destruct = av_destruct_packet;
244 }
245 return 0;
246 }
247
248 int av_filename_number_test(const char *filename)
249 {
250 char buf[1024];
251 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
252 }
253
254 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
255 {
256 AVInputFormat *fmt1, *fmt;
257 int score;
258
259 fmt = NULL;
260 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
261 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
262 continue;
263 score = 0;
264 if (fmt1->read_probe) {
265 score = fmt1->read_probe(pd);
266 } else if (fmt1->extensions) {
267 if (match_ext(pd->filename, fmt1->extensions)) {
268 score = 50;
269 }
270 }
271 if (score > *score_max) {
272 *score_max = score;
273 fmt = fmt1;
274 }
275 }
276 return fmt;
277 }
278
279 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
280 int score=0;
281 return av_probe_input_format2(pd, is_opened, &score);
282 }
283
284 /************************************************************/
285 /* input media file */
286
287 /**
288 * Open a media file from an IO stream. 'fmt' must be specified.
289 */
290 static const char* format_to_name(void* ptr)
291 {
292 AVFormatContext* fc = (AVFormatContext*) ptr;
293 if(fc->iformat) return fc->iformat->name;
294 else if(fc->oformat) return fc->oformat->name;
295 else return "NULL";
296 }
297
298 #define OFFSET(x) offsetof(AVFormatContext,x)
299 #define DEFAULT 0 //should be NAN, but glibc does not define NAN as a constant expression as required by ANSI/ISO C
300 //these names are too long to be readable
301 #define E AV_OPT_FLAG_ENCODING_PARAM
302 #define D AV_OPT_FLAG_DECODING_PARAM
303
304 static const AVOption options[]={
305 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
306 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
307 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
308 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
309 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
310 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
311 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
312 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
313 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
314 {NULL},
315 };
316
317 #undef E
318 #undef D
319 #undef DEFAULT
320
321 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
322
323 static void avformat_get_context_defaults(AVFormatContext *s)
324 {
325 memset(s, 0, sizeof(AVFormatContext));
326
327 s->av_class = &av_format_context_class;
328
329 av_opt_set_defaults(s);
330 }
331
332 AVFormatContext *av_alloc_format_context(void)
333 {
334 AVFormatContext *ic;
335 ic = av_malloc(sizeof(AVFormatContext));
336 if (!ic) return ic;
337 avformat_get_context_defaults(ic);
338 ic->av_class = &av_format_context_class;
339 return ic;
340 }
341
342 int av_open_input_stream(AVFormatContext **ic_ptr,
343 ByteIOContext *pb, const char *filename,
344 AVInputFormat *fmt, AVFormatParameters *ap)
345 {
346 int err;
347 AVFormatContext *ic;
348 AVFormatParameters default_ap;
349
350 if(!ap){
351 ap=&default_ap;
352 memset(ap, 0, sizeof(default_ap));
353 }
354
355 if(!ap->prealloced_context)
356 ic = av_alloc_format_context();
357 else
358 ic = *ic_ptr;
359 if (!ic) {
360 err = AVERROR(ENOMEM);
361 goto fail;
362 }
363 ic->iformat = fmt;
364 if (pb)
365 ic->pb = *pb;
366 ic->duration = AV_NOPTS_VALUE;
367 ic->start_time = AV_NOPTS_VALUE;
368 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
369
370 /* allocate private data */
371 if (fmt->priv_data_size > 0) {
372 ic->priv_data = av_mallocz(fmt->priv_data_size);
373 if (!ic->priv_data) {
374 err = AVERROR(ENOMEM);
375 goto fail;
376 }
377 } else {
378 ic->priv_data = NULL;
379 }
380
381 err = ic->iformat->read_header(ic, ap);
382 if (err < 0)
383 goto fail;
384
385 if (pb && !ic->data_offset)
386 ic->data_offset = url_ftell(&ic->pb);
387
388 *ic_ptr = ic;
389 return 0;
390 fail:
391 if (ic) {
392 av_freep(&ic->priv_data);
393 }
394 av_free(ic);
395 *ic_ptr = NULL;
396 return err;
397 }
398
399 /** Size of probe buffer, for guessing file type from file contents. */
400 #define PROBE_BUF_MIN 2048
401 #define PROBE_BUF_MAX (1<<20)
402
403 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
404 AVInputFormat *fmt,
405 int buf_size,
406 AVFormatParameters *ap)
407 {
408 int err, must_open_file, file_opened, probe_size;
409 AVProbeData probe_data, *pd = &probe_data;
410 ByteIOContext pb1, *pb = &pb1;
411
412 file_opened = 0;
413 pd->filename = "";
414 if (filename)
415 pd->filename = filename;
416 pd->buf = NULL;
417 pd->buf_size = 0;
418
419 if (!fmt) {
420 /* guess format if no file can be opened */
421 fmt = av_probe_input_format(pd, 0);
422 }
423
424 /* do not open file if the format does not need it. XXX: specific
425 hack needed to handle RTSP/TCP */
426 must_open_file = 1;
427 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
428 must_open_file = 0;
429 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it is uninitialized
430 }
431
432 if (!fmt || must_open_file) {
433 /* if no file needed do not try to open one */
434 if ((err=url_fopen(pb, filename, URL_RDONLY)) < 0) {
435 goto fail;
436 }
437 file_opened = 1;
438 if (buf_size > 0) {
439 url_setbufsize(pb, buf_size);
440 }
441
442 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
443 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
444 /* read probe data */
445 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
446 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
447 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
448 if (url_fseek(pb, 0, SEEK_SET) < 0) {
449 url_fclose(pb);
450 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
451 file_opened = 0;
452 err = AVERROR(EIO);
453 goto fail;
454 }
455 }
456 /* guess file format */
457 fmt = av_probe_input_format2(pd, 1, &score);
458 }
459 av_freep(&pd->buf);
460 }
461
462 /* if still no format found, error */
463 if (!fmt) {
464 err = AVERROR_NOFMT;
465 goto fail;
466 }
467
468 /* XXX: suppress this hack for redirectors */
469 #ifdef CONFIG_REDIR_DEMUXER
470 if (!strcmp(fmt->name, "redir")) {
471 int redir_open(AVFormatContext **ic_ptr, ByteIOContext *f);
472 err = redir_open(ic_ptr, pb);
473 url_fclose(pb);
474 return err;
475 }
476 #endif
477
478 /* check filename in case an image number is expected */
479 if (fmt->flags & AVFMT_NEEDNUMBER) {
480 if (!av_filename_number_test(filename)) {
481 err = AVERROR_NUMEXPECTED;
482 goto fail;
483 }
484 }
485 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
486 if (err)
487 goto fail;
488 return 0;
489 fail:
490 av_freep(&pd->buf);
491 if (file_opened)
492 url_fclose(pb);
493 *ic_ptr = NULL;
494 return err;
495
496 }
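/*
 * A minimal read-loop sketch (illustrative; error handling trimmed), tying
 * together the entry points defined in this file. "input.avi" is just a
 * placeholder; any URL understood by the registered protocols works here.
 *
 *     AVFormatContext *ic;
 *     AVPacket pkt;
 *     if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) < 0)
 *         return -1;
 *     if (av_find_stream_info(ic) < 0)
 *         return -1;                     // could not get codec parameters
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // pkt.stream_index selects the matching ic->streams[] entry
 *         av_free_packet(&pkt);
 *     }
 *     av_close_input_file(ic);
 */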
497
498 /*******************************************************/
499
500 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
501 {
502 av_init_packet(pkt);
503 return s->iformat->read_packet(s, pkt);
504 }
505
506 /**********************************************************/
507
508 /**
509 * Get the number of samples of an audio frame. Return -1 on error.
510 */
511 static int get_audio_frame_size(AVCodecContext *enc, int size)
512 {
513 int frame_size;
514
515 if (enc->frame_size <= 1) {
516 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
517
518 if (bits_per_sample) {
519 if (enc->channels == 0)
520 return -1;
521 frame_size = (size << 3) / (bits_per_sample * enc->channels);
522 } else {
523 /* used for example by ADPCM codecs */
524 if (enc->bit_rate == 0)
525 return -1;
526 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
527 }
528 } else {
529 frame_size = enc->frame_size;
530 }
531 return frame_size;
532 }
533
534
535 /**
536 * Compute the frame duration as a fraction *pnum / *pden, in seconds; both are set to 0 if it is not available.
537 */
538 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
539 AVCodecParserContext *pc, AVPacket *pkt)
540 {
541 int frame_size;
542
543 *pnum = 0;
544 *pden = 0;
545 switch(st->codec->codec_type) {
546 case CODEC_TYPE_VIDEO:
547 if(st->time_base.num*1000LL > st->time_base.den){
548 *pnum = st->time_base.num;
549 *pden = st->time_base.den;
550 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
551 *pnum = st->codec->time_base.num;
552 *pden = st->codec->time_base.den;
553 if (pc && pc->repeat_pict) {
554 *pden *= 2;
555 *pnum = (*pnum) * (2 + pc->repeat_pict);
556 }
557 }
558 break;
559 case CODEC_TYPE_AUDIO:
560 frame_size = get_audio_frame_size(st->codec, pkt->size);
561 if (frame_size < 0)
562 break;
563 *pnum = frame_size;
564 *pden = st->codec->sample_rate;
565 break;
566 default:
567 break;
568 }
569 }
570
571 static int is_intra_only(AVCodecContext *enc){
572 if(enc->codec_type == CODEC_TYPE_AUDIO){
573 return 1;
574 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
575 switch(enc->codec_id){
576 case CODEC_ID_MJPEG:
577 case CODEC_ID_MJPEGB:
578 case CODEC_ID_LJPEG:
579 case CODEC_ID_RAWVIDEO:
580 case CODEC_ID_DVVIDEO:
581 case CODEC_ID_HUFFYUV:
582 case CODEC_ID_FFVHUFF:
583 case CODEC_ID_ASV1:
584 case CODEC_ID_ASV2:
585 case CODEC_ID_VCR1:
586 return 1;
587 default: break;
588 }
589 }
590 return 0;
591 }
592
593 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
594 int64_t dts, int64_t pts)
595 {
596 AVStream *st= s->streams[stream_index];
597 AVPacketList *pktl= s->packet_buffer;
598
599 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE)
600 return;
601
602 st->first_dts= dts - st->cur_dts;
603 st->cur_dts= dts;
604
605 for(; pktl; pktl= pktl->next){
606 if(pktl->pkt.stream_index != stream_index)
607 continue;
608 //FIXME think more about this check
609 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
610 pktl->pkt.pts += st->first_dts;
611
612 if(pktl->pkt.dts != AV_NOPTS_VALUE)
613 pktl->pkt.dts += st->first_dts;
614
615 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
616 st->start_time= pktl->pkt.pts;
617 }
618 if (st->start_time == AV_NOPTS_VALUE)
619 st->start_time = pts;
620 }
621
622 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
623 AVCodecParserContext *pc, AVPacket *pkt)
624 {
625 int num, den, presentation_delayed, delay, i;
626 int64_t offset;
627
628 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
629 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
630 pkt->dts -= 1LL<<st->pts_wrap_bits;
631 }
632
633 if (pkt->duration == 0) {
634 compute_frame_duration(&num, &den, st, pc, pkt);
635 if (den && num) {
636 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
637 }
638 }
639
640 /* correct timestamps with byte offset if demuxers only have timestamps on packet boundaries */
641 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
642 /* this will estimate bitrate based on this frame's duration and size */
643 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
644 if(pkt->pts != AV_NOPTS_VALUE)
645 pkt->pts += offset;
646 if(pkt->dts != AV_NOPTS_VALUE)
647 pkt->dts += offset;
648 }
649
650 /* do we have a video B frame ? */
651 delay= st->codec->has_b_frames;
652 presentation_delayed = 0;
653 /* XXX: need has_b_frame, but cannot get it if the codec is
654 not initialized */
655 if (delay &&
656 pc && pc->pict_type != FF_B_TYPE)
657 presentation_delayed = 1;
658 /* This may be redundant, but it should not hurt. */
659 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
660 presentation_delayed = 1;
661
662 if(st->cur_dts == AV_NOPTS_VALUE){
663 st->cur_dts = 0; //FIXME maybe set it to 0 during init
664 }
665
666 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
667 /* interpolate PTS and DTS if they are not present */
668 if(delay <=1){
669 if (presentation_delayed) {
670 /* DTS = decompression time stamp */
671 /* PTS = presentation time stamp */
672 if (pkt->dts == AV_NOPTS_VALUE)
673 pkt->dts = st->last_IP_pts;
674 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
675 if (pkt->dts == AV_NOPTS_VALUE)
676 pkt->dts = st->cur_dts;
677
678 /* this is tricky: the dts must be incremented by the duration
679 of the frame we are displaying, i.e. the last I or P frame */
680 if (st->last_IP_duration == 0)
681 st->last_IP_duration = pkt->duration;
682 st->cur_dts = pkt->dts + st->last_IP_duration;
683 st->last_IP_duration = pkt->duration;
684 st->last_IP_pts= pkt->pts;
685 /* cannot compute PTS if not present (we can compute it only
686 by knowing the future) */
687 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
688 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
689 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
690 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
691 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
692 pkt->pts += pkt->duration;
693 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
694 }
695 }
696
697 /* presentation is not delayed : PTS and DTS are the same */
698 if(pkt->pts == AV_NOPTS_VALUE)
699 pkt->pts = pkt->dts;
700 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
701 if(pkt->pts == AV_NOPTS_VALUE)
702 pkt->pts = st->cur_dts;
703 pkt->dts = pkt->pts;
704 st->cur_dts = pkt->pts + pkt->duration;
705 }
706 }
707
708 if(pkt->pts != AV_NOPTS_VALUE){
709 st->pts_buffer[0]= pkt->pts;
710 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
711 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
712 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
713 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
714 if(pkt->dts == AV_NOPTS_VALUE)
715 pkt->dts= st->pts_buffer[0];
716 if(delay>1){
717 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
718 }
719 if(pkt->dts > st->cur_dts)
720 st->cur_dts = pkt->dts;
721 }
722
723 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
724
725 /* update flags */
726 if(is_intra_only(st->codec))
727 pkt->flags |= PKT_FLAG_KEY;
728 else if (pc) {
729 pkt->flags = 0;
730 /* key frame computation */
731 if (pc->pict_type == FF_I_TYPE)
732 pkt->flags |= PKT_FLAG_KEY;
733 }
734 }
735
736 void av_destruct_packet_nofree(AVPacket *pkt)
737 {
738 pkt->data = NULL; pkt->size = 0;
739 }
740
741 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
742 {
743 AVStream *st;
744 int len, ret, i;
745
746 av_init_packet(pkt);
747
748 for(;;) {
749 /* select current input stream component */
750 st = s->cur_st;
751 if (st) {
752 if (!st->need_parsing || !st->parser) {
753 /* no parsing needed: we just output the packet as is */
754 /* raw data support */
755 *pkt = s->cur_pkt;
756 compute_pkt_fields(s, st, NULL, pkt);
757 s->cur_st = NULL;
758 break;
759 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
760 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
761 s->cur_ptr, s->cur_len,
762 s->cur_pkt.pts, s->cur_pkt.dts);
763 s->cur_pkt.pts = AV_NOPTS_VALUE;
764 s->cur_pkt.dts = AV_NOPTS_VALUE;
765 /* increment read pointer */
766 s->cur_ptr += len;
767 s->cur_len -= len;
768
769 /* return packet if any */
770 if (pkt->size) {
771 got_packet:
772 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
773 pkt->duration = 0;
774 pkt->stream_index = st->index;
775 pkt->pts = st->parser->pts;
776 pkt->dts = st->parser->dts;
777 pkt->destruct = av_destruct_packet_nofree;
778 compute_pkt_fields(s, st, st->parser, pkt);
779
780 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
781 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
782 0, 0, AVINDEX_KEYFRAME);
783 }
784
785 break;
786 }
787 } else {
788 /* free packet */
789 av_free_packet(&s->cur_pkt);
790 s->cur_st = NULL;
791 }
792 } else {
793 /* read next packet */
794 ret = av_read_packet(s, &s->cur_pkt);
795 if (ret < 0) {
796 if (ret == AVERROR(EAGAIN))
797 return ret;
798 /* return the last frames, if any */
799 for(i = 0; i < s->nb_streams; i++) {
800 st = s->streams[i];
801 if (st->parser && st->need_parsing) {
802 av_parser_parse(st->parser, st->codec,
803 &pkt->data, &pkt->size,
804 NULL, 0,
805 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
806 if (pkt->size)
807 goto got_packet;
808 }
809 }
810 /* no more packets: really terminates parsing */
811 return ret;
812 }
813
814 st = s->streams[s->cur_pkt.stream_index];
815 if(st->codec->debug & FF_DEBUG_PTS)
816 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
817 s->cur_pkt.stream_index,
818 s->cur_pkt.pts,
819 s->cur_pkt.dts,
820 s->cur_pkt.size);
821
822 s->cur_st = st;
823 s->cur_ptr = s->cur_pkt.data;
824 s->cur_len = s->cur_pkt.size;
825 if (st->need_parsing && !st->parser) {
826 st->parser = av_parser_init(st->codec->codec_id);
827 if (!st->parser) {
828 /* no parser available : just output the raw packets */
829 st->need_parsing = AVSTREAM_PARSE_NONE;
830 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
831 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
832 }
833 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
834 st->parser->last_frame_offset=
835 st->parser->cur_offset= s->cur_pkt.pos;
836 }
837 }
838 }
839 }
840 if(st->codec->debug & FF_DEBUG_PTS)
841 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
842 pkt->stream_index,
843 pkt->pts,
844 pkt->dts,
845 pkt->size);
846
847 return 0;
848 }
849
850 static AVPacket *add_to_pktbuf(AVFormatContext *s, AVPacket *pkt){
851 AVPacketList *pktl= s->packet_buffer;
852 AVPacketList **plast_pktl= &s->packet_buffer;
853
854 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
855
856 pktl = av_mallocz(sizeof(AVPacketList));
857 if (!pktl)
858 return NULL;
859
860 /* add the packet in the buffered packet list */
861 *plast_pktl = pktl;
862 pktl->pkt= *pkt;
863 return &pktl->pkt;
864 }
865
866 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
867 {
868 AVPacketList *pktl;
869 int eof=0;
870 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
871
872 for(;;){
873 pktl = s->packet_buffer;
874 if (pktl) {
875 AVPacket *next_pkt= &pktl->pkt;
876
877 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
878 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
879 if( pktl->pkt.stream_index == next_pkt->stream_index
880 && next_pkt->dts < pktl->pkt.dts
881 && pktl->pkt.pts != pktl->pkt.dts //not b frame
882 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
883 next_pkt->pts= pktl->pkt.dts;
884 }
885 pktl= pktl->next;
886 }
887 pktl = s->packet_buffer;
888 }
889
890 if( next_pkt->pts != AV_NOPTS_VALUE
891 || next_pkt->dts == AV_NOPTS_VALUE
892 || !genpts || eof){
893 /* read packet from packet buffer, if there is data */
894 *pkt = *next_pkt;
895 s->packet_buffer = pktl->next;
896 av_free(pktl);
897 return 0;
898 }
899 }
900 if(genpts){
901 int ret= av_read_frame_internal(s, pkt);
902 if(ret<0){
903 if(pktl && ret != AVERROR(EAGAIN)){
904 eof=1;
905 continue;
906 }else
907 return ret;
908 }
909
910 if(av_dup_packet(add_to_pktbuf(s, pkt)) < 0)
911 return AVERROR(ENOMEM);
912 }else{
913 assert(!s->packet_buffer);
914 return av_read_frame_internal(s, pkt);
915 }
916 }
917 }
918
919 /* XXX: suppress the packet queue */
920 static void flush_packet_queue(AVFormatContext *s)
921 {
922 AVPacketList *pktl;
923
924 for(;;) {
925 pktl = s->packet_buffer;
926 if (!pktl)
927 break;
928 s->packet_buffer = pktl->next;
929 av_free_packet(&pktl->pkt);
930 av_free(pktl);
931 }
932 }
933
934 /*******************************************************/
935 /* seek support */
936
937 int av_find_default_stream_index(AVFormatContext *s)
938 {
939 int i;
940 AVStream *st;
941
942 if (s->nb_streams <= 0)
943 return -1;
944 for(i = 0; i < s->nb_streams; i++) {
945 st = s->streams[i];
946 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
947 return i;
948 }
949 }
950 return 0;
951 }
952
953 /**
954 * Flush the frame reader.
955 */
956 static void av_read_frame_flush(AVFormatContext *s)
957 {
958 AVStream *st;
959 int i;
960
961 flush_packet_queue(s);
962
963 /* free previous packet */
964 if (s->cur_st) {
965 if (s->cur_st->parser)
966 av_free_packet(&s->cur_pkt);
967 s->cur_st = NULL;
968 }
969 /* fail safe */
970 s->cur_ptr = NULL;
971 s->cur_len = 0;
972
973 /* for each stream, reset read state */
974 for(i = 0; i < s->nb_streams; i++) {
975 st = s->streams[i];
976
977 if (st->parser) {
978 av_parser_close(st->parser);
979 st->parser = NULL;
980 }
981 st->last_IP_pts = AV_NOPTS_VALUE;
982 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
983 }
984 }
985
986 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
987 int i;
988
989 for(i = 0; i < s->nb_streams; i++) {
990 AVStream *st = s->streams[i];
991
992 st->cur_dts = av_rescale(timestamp,
993 st->time_base.den * (int64_t)ref_st->time_base.num,
994 st->time_base.num * (int64_t)ref_st->time_base.den);
995 }
996 }
997
998 int av_add_index_entry(AVStream *st,
999 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1000 {
1001 AVIndexEntry *entries, *ie;
1002 int index;
1003
1004 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1005 return -1;
1006
1007 entries = av_fast_realloc(st->index_entries,
1008 &st->index_entries_allocated_size,
1009 (st->nb_index_entries + 1) *
1010 sizeof(AVIndexEntry));
1011 if(!entries)
1012 return -1;
1013
1014 st->index_entries= entries;
1015
1016 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1017
1018 if(index<0){
1019 index= st->nb_index_entries++;
1020 ie= &entries[index];
1021 assert(index==0 || ie[-1].timestamp < timestamp);
1022 }else{
1023 ie= &entries[index];
1024 if(ie->timestamp != timestamp){
1025 if(ie->timestamp <= timestamp)
1026 return -1;
1027 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1028 st->nb_index_entries++;
1029 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1030 distance= ie->min_distance;
1031 }
1032
1033 ie->pos = pos;
1034 ie->timestamp = timestamp;
1035 ie->min_distance= distance;
1036 ie->size= size;
1037 ie->flags = flags;
1038
1039 return index;
1040 }
1041
1042 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1043 int flags)
1044 {
1045 AVIndexEntry *entries= st->index_entries;
1046 int nb_entries= st->nb_index_entries;
1047 int a, b, m;
1048 int64_t timestamp;
1049
1050 a = - 1;
1051 b = nb_entries;
1052
1053 while (b - a > 1) {
1054 m = (a + b) >> 1;
1055 timestamp = entries[m].timestamp;
1056 if(timestamp >= wanted_timestamp)
1057 b = m;
1058 if(timestamp <= wanted_timestamp)
1059 a = m;
1060 }
1061 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1062
1063 if(!(flags & AVSEEK_FLAG_ANY)){
1064 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1065 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1066 }
1067 }
1068
1069 if(m == nb_entries)
1070 return -1;
1071 return m;
1072 }
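/*
 * Worked example for the search above (illustrative): with keyframe entries
 * at timestamps {0, 40, 80} and wanted_timestamp == 50, the bisection ends
 * with a == 1 (ts 40) and b == 2 (ts 80). AVSEEK_FLAG_BACKWARD therefore
 * returns index 1, the default (forward) mode returns index 2, and
 * AVSEEK_FLAG_ANY merely skips the keyframe-only filtering step, so
 * non-keyframe entries may also be returned.
 */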
1073
1074 #define DEBUG_SEEK
1075
1076 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1077 AVInputFormat *avif= s->iformat;
1078 int64_t pos_min, pos_max, pos, pos_limit;
1079 int64_t ts_min, ts_max, ts;
1080 int index;
1081 AVStream *st;
1082
1083 if (stream_index < 0)
1084 return -1;
1085
1086 #ifdef DEBUG_SEEK
1087 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1088 #endif
1089
1090 ts_max=
1091 ts_min= AV_NOPTS_VALUE;
1092 pos_limit= -1; //gcc falsely says it may be uninitialized
1093
1094 st= s->streams[stream_index];
1095 if(st->index_entries){
1096 AVIndexEntry *e;
1097
1098 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1099 index= FFMAX(index, 0);
1100 e= &st->index_entries[index];
1101
1102 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1103 pos_min= e->pos;
1104 ts_min= e->timestamp;
1105 #ifdef DEBUG_SEEK
1106 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1107 pos_min,ts_min);
1108 #endif
1109 }else{
1110 assert(index==0);
1111 }
1112
1113 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1114 assert(index < st->nb_index_entries);
1115 if(index >= 0){
1116 e= &st->index_entries[index];
1117 assert(e->timestamp >= target_ts);
1118 pos_max= e->pos;
1119 ts_max= e->timestamp;
1120 pos_limit= pos_max - e->min_distance;
1121 #ifdef DEBUG_SEEK
1122 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1123 pos_max,pos_limit, ts_max);
1124 #endif
1125 }
1126 }
1127
1128 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1129 if(pos<0)
1130 return -1;
1131
1132 /* do the seek */
1133 url_fseek(&s->pb, pos, SEEK_SET);
1134
1135 av_update_cur_dts(s, st, ts);
1136
1137 return 0;
1138 }
1139
1140 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1141 int64_t pos, ts;
1142 int64_t start_pos, filesize;
1143 int no_change;
1144
1145 #ifdef DEBUG_SEEK
1146 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1147 #endif
1148
1149 if(ts_min == AV_NOPTS_VALUE){
1150 pos_min = s->data_offset;
1151 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1152 if (ts_min == AV_NOPTS_VALUE)
1153 return -1;
1154 }
1155
1156 if(ts_max == AV_NOPTS_VALUE){
1157 int step= 1024;
1158 filesize = url_fsize(&s->pb);
1159 pos_max = filesize - 1;
1160 do{
1161 pos_max -= step;
1162 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1163 step += step;
1164 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1165 if (ts_max == AV_NOPTS_VALUE)
1166 return -1;
1167
1168 for(;;){
1169 int64_t tmp_pos= pos_max + 1;
1170 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1171 if(tmp_ts == AV_NOPTS_VALUE)
1172 break;
1173 ts_max= tmp_ts;
1174 pos_max= tmp_pos;
1175 if(tmp_pos >= filesize)
1176 break;
1177 }
1178 pos_limit= pos_max;
1179 }
1180
1181 if(ts_min > ts_max){
1182 return -1;
1183 }else if(ts_min == ts_max){
1184 pos_limit= pos_min;
1185 }
1186
1187 no_change=0;
1188 while (pos_min < pos_limit) {
1189 #ifdef DEBUG_SEEK
1190 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1191 pos_min, pos_max,
1192 ts_min, ts_max);
1193 #endif
1194 assert(pos_limit <= pos_max);
1195
1196 if(no_change==0){
1197 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1198 // interpolate position (better than dichotomy)
1199 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1200 + pos_min - approximate_keyframe_distance;
1201 }else if(no_change==1){
1202 // bisection, if interpolation failed to change min or max pos last time
1203 pos = (pos_min + pos_limit)>>1;
1204 }else{
1205 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1206 pos=pos_min;
1207 }
1208 if(pos <= pos_min)
1209 pos= pos_min + 1;
1210 else if(pos > pos_limit)
1211 pos= pos_limit;
1212 start_pos= pos;
1213
1214 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1215 if(pos == pos_max)
1216 no_change++;
1217 else
1218 no_change=0;
1219 #ifdef DEBUG_SEEK
1220 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1221 #endif
1222 if(ts == AV_NOPTS_VALUE){
1223 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1224 return -1;
1225 }
1226 assert(ts != AV_NOPTS_VALUE);
1227 if (target_ts <= ts) {
1228 pos_limit = start_pos - 1;
1229 pos_max = pos;
1230 ts_max = ts;
1231 }
1232 if (target_ts >= ts) {
1233 pos_min = pos;
1234 ts_min = ts;
1235 }
1236 }
1237
1238 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1239 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1240 #ifdef DEBUG_SEEK
1241 pos_min = pos;
1242 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1243 pos_min++;
1244 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1245 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1246 pos, ts_min, target_ts, ts_max);
1247 #endif
1248 *ts_ret= ts;
1249 return pos;
1250 }
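/*
 * Illustrative numbers for the search strategy above: with ts_min=0 at
 * pos_min=0, ts_max=1000 at pos_max=1000000 and target_ts=250, the first
 * iteration interpolates
 *
 *     pos = av_rescale(250 - 0, 1000000 - 0, 1000 - 0)
 *           + 0 - (pos_max - pos_limit)
 *
 * i.e. roughly byte 250000 minus the approximate keyframe distance. Only if
 * such interpolation steps stop moving pos_min or pos_max does the loop fall
 * back to bisection, and then to a linear scan from pos_min.
 */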
1251
1252 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1253 int64_t pos_min, pos_max;
1254 #if 0
1255 AVStream *st;
1256
1257 if (stream_index < 0)
1258 return -1;
1259
1260 st= s->streams[stream_index];
1261 #endif
1262
1263 pos_min = s->data_offset;
1264 pos_max = url_fsize(&s->pb) - 1;
1265
1266 if (pos < pos_min) pos= pos_min;
1267 else if(pos > pos_max) pos= pos_max;
1268
1269 url_fseek(&s->pb, pos, SEEK_SET);
1270
1271 #if 0
1272 av_update_cur_dts(s, st, ts);
1273 #endif
1274 return 0;
1275 }
1276
1277 static int av_seek_frame_generic(AVFormatContext *s,
1278 int stream_index, int64_t timestamp, int flags)
1279 {
1280 int index;
1281 AVStream *st;
1282 AVIndexEntry *ie;
1283
1284 st = s->streams[stream_index];
1285
1286 index = av_index_search_timestamp(st, timestamp, flags);
1287
1288 if(index < 0 || index==st->nb_index_entries-1){
1289 int i;
1290 AVPacket pkt;
1291
1292 if(st->index_entries && st->nb_index_entries){
1293 ie= &st->index_entries[st->nb_index_entries-1];
1294 url_fseek(&s->pb, ie->pos, SEEK_SET);
1295 av_update_cur_dts(s, st, ie->timestamp);
1296 }else
1297 url_fseek(&s->pb, 0, SEEK_SET);
1298
1299 for(i=0;; i++) {
1300 int ret = av_read_frame(s, &pkt);
1301 if(ret<0)
1302 break;
1303 av_free_packet(&pkt);
1304 if(stream_index == pkt.stream_index){
1305 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1306 break;
1307 }
1308 }
1309 index = av_index_search_timestamp(st, timestamp, flags);
1310 }
1311 if (index < 0)
1312 return -1;
1313
1314 av_read_frame_flush(s);
1315 if (s->iformat->read_seek){
1316 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1317 return 0;
1318 }
1319 ie = &st->index_entries[index];
1320 url_fseek(&s->pb, ie->pos, SEEK_SET);
1321
1322 av_update_cur_dts(s, st, ie->timestamp);
1323
1324 return 0;
1325 }
1326
1327 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1328 {
1329 int ret;
1330 AVStream *st;
1331
1332 av_read_frame_flush(s);
1333
1334 if(flags & AVSEEK_FLAG_BYTE)
1335 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1336
1337 if(stream_index < 0){
1338 stream_index= av_find_default_stream_index(s);
1339 if(stream_index < 0)
1340 return -1;
1341
1342 st= s->streams[stream_index];
1343 /* timestamp for default must be expressed in AV_TIME_BASE units */
1344 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1345 }
1346 st= s->streams[stream_index];
1347
1348 /* first, we try the format specific seek */
1349 if (s->iformat->read_seek)
1350 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1351 else
1352 ret = -1;
1353 if (ret >= 0) {
1354 return 0;
1355 }
1356
1357 if(s->iformat->read_timestamp)
1358 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1359 else
1360 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1361 }
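/*
 * A small usage sketch (illustrative): with stream_index == -1 the timestamp
 * is expected in AV_TIME_BASE units and is rescaled to the default stream's
 * time base above, so seeking to the 10 second mark looks like:
 *
 *     av_seek_frame(ic, -1, 10 * AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
 *
 * AVSEEK_FLAG_BACKWARD asks for the closest point at or before the target,
 * which is what a player typically wants before decoding forward.
 */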
1362
1363 /*******************************************************/
1364
1365 /**
1366 * Return TRUE if the given AVFormatContext has an accurate duration in any of its streams.
1367 *
1368 * @return TRUE if at least one stream has a known, accurate duration.
1369 */
1370 static int av_has_duration(AVFormatContext *ic)
1371 {
1372 int i;
1373 AVStream *st;
1374
1375 for(i = 0;i < ic->nb_streams; i++) {
1376 st = ic->streams[i];
1377 if (st->duration != AV_NOPTS_VALUE)
1378 return 1;
1379 }
1380 return 0;
1381 }
1382
1383 /**
1384 * Estimate the global stream timings from those of each component.
1385 *
1386 * Also computes the global bitrate if possible.
1387 */
1388 static void av_update_stream_timings(AVFormatContext *ic)
1389 {
1390 int64_t start_time, start_time1, end_time, end_time1;
1391 int64_t duration, duration1;
1392 int i;
1393 AVStream *st;
1394
1395 start_time = INT64_MAX;
1396 end_time = INT64_MIN;
1397 duration = INT64_MIN;
1398 for(i = 0;i < ic->nb_streams; i++) {
1399 st = ic->streams[i];
1400 if (st->start_time != AV_NOPTS_VALUE) {
1401 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1402 if (start_time1 < start_time)
1403 start_time = start_time1;
1404 if (st->duration != AV_NOPTS_VALUE) {
1405 end_time1 = start_time1
1406 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1407 if (end_time1 > end_time)
1408 end_time = end_time1;
1409 }
1410 }
1411 if (st->duration != AV_NOPTS_VALUE) {
1412 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1413 if (duration1 > duration)
1414 duration = duration1;
1415 }
1416 }
1417 if (start_time != INT64_MAX) {
1418 ic->start_time = start_time;
1419 if (end_time != INT64_MIN) {
1420 if (end_time - start_time > duration)
1421 duration = end_time - start_time;
1422 }
1423 }
1424 if (duration != INT64_MIN) {
1425 ic->duration = duration;
1426 if (ic->file_size > 0) {
1427 /* compute the bit rate */
1428 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1429 (double)ic->duration;
1430 }
1431 }
1432 }
1433
1434 static void fill_all_stream_timings(AVFormatContext *ic)
1435 {
1436 int i;
1437 AVStream *st;
1438
1439 av_update_stream_timings(ic);
1440 for(i = 0;i < ic->nb_streams; i++) {
1441 st = ic->streams[i];
1442 if (st->start_time == AV_NOPTS_VALUE) {
1443 if(ic->start_time != AV_NOPTS_VALUE)
1444 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1445 if(ic->duration != AV_NOPTS_VALUE)
1446 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1447 }
1448 }
1449 }
1450
1451 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1452 {
1453 int64_t filesize, duration;
1454 int bit_rate, i;
1455 AVStream *st;
1456
1457 /* if bit_rate is already set, we believe it */
1458 if (ic->bit_rate == 0) {
1459 bit_rate = 0;
1460 for(i=0;i<ic->nb_streams;i++) {
1461 st = ic->streams[i];
1462 bit_rate += st->codec->bit_rate;
1463 }
1464 ic->bit_rate = bit_rate;
1465 }
1466
1467 /* if duration is already set, we believe it */
1468 if (ic->duration == AV_NOPTS_VALUE &&
1469 ic->bit_rate != 0 &&
1470 ic->file_size != 0) {
1471 filesize = ic->file_size;
1472 if (filesize > 0) {
1473 for(i = 0; i < ic->nb_streams; i++) {
1474 st = ic->streams[i];
1475 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1476 if (st->duration == AV_NOPTS_VALUE)
1477 st->duration = duration;
1478 }
1479 }
1480 }
1481 }
1482
1483 #define DURATION_MAX_READ_SIZE 250000
1484
1485 /* only usable for MPEG-PS streams */
1486 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1487 {
1488 AVPacket pkt1, *pkt = &pkt1;
1489 AVStream *st;
1490 int read_size, i, ret;
1491 int64_t end_time;
1492 int64_t filesize, offset, duration;
1493
1494 /* free previous packet */
1495 if (ic->cur_st && ic->cur_st->parser)
1496 av_free_packet(&ic->cur_pkt);
1497 ic->cur_st = NULL;
1498
1499 /* flush packet queue */
1500 flush_packet_queue(ic);
1501
1502 for(i=0;i<ic->nb_streams;i++) {
1503 st = ic->streams[i];
1504 if (st->parser) {
1505 av_parser_close(st->parser);
1506 st->parser= NULL;
1507 }
1508 }
1509
1510 /* we read the first packets to get the first PTS (not fully
1511 accurate, but it is enough for now) */
1512 url_fseek(&ic->pb, 0, SEEK_SET);
1513 read_size = 0;
1514 for(;;) {
1515 if (read_size >= DURATION_MAX_READ_SIZE)
1516 break;
1517 /* if all info is available, we can stop */
1518 for(i = 0;i < ic->nb_streams; i++) {
1519 st = ic->streams[i];
1520 if (st->start_time == AV_NOPTS_VALUE)
1521 break;
1522 }
1523 if (i == ic->nb_streams)
1524 break;
1525
1526 ret = av_read_packet(ic, pkt);
1527 if (ret != 0)
1528 break;
1529 read_size += pkt->size;
1530 st = ic->streams[pkt->stream_index];
1531 if (pkt->pts != AV_NOPTS_VALUE) {
1532 if (st->start_time == AV_NOPTS_VALUE)
1533 st->start_time = pkt->pts;
1534 }
1535 av_free_packet(pkt);
1536 }
1537
1538 /* estimate the end time (duration) */
1539 /* XXX: may need to support wrapping */
1540 filesize = ic->file_size;
1541 offset = filesize - DURATION_MAX_READ_SIZE;
1542 if (offset < 0)
1543 offset = 0;
1544
1545 url_fseek(&ic->pb, offset, SEEK_SET);
1546 read_size = 0;
1547 for(;;) {
1548 if (read_size >= DURATION_MAX_READ_SIZE)
1549 break;
1550
1551 ret = av_read_packet(ic, pkt);
1552 if (ret != 0)
1553 break;
1554 read_size += pkt->size;
1555 st = ic->streams[pkt->stream_index];
1556 if (pkt->pts != AV_NOPTS_VALUE &&
1557 st->start_time != AV_NOPTS_VALUE) {
1558 end_time = pkt->pts;
1559 duration = end_time - st->start_time;
1560 if (duration > 0) {
1561 if (st->duration == AV_NOPTS_VALUE ||
1562 st->duration < duration)
1563 st->duration = duration;
1564 }
1565 }
1566 av_free_packet(pkt);
1567 }
1568
1569 fill_all_stream_timings(ic);
1570
1571 url_fseek(&ic->pb, old_offset, SEEK_SET);
1572 for(i=0; i<ic->nb_streams; i++){
1573 st= ic->streams[i];
1574 st->cur_dts= st->first_dts;
1575 st->last_IP_pts = AV_NOPTS_VALUE;
1576 }
1577 }
1578
1579 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1580 {
1581 int64_t file_size;
1582
1583 /* get the file size, if possible */
1584 if (ic->iformat->flags & AVFMT_NOFILE) {
1585 file_size = 0;
1586 } else {
1587 file_size = url_fsize(&ic->pb);
1588 if (file_size < 0)
1589 file_size = 0;
1590 }
1591 ic->file_size = file_size;
1592
1593 if ((!strcmp(ic->iformat->name, "mpeg") ||
1594 !strcmp(ic->iformat->name, "mpegts")) &&
1595 file_size && !ic->pb.is_streamed) {
1596 /* get accurate estimate from the PTSes */
1597 av_estimate_timings_from_pts(ic, old_offset);
1598 } else if (av_has_duration(ic)) {
1599 /* at least one component has timings - we use them for all
1600 the components */
1601 fill_all_stream_timings(ic);
1602 } else {
1603 /* less precise: use bit rate info */
1604 av_estimate_timings_from_bit_rate(ic);
1605 }
1606 av_update_stream_timings(ic);
1607
1608 #if 0
1609 {
1610 int i;
1611 AVStream *st;
1612 for(i = 0;i < ic->nb_streams; i++) {
1613 st = ic->streams[i];
1614 printf("%d: start_time: %0.3f duration: %0.3f\n",
1615 i, (double)st->start_time / AV_TIME_BASE,
1616 (double)st->duration / AV_TIME_BASE);
1617 }
1618 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1619 (double)ic->start_time / AV_TIME_BASE,
1620 (double)ic->duration / AV_TIME_BASE,
1621 ic->bit_rate / 1000);
1622 }
1623 #endif
1624 }
1625
1626 static int has_codec_parameters(AVCodecContext *enc)
1627 {
1628 int val;
1629 switch(enc->codec_type) {
1630 case CODEC_TYPE_AUDIO:
1631 val = enc->sample_rate;
1632 break;
1633 case CODEC_TYPE_VIDEO:
1634 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1635 break;
1636 default:
1637 val = 1;
1638 break;
1639 }
1640 return (enc->codec_id != CODEC_ID_NONE && val != 0);
1641 }
1642
1643 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1644 {
1645 int16_t *samples;
1646 AVCodec *codec;
1647 int got_picture, data_size, ret=0;
1648 AVFrame picture;
1649
1650 if(!st->codec->codec){
1651 codec = avcodec_find_decoder(st->codec->codec_id);
1652 if (!codec)
1653 return -1;
1654 ret = avcodec_open(st->codec, codec);
1655 if (ret < 0)
1656 return ret;
1657 }
1658
1659 if(!has_codec_parameters(st->codec)){
1660 switch(st->codec->codec_type) {
1661 case CODEC_TYPE_VIDEO:
1662 ret = avcodec_decode_video(st->codec, &picture,
1663 &got_picture, (uint8_t *)data, size);
1664 break;
1665 case CODEC_TYPE_AUDIO:
1666 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1667 samples = av_malloc(data_size);
1668 if (!samples)
1669 goto fail;
1670 ret = avcodec_decode_audio2(st->codec, samples,
1671 &data_size, (uint8_t *)data, size);
1672 av_free(samples);
1673 break;
1674 default:
1675 break;
1676 }
1677 }
1678 fail:
1679 return ret;
1680 }
1681
1682 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
1683 {
1684 AVInputFormat *fmt;
1685 fmt = av_probe_input_format2(pd, 1, &score);
1686
1687 if (fmt) {
1688 if (strncmp(fmt->name, "mp3", 3) == 0)
1689 st->codec->codec_id = CODEC_ID_MP3;
1690 else if (strncmp(fmt->name, "ac3", 3) == 0)
1691 st->codec->codec_id = CODEC_ID_AC3;
1692 }
1693 return !!fmt;
1694 }
1695
1696 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1697 {
1698 while (tags->id != CODEC_ID_NONE) {
1699 if (tags->id == id)
1700 return tags->tag;
1701 tags++;
1702 }
1703 return 0;
1704 }
1705
1706 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1707 {
1708 int i;
1709 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1710 if(tag == tags[i].tag)
1711 return tags[i].id;
1712 }
1713 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1714 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1715 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1716 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1717 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1718 return tags[i].id;
1719 }
1720 return CODEC_ID_NONE;
1721 }
1722
1723 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1724 {
1725 int i;
1726 for(i=0; tags && tags[i]; i++){
1727 int tag= codec_get_tag(tags[i], id);
1728 if(tag) return tag;
1729 }
1730 return 0;
1731 }
1732
1733 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1734 {
1735 int i;
1736 for(i=0; tags && tags[i]; i++){
1737 enum CodecID id= codec_get_id(tags[i], tag);
1738 if(id!=CODEC_ID_NONE) return id;
1739 }
1740 return CODEC_ID_NONE;
1741 }
1742
1743 /* absolute maximum size we read until we abort */
1744 #define MAX_READ_SIZE 5000000
1745
1746 #define MAX_STD_TIMEBASES (60*12+5)
1747 static int get_std_framerate(int i){
1748 if(i<60*12) return i*1001;
1749 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1750 }
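/*
 * The candidate rates above are scaled by 12*1001 so that both exact and
 * NTSC-style rates become integers: for i < 60*12 the value i*1001
 * corresponds to i/12 fps (e.g. i == 300 -> 25 fps), while the trailing
 * {24,30,60,12,15}*1000*12 entries correspond to 24000/1001, 30000/1001,
 * 60000/1001, 12000/1001 and 15000/1001 fps. av_find_stream_info() below
 * accumulates the squared timing error against each candidate and keeps the
 * best match as r_frame_rate.
 */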
1751
1752 int av_find_stream_info(AVFormatContext *ic)
1753 {
1754 int i, count, ret, read_size, j;
1755 AVStream *st;
1756 AVPacket pkt1, *pkt;
1757 int64_t last_dts[MAX_STREAMS];
1758 int duration_count[MAX_STREAMS]={0};
1759 double (*duration_error)[MAX_STD_TIMEBASES];
1760 offset_t old_offset = url_ftell(&ic->pb);
1761 int64_t codec_info_duration[MAX_STREAMS]={0};
1762 int codec_info_nb_frames[MAX_STREAMS]={0};
1763 AVProbeData probe_data[MAX_STREAMS];
1764 int codec_identified[MAX_STREAMS]={0};
1765
1766 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1767 if (!duration_error) return AVERROR(ENOMEM);
1768
1769 for(i=0;i<ic->nb_streams;i++) {
1770 st = ic->streams[i];
1771 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1772 /* if(!st->time_base.num)
1773 st->time_base= */
1774 if(!st->codec->time_base.num)
1775 st->codec->time_base= st->time_base;
1776 }
1777 //only for the split stuff
1778 if (!st->parser) {
1779 st->parser = av_parser_init(st->codec->codec_id);
1780 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
1781 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1782 }
1783 }
1784 }
1785
1786 for(i=0;i<MAX_STREAMS;i++){
1787 last_dts[i]= AV_NOPTS_VALUE;
1788 }
1789
1790 memset(probe_data, 0, sizeof(probe_data));
1791 count = 0;
1792 read_size = 0;
1793 for(;;) {
1794 /* check if one codec still needs to be handled */
1795 for(i=0;i<ic->nb_streams;i++) {
1796 st = ic->streams[i];
1797 if (!has_codec_parameters(st->codec))
1798 break;
1799 /* variable fps and no guess at the real fps */
1800 if( (st->codec->time_base.den >= 101LL*st->codec->time_base.num || st->codec->codec_id == CODEC_ID_MPEG2VIDEO)
1801 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1802 break;
1803 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1804 break;
1805 if(st->first_dts == AV_NOPTS_VALUE)
1806 break;
1807 }
1808 if (i == ic->nb_streams) {
1809 /* NOTE: if the format has no header, then we need to read
1810 some packets to get most of the streams, so we cannot
1811 stop here */
1812 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1813 /* if we found the info for all the codecs, we can stop */
1814 ret = count;
1815 break;
1816 }
1817 }
1818 /* we did not get all the codec info, but we read too much data */
1819 if (read_size >= MAX_READ_SIZE) {
1820 ret = count;
1821 break;
1822 }
1823
1824 /* NOTE: a new stream can be added there if no header in file
1825 (AVFMTCTX_NOHEADER) */
1826 ret = av_read_frame_internal(ic, &pkt1);
1827 if (ret < 0) {
1828 /* EOF or error */
1829 ret = -1; /* we could not have all the codec parameters before EOF */
1830 for(i=0;i<ic->nb_streams;i++) {
1831 st = ic->streams[i];
1832 if (!has_codec_parameters(st->codec)){
1833 char buf[256];
1834 avcodec_string(buf, sizeof(buf), st->codec, 0);
1835 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1836 } else {
1837 ret = 0;
1838 }
1839 }
1840 break;
1841 }
1842
1843 pkt= add_to_pktbuf(ic, &pkt1);
1844 if(av_dup_packet(pkt) < 0)
1845 return AVERROR(ENOMEM);
1846
1847 read_size += pkt->size;
1848
1849 st = ic->streams[pkt->stream_index];
1850 if(codec_info_nb_frames[st->index]>1)
1851 codec_info_duration[st->index] += pkt->duration;
1852 if (pkt->duration != 0)
1853 codec_info_nb_frames[st->index]++;
1854
1855 {
1856 int index= pkt->stream_index;
1857 int64_t last= last_dts[index];
1858 int64_t duration= pkt->dts - last;
1859
1860 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1861 double dur= duration * av_q2d(st->time_base);
1862
1863 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1864 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
1865 if(duration_count[index] < 2)
1866 memset(duration_error, 0, MAX_STREAMS * sizeof(*duration_error));
1867 for(i=1; i<MAX_STD_TIMEBASES; i++){
1868 int framerate= get_std_framerate(i);
1869 int ticks= lrintf(dur*framerate/(1001*12));
1870 double error= dur - ticks*1001*12/(double)framerate;
1871 duration_error[index][i] += error*error;
1872 }
1873 duration_count[index]++;
1874 }
1875 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
1876 last_dts[pkt->stream_index]= pkt->dts;
1877
1878 if (st->codec->codec_id == CODEC_ID_NONE) {
1879 AVProbeData *pd = &(probe_data[st->index]);
1880 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
1881 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
1882 pd->buf_size += pkt->size;
1883 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
1884 }
1885 }
1886 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1887 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1888 if(i){
1889 st->codec->extradata_size= i;
1890 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1891 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1892 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1893 }
1894 }
1895
1896 /* if still no information, we try to open the codec and to
1897 decompress the frame. We try to avoid that in most cases as
1898 it takes longer and uses more memory. For MPEG4, we need to
1899 decompress for Quicktime. */
1900 if (!has_codec_parameters(st->codec) /*&&
1901 (st->codec->codec_id == CODEC_ID_FLV1 ||
1902 st->codec->codec_id == CODEC_ID_H264 ||
1903 st->codec->codec_id == CODEC_ID_H263 ||
1904 st->codec->codec_id == CODEC_ID_H261 ||
1905 st->codec->codec_id == CODEC_ID_VORBIS ||
1906 st->codec->codec_id == CODEC_ID_MJPEG ||
1907 st->codec->codec_id == CODEC_ID_PNG ||
1908 st->codec->codec_id == CODEC_ID_PAM ||
1909 st->codec->codec_id == CODEC_ID_PGM ||
1910 st->codec->codec_id == CODEC_ID_PGMYUV ||
1911 st->codec->codec_id == CODEC_ID_PBM ||
1912 st->codec->codec_id == CODEC_ID_PPM ||
1913 st->codec->codec_id == CODEC_ID_SHORTEN ||
1914 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1915 try_decode_frame(st, pkt->data, pkt->size);
1916
1917 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
1918 break;
1919 }
1920 count++;
1921 }
1922
1923 // close codecs which were opened in try_decode_frame()
1924 for(i=0;i<ic->nb_streams;i++) {
1925 st = ic->streams[i];
1926 if(st->codec->codec)
1927 avcodec_close(st->codec);
1928 }
1929 for(i=0;i<ic->nb_streams;i++) {
1930 st = ic->streams[i];
1931 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1932 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
1933 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
1934
1935 if(duration_count[i]
1936 && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) /*&&
1937 //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
1938 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
1939 double best_error= 2*av_q2d(st->time_base);
1940 best_error= best_error*best_error*duration_count[i]*1000*12*30;
1941
1942 for(j=1; j<MAX_STD_TIMEBASES; j++){
1943 double error= duration_error[i][j] * get_std_framerate(j);
1944 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1945 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
1946 if(error < best_error){
1947 best_error= error;
1948 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
1949 }
1950 }
1951 }
1952
1953 if (!st->r_frame_rate.num){
1954 if( st->codec->time_base.den * (int64_t)st->time_base.num
1955 <= st->codec->time_base.num * (int64_t)st->time_base.den){
1956 st->r_frame_rate.num = st->codec->time_base.den;
1957 st->r_frame_rate.den = st->codec->time_base.num;
1958 }else{
1959 st->r_frame_rate.num = st->time_base.den;
1960 st->r_frame_rate.den = st->time_base.num;
1961 }
1962 }
1963 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
1964 if (st->codec->codec_id == CODEC_ID_NONE && probe_data[st->index].buf_size > 0) {
1965 codec_identified[st->index] = set_codec_from_probe_data(st, &(probe_data[st->index]), 1);
1966 if (codec_identified[st->index]) {
1967 st->need_parsing = AVSTREAM_PARSE_FULL;
1968 }
1969 }
1970 if(!st->codec->bits_per_sample)
1971 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
1972 }
1973 }
1974
1975 av_estimate_timings(ic, old_offset);
1976
1977 for(i=0;i<ic->nb_streams;i++) {
1978 st = ic->streams[i];
1979 if (codec_identified[st->index])
1980 break;
1981 }
1982 //FIXME this is a mess
1983 if(i!=ic->nb_streams){
1984 av_read_frame_flush(ic);
1985 for(i=0;i<ic->nb_streams;i++) {
1986 st = ic->streams[i];
1987 if (codec_identified[st->index]) {
1988 av_seek_frame(ic, st->index, 0.0, 0);
1989 }
1990 st->cur_dts= st->first_dts;
1991 }
1992 url_fseek(&ic->pb, ic->data_offset, SEEK_SET);
1993 }
1994
1995 #if 0
1996 /* correct DTS for b frame streams with no timestamps */
1997 for(i=0;i<ic->nb_streams;i++) {
1998 st = ic->streams[i];
1999 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2000 if(b-frames){
2001 ppktl = &ic->packet_buffer;
2002 while(ppkt1){
2003 if(ppkt1->stream_index != i)
2004 continue;
2005 if(ppkt1->pkt->dts < 0)
2006 break;
2007 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2008 break;
2009 ppkt1->pkt->dts -= delta;
2010 ppkt1= ppkt1->next;
2011 }
2012 if(ppkt1)
2013 continue;
2014 st->cur_dts -= delta;
2015 }
2016 }
2017 }
2018 #endif
2019
2020 av_free(duration_error);
2021 for(i=0;i<MAX_STREAMS;i++){
2022 av_freep(&(probe_data[i].buf));
2023 }
2024
2025 return ret;
2026 }
2027
2028 /*******************************************************/
2029
2030 int av_read_play(AVFormatContext *s)
2031 {
2032 if (!s->iformat->read_play)
2033 return AVERROR(ENOSYS);
2034 return s->iformat->read_play(s);
2035 }
2036
2037 int av_read_pause(AVFormatContext *s)
2038 {
2039 if (!s->iformat->read_pause)
2040 return AVERROR(ENOSYS);
2041 return s->iformat->read_pause(s);
2042 }
2043
2044 void av_close_input_file(AVFormatContext *s)
2045 {
2046 int i, must_open_file;
2047 AVStream *st;
2048
2049 /* free previous packet */
2050 if (s->cur_st && s->cur_st->parser)
2051 av_free_packet(&s->cur_pkt);
2052
2053 if (s->iformat->read_close)
2054 s->iformat->read_close(s);
2055 for(i=0;i<s->nb_streams;i++) {
2056 /* free all data in a stream component */
2057 st = s->streams[i];
2058 if (st->parser) {
2059 av_parser_close(st->parser);
2060 }
2061 av_free(st->index_entries);
2062 av_free(st->codec->extradata);
2063 av_free(st->codec);
2064 av_free(st);
2065 }
2066 for(i=s->nb_programs-1; i>=0; i--) {
2067 av_freep(&s->programs[i]->provider_name);
2068 av_freep(&s->programs[i]->name);
2069 av_freep(&s->programs[i]->stream_index);
2070 av_freep(&s->programs[i]);
2071 }
2072 flush_packet_queue(s);
2073 must_open_file = 1;
2074 if (s->iformat->flags & AVFMT_NOFILE) {
2075 must_open_file = 0;
2076 }
2077 if (must_open_file) {
2078 url_fclose(&s->pb);
2079 }
2080 av_freep(&s->priv_data);
2081 av_free(s);
2082 }
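
/* Example: a minimal demuxing loop built on av_open_input_file(),
 * av_find_stream_info(), av_read_frame() and av_close_input_file() from this
 * file's public interface.  This is an illustrative sketch only (no such
 * helper exists in libavformat) and error handling is reduced to the bare
 * minimum. */
#if 0
static int example_read_all_packets(const char *filename)
{
    AVFormatContext *ic;
    AVPacket pkt;

    av_register_all();                  /* register (de)muxers once at startup */
    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return -1;
    if (av_find_stream_info(ic) < 0) {
        av_close_input_file(ic);
        return -1;
    }
    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.data/pkt.size belong to the stream ic->streams[pkt.stream_index] */
        av_free_packet(&pkt);
    }
    av_close_input_file(ic);
    return 0;
}
#endif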
2083
2084 AVStream *av_new_stream(AVFormatContext *s, int id)
2085 {
2086 AVStream *st;
2087 int i;
2088
2089 if (s->nb_streams >= MAX_STREAMS)
2090 return NULL;
2091
2092 st = av_mallocz(sizeof(AVStream));
2093 if (!st)
2094 return NULL;
2095
2096 st->codec= avcodec_alloc_context();
2097 if (s->iformat) {
2098 /* no default bitrate if decoding */
2099 st->codec->bit_rate = 0;
2100 }
2101 st->index = s->nb_streams;
2102 st->id = id;
2103 st->start_time = AV_NOPTS_VALUE;
2104 st->duration = AV_NOPTS_VALUE;
2105 st->cur_dts = AV_NOPTS_VALUE;
2106 st->first_dts = AV_NOPTS_VALUE;
2107
2108 /* default pts setting is MPEG-like */
2109 av_set_pts_info(st, 33, 1, 90000);
2110 st->last_IP_pts = AV_NOPTS_VALUE;
2111 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2112 st->pts_buffer[i]= AV_NOPTS_VALUE;
2113
2114 s->streams[s->nb_streams++] = st;
2115 return st;
2116 }
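
/* Example: sketch of how a demuxer's read_header() callback typically uses
 * av_new_stream().  The function below is not a real demuxer; the codec id and
 * audio parameters are purely illustrative. */
#if 0
static int example_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVStream *st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type  = CODEC_TYPE_AUDIO;
    st->codec->codec_id    = CODEC_ID_PCM_S16LE;
    st->codec->channels    = 2;
    st->codec->sample_rate = 44100;
    av_set_pts_info(st, 64, 1, st->codec->sample_rate); /* override the 90 kHz default */
    return 0;
}
#endif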
2117
2118 AVProgram *av_new_program(AVFormatContext *ac, int id)
2119 {
2120 AVProgram *program=NULL;
2121 int i;
2122
2123 #ifdef DEBUG_SI
2124 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2125 #endif
2126
2127 for(i=0; i<ac->nb_programs; i++)
2128 if(ac->programs[i]->id == id)
2129 program = ac->programs[i];
2130
2131 if(!program){
2132 program = av_mallocz(sizeof(AVProgram));
2133 if (!program)
2134 return NULL;
2135 dynarray_add(&ac->programs, &ac->nb_programs, program);
2136 program->discard = AVDISCARD_NONE;
2137 }
2138 program->id = id;
2139
2140 return program;
2141 }
2142
2143 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2144 {
2145 assert(!provider_name == !name);
2146 if(name){
2147 av_free(program->provider_name);
2148 av_free(program-> name);
2149 program->provider_name = av_strdup(provider_name);
2150 program-> name = av_strdup( name);
2151 }
2152 }
2153
2154
2155 /************************************************************/
2156 /* output media file */
2157
2158 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2159 {
2160 int ret;
2161
2162 if (s->oformat->priv_data_size > 0) {
2163 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2164 if (!s->priv_data)
2165 return AVERROR(ENOMEM);
2166 } else
2167 s->priv_data = NULL;
2168
2169 if (s->oformat->set_parameters) {
2170 ret = s->oformat->set_parameters(s, ap);
2171 if (ret < 0)
2172 return ret;
2173 }
2174 return 0;
2175 }
2176
2177 int av_write_header(AVFormatContext *s)
2178 {
2179 int ret, i;
2180 AVStream *st;
2181
2182 // some sanity checks
2183 for(i=0;i<s->nb_streams;i++) {
2184 st = s->streams[i];
2185
2186 switch (st->codec->codec_type) {
2187 case CODEC_TYPE_AUDIO:
2188 if(st->codec->sample_rate<=0){
2189 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2190 return -1;
2191 }
2192 break;
2193 case CODEC_TYPE_VIDEO:
2194 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2195 av_log(s, AV_LOG_ERROR, "time base not set\n");
2196 return -1;
2197 }
2198 if(st->codec->width<=0 || st->codec->height<=0){
2199 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2200 return -1;
2201 }
2202 break;
2203 }
2204
2205 if(s->oformat->codec_tag){
2206 if(st->codec->codec_tag){
2207 //FIXME
2208 //check that tag + id is in the table
2209 //if neither is in the table -> ok
2210 //if tag is in the table with another id -> FAIL
2211 //if id is in the table with another tag -> FAIL unless strict < ?
2212 }else
2213 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2214 }
2215 }
2216
2217 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2218 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2219 if (!s->priv_data)
2220 return AVERROR(ENOMEM);
2221 }
2222
2223 if(s->oformat->write_header){
2224 ret = s->oformat->write_header(s);
2225 if (ret < 0)
2226 return ret;
2227 }
2228
2229 /* init PTS generation */
2230 for(i=0;i<s->nb_streams;i++) {
2231 int64_t den = AV_NOPTS_VALUE;
2232 st = s->streams[i];
2233
2234 switch (st->codec->codec_type) {
2235 case CODEC_TYPE_AUDIO:
2236 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2237 break;
2238 case CODEC_TYPE_VIDEO:
2239 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2240 break;
2241 default:
2242 break;
2243 }
2244 if (den != AV_NOPTS_VALUE) {
2245 if (den <= 0)
2246 return AVERROR_INVALIDDATA;
2247 av_frac_init(&st->pts, 0, 0, den);
2248 }
2249 }
2250 return 0;
2251 }
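
/* Example: sketch of an output context set up so that the sanity checks in
 * av_write_header() pass.  This is a hypothetical helper; the container is
 * guessed from the filename, the codec and the 352x288@25 parameters are
 * illustrative, and the AVFormatContext is assumed to have been allocated
 * beforehand (e.g. with av_alloc_format_context()). */
#if 0
static int example_open_muxer(AVFormatContext *oc, const char *filename)
{
    AVStream *st;

    oc->oformat = guess_format(NULL, filename, NULL);
    if (!oc->oformat || av_set_parameters(oc, NULL) < 0)
        return -1;

    st = av_new_stream(oc, 0);
    if (!st)
        return -1;
    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_MPEG4;
    st->codec->width      = 352;                 /* "dimensions not set" check */
    st->codec->height     = 288;
    st->codec->time_base  = (AVRational){1, 25}; /* "time base not set" check */

    if (!(oc->oformat->flags & AVFMT_NOFILE) &&
        url_fopen(&oc->pb, filename, URL_WRONLY) < 0)
        return -1;
    return av_write_header(oc);
}
#endif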
2252
2253 //FIXME merge with compute_pkt_fields
2254 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2255 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2256 int num, den, frame_size, i;
2257
2258 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2259
2260 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2261 return -1;*/
2262
2263 /* duration field */
2264 if (pkt->duration == 0) {
2265 compute_frame_duration(&num, &den, st, NULL, pkt);
2266 if (den && num) {
2267 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2268 }
2269 }
2270
2271 //XXX/FIXME this is a temporary hack until all encoders output pts
2272 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2273 pkt->dts=
2274 // pkt->pts= st->cur_dts;
2275 pkt->pts= st->pts.val;
2276 }
2277
2278 //calculate dts from pts
2279 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2280 st->pts_buffer[0]= pkt->pts;
2281 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2282 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2283 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2284 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2285
2286 pkt->dts= st->pts_buffer[0];
2287 }
2288
2289 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2290 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2291 return -1;
2292 }
2293 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2294 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2295 return -1;
2296 }
2297
2298 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2299 st->cur_dts= pkt->dts;
2300 st->pts.val= pkt->dts;
2301
2302 /* update pts */
2303 switch (st->codec->codec_type) {
2304 case CODEC_TYPE_AUDIO:
2305 frame_size = get_audio_frame_size(st->codec, pkt->size);
2306
2307 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2308 but it would be better if we had the real timestamps from the encoder */
2309 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2310 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2311 }
2312 break;
2313 case CODEC_TYPE_VIDEO:
2314 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2315 break;
2316 default:
2317 break;
2318 }
2319 return 0;
2320 }
2321
2322 static void truncate_ts(AVStream *st, AVPacket *pkt){
2323 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2324
2325 // if(pkt->dts < 0)
2326 // pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2327
2328 if (pkt->pts != AV_NOPTS_VALUE)
2329 pkt->pts &= pts_mask;
2330 if (pkt->dts != AV_NOPTS_VALUE)
2331 pkt->dts &= pts_mask;
2332 }
2333
2334 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2335 {
2336 int ret;
2337
2338 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2339 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2340 return ret;
2341
2342 truncate_ts(s->streams[pkt->stream_index], pkt);
2343
2344 ret= s->oformat->write_packet(s, pkt);
2345 if(!ret)
2346 ret= url_ferror(&s->pb);
2347 return ret;
2348 }
2349
2350 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2351 AVPacketList *pktl, **next_point, *this_pktl;
2352 int stream_count=0;
2353 int streams[MAX_STREAMS];
2354
2355 if(pkt){
2356 AVStream *st= s->streams[ pkt->stream_index];
2357
2358 // assert(pkt->destruct != av_destruct_packet); //FIXME
2359
2360 this_pktl = av_mallocz(sizeof(AVPacketList));
2361 this_pktl->pkt= *pkt;
2362 if(pkt->destruct == av_destruct_packet)
2363 pkt->destruct= NULL; // non shared -> must keep original from being freed
2364 else
2365 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2366
2367 next_point = &s->packet_buffer;
2368 while(*next_point){
2369 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2370 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2371 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2372 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2373 break;
2374 next_point= &(*next_point)->next;
2375 }
2376 this_pktl->next= *next_point;
2377 *next_point= this_pktl;
2378 }
2379
2380 memset(streams, 0, sizeof(streams));
2381 pktl= s->packet_buffer;
2382 while(pktl){
2383 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2384 if(streams[ pktl->pkt.stream_index ] == 0)
2385 stream_count++;
2386 streams[ pktl->pkt.stream_index ]++;
2387 pktl= pktl->next;
2388 }
2389
2390 if(s->nb_streams == stream_count || (flush && stream_count)){
2391 pktl= s->packet_buffer;
2392 *out= pktl->pkt;
2393
2394 s->packet_buffer= pktl->next;
2395 av_freep(&pktl);
2396 return 1;
2397 }else{
2398 av_init_packet(out);
2399 return 0;
2400 }
2401 }
2402
2403 /**
2404 * Interleaves an AVPacket correctly so it can be muxed.
2405 * @param out the interleaved packet will be output here
2406 * @param in the input packet
2407 * @param flush 1 if no further packets are available as input and all
2408 * remaining packets should be output
2409 * @return 1 if a packet was output, 0 if no packet could be output,
2410 * < 0 if an error occurred
2411 */
2412 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2413 if(s->oformat->interleave_packet)
2414 return s->oformat->interleave_packet(s, out, in, flush);
2415 else
2416 return av_interleave_packet_per_dts(s, out, in, flush);
2417 }
2418
2419 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2420 AVStream *st= s->streams[ pkt->stream_index];
2421
2422 //FIXME/XXX/HACK drop zero sized packets
2423 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2424 return 0;
2425
2426 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2427 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2428 return -1;
2429
2430 if(pkt->dts == AV_NOPTS_VALUE)
2431 return -1;
2432
2433 for(;;){
2434 AVPacket opkt;
2435 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2436 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2437 return ret;
2438
2439 truncate_ts(s->streams[opkt.stream_index], &opkt);
2440 ret= s->oformat->write_packet(s, &opkt);
2441
2442 av_free_packet(&opkt);
2443 pkt= NULL;
2444
2445 if(ret<0)
2446 return ret;
2447 if(url_ferror(&s->pb))
2448 return url_ferror(&s->pb);
2449 }
2450 }
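
/* Note/example: av_interleave_packet_per_dts() holds packets back until every
 * stream has at least one queued (or flush is set) and then emits them in dts
 * order, so a single av_interleaved_write_frame() call may legitimately write
 * nothing yet; the backlog is drained by av_write_trailer() below, which calls
 * the interleaver with flush=1.  Typical use (a sketch; pkt comes from an
 * encoder with pts/dts already rescaled to st->time_base):
 *
 *     pkt.stream_index = st->index;
 *     if (av_interleaved_write_frame(oc, &pkt) < 0)
 *         return -1;
 */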
2451
2452 int av_write_trailer(AVFormatContext *s)
2453 {
2454 int ret, i;
2455
2456 for(;;){
2457 AVPacket pkt;
2458 ret= av_interleave_packet(s, &pkt, NULL, 1);
2459 if(ret<0) //FIXME cleanup needed for ret<0 ?
2460 goto fail;
2461 if(!ret)
2462 break;
2463
2464 truncate_ts(s->streams[pkt.stream_index], &pkt);
2465 ret= s->oformat->write_packet(s, &pkt);
2466
2467 av_free_packet(&pkt);
2468
2469 if(ret<0)
2470 goto fail;
2471 if(url_ferror(&s->pb))
2472 goto fail;
2473 }
2474
2475 if(s->oformat->write_trailer)
2476 ret = s->oformat->write_trailer(s);
2477 fail:
2478 if(ret == 0)
2479 ret=url_ferror(&s->pb);
2480 for(i=0;i<s->nb_streams;i++)
2481 av_freep(&s->streams[i]->priv_data);
2482 av_freep(&s->priv_data);
2483 return ret;
2484 }
2485
2486 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2487 {
2488 int i, j;
2489 AVProgram *program=NULL;
2490 void *tmp;
2491
2492 for(i=0; i<ac->nb_programs; i++){
2493 if(ac->programs[i]->id != progid)
2494 continue;
2495 program = ac->programs[i];
2496 for(j=0; j<program->nb_stream_indexes; j++)
2497 if(program->stream_index[j] == idx)
2498 return;
2499
2500 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2501 if(!tmp)
2502 return;
2503 program->stream_index = tmp;
2504 program->stream_index[program->nb_stream_indexes++] = idx;
2505 return;
2506 }
2507 }
2508
2509 /* "user interface" functions */
2510 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2511 {
2512 char buf[256];
2513 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2514 AVStream *st = ic->streams[i];
2515 int g = ff_gcd(st->time_base.num, st->time_base.den);
2516 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2517 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2518 /* the pid is important information, so we display it */
2519 /* XXX: add a generic system */
2520 if (flags & AVFMT_SHOW_IDS)
2521 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2522 if (strlen(st->language) > 0)
2523 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2524 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2525 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2526 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2527 if(st->r_frame_rate.den && st->r_frame_rate.num)
2528 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
2529 /* else if(st->time_base.den && st->time_base.num)
2530 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
2531 else
2532 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
2533 }
2534 av_log(NULL, AV_LOG_INFO, "\n");
2535 }
2536
2537 void dump_format(AVFormatContext *ic,
2538 int index,
2539 const char *url,
2540 int is_output)
2541 {
2542 int i;
2543
2544 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2545 is_output ? "Output" : "Input",
2546 index,
2547 is_output ? ic->oformat->name : ic->iformat->name,
2548 is_output ? "to" : "from", url);
2549 if (!is_output) {
2550 av_log(NULL, AV_LOG_INFO, " Duration: ");
2551 if (ic->duration != AV_NOPTS_VALUE) {
2552 int hours, mins, secs, us;
2553 secs = ic->duration / AV_TIME_BASE;
2554 us = ic->duration % AV_TIME_BASE;
2555 mins = secs / 60;
2556 secs %= 60;
2557 hours = mins / 60;
2558 mins %= 60;
2559 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2560 (10 * us) / AV_TIME_BASE);
2561 } else {
2562 av_log(NULL, AV_LOG_INFO, "N/A");
2563 }
2564 if (ic->start_time != AV_NOPTS_VALUE) {
2565 int secs, us;
2566 av_log(NULL, AV_LOG_INFO, ", start: ");
2567 secs = ic->start_time / AV_TIME_BASE;
2568 us = ic->start_time % AV_TIME_BASE;
2569 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2570 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2571 }
2572 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2573 if (ic->bit_rate) {
2574 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2575 } else {
2576 av_log(NULL, AV_LOG_INFO, "N/A");
2577 }
2578 av_log(NULL, AV_LOG_INFO, "\n");
2579 }
2580 if(ic->nb_programs) {
2581 int j, k;
2582 for(j=0; j<ic->nb_programs; j++) {
2583 av_log(NULL, AV_LOG_INFO, " Program %d", ic->programs[j]->id);
2584 if(ic->programs[j]->name)
2585 av_log(NULL, AV_LOG_INFO, " \"%s\"\n", ic->programs[j]->name);
2586 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2587 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2588 }
2589 } else
2590 for(i=0;i<ic->nb_streams;i++)
2591 dump_stream_format(ic, i, index, is_output);
2592 }
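
/* Example: the "index" argument is only used for the "#%d" label in the log
 * output (conventionally the position of the context in the caller's own
 * list, or 0), and is_output selects between iformat and oformat:
 *
 *     dump_format(ic, 0, filename, 0);   // describe an opened input
 *     dump_format(oc, 0, filename, 1);   // describe an output context before muxing
 */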
2593
2594 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2595 {
2596 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2597 }
2598
2599 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2600 {
2601 AVRational frame_rate;
2602 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2603 *frame_rate_num= frame_rate.num;
2604 *frame_rate_den= frame_rate.den;
2605 return ret;
2606 }
2607
2608 /**
2609 * Gets the current time in microseconds.
2610 */
2611 int64_t av_gettime(void)
2612 {
2613 struct timeval tv;
2614 gettimeofday(&tv,NULL);
2615 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2616 }
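
/* Example: measuring elapsed wall-clock time (do_work() is a placeholder):
 *
 *     int64_t t0 = av_gettime();
 *     do_work();
 *     av_log(NULL, AV_LOG_INFO, "took %"PRId64" us\n", av_gettime() - t0);
 */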
2617
2618 int64_t parse_date(const char *datestr, int duration)
2619 {
2620 const char *p;
2621 int64_t t;
2622 struct tm dt;
2623 int i;
2624 static const char *date_fmt[] = {
2625 "%Y-%m-%d",
2626 "%Y%m%d",
2627 };
2628 static const char *time_fmt[] = {
2629 "%H:%M:%S",
2630 "%H%M%S",
2631 };
2632 const char *q;
2633 int is_utc, len;
2634 char lastch;
2635 int negative = 0;
2636
2637 #undef time
2638 time_t now = time(0);
2639
2640 len = strlen(datestr);
2641 if (len > 0)
2642 lastch = datestr[len - 1];
2643 else
2644 lastch = '\0';
2645 is_utc = (lastch == 'z' || lastch == 'Z');
2646
2647 memset(&dt, 0, sizeof(dt));
2648
2649 p = datestr;
2650 q = NULL;
2651 if (!duration) {
2652 /* parse the year-month-day part */
2653 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2654 q = small_strptime(p, date_fmt[i], &dt);
2655 if (q) {
2656 break;
2657 }
2658 }
2659
2660 /* if the year-month-day part is missing, then take the
2661 * current year-month-day time */
2662 if (!q) {
2663 if (is_utc) {
2664 dt = *gmtime(&now);
2665 } else {
2666 dt = *localtime(&now);
2667 }
2668 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2669 } else {
2670 p = q;
2671 }
2672
2673 if (*p == 'T' || *p == 't' || *p == ' ')
2674 p++;
2675
2676 /* parse the hour-minute-second part */
2677 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2678 q = small_strptime(p, time_fmt[i], &dt);
2679 if (q) {
2680 break;
2681 }
2682 }
2683 } else {
2684 /* parse datestr as a duration */
2685 if (p[0] == '-') {
2686 negative = 1;
2687 ++p;
2688 }
2689 /* parse datestr as HH:MM:SS */
2690 q = small_strptime(p, time_fmt[0], &dt);
2691 if (!q) {
2692 /* parse datestr as S+ */
2693 dt.tm_sec = strtol(p, (char **)&q, 10);
2694 if (q == p)
2695 /* the parsing didn't succeed */
2696 return INT64_MIN;
2697 dt.tm_min = 0;
2698 dt.tm_hour = 0;
2699 }
2700 }
2701
2702 /* Now we have all the fields that we can get */
2703 if (!q) {
2704 return INT64_MIN;
2705 }
2706
2707 if (duration) {
2708 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2709 } else {
2710 dt.tm_isdst = -1; /* unknown */
2711 if (is_utc) {
2712 t = mktimegm(&dt);
2713 } else {
2714 t = mktime(&dt);
2715 }
2716 }
2717
2718 t *= 1000000;
2719
2720 /* parse the optional fractional-second part (e.g. ".123") */
2721 if (*q == '.') {
2722 int val, n;
2723 q++;
2724 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2725 if (!isdigit(*q))
2726 break;
2727 val += n * (*q - '0');
2728 }
2729 t += val;
2730 }
2731 return negative ? -t : t;
2732 }
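
/* Examples (the values follow directly from the parsing rules above):
 *
 *     parse_date("2007-03-05T12:30:00Z", 0);  // absolute UTC date -> microseconds since the epoch
 *     parse_date("00:01:30.500", 1);          // duration -> 90500000 (90.5 seconds)
 *     parse_date("-10", 1);                   // duration -> -10000000
 *     parse_date("junk", 1);                  // unparsable -> INT64_MIN
 */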
2733
2734 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2735 {
2736 const char *p;
2737 char tag[128], *q;
2738
2739 p = info;
2740 if (*p == '?')
2741 p++;
2742 for(;;) {
2743 q = tag;
2744 while (*p != '\0' && *p != '=' && *p != '&') {
2745 if ((q - tag) < sizeof(tag) - 1)
2746 *q++ = *p;
2747 p++;
2748 }
2749 *q = '\0';
2750 q = arg;
2751 if (*p == '=') {
2752 p++;
2753 while (*p != '&' && *p != '\0') {
2754 if ((q - arg) < arg_size - 1) {
2755 if (*p == '+')
2756 *q++ = ' ';
2757 else
2758 *q++ = *p;
2759 }
2760 p++;
2761 }
2762 *q = '\0';
2763 }
2764 if (!strcmp(tag, tag1))
2765 return 1;
2766 if (*p != '&')
2767 break;
2768 p++;
2769 }
2770 return 0;
2771 }
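
/* Example: extracting one tag from a URL-style info string ('+' in a value is
 * decoded to a space):
 *
 *     char value[64];
 *     if (find_info_tag(value, sizeof(value), "codec", "?codec=mp3&rate=44100"))
 *         av_log(NULL, AV_LOG_INFO, "codec=%s\n", value);   // prints "codec=mp3"
 */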
2772
2773 int av_get_frame_filename(char *buf, int buf_size,
2774 const char *path, int number)
2775 {
2776 const char *p;
2777 char *q, buf1[20], c;
2778 int nd, len, percentd_found;
2779
2780 q = buf;
2781 p = path;
2782 percentd_found = 0;
2783 for(;;) {
2784 c = *p++;
2785 if (c == '\0')
2786 break;
2787 if (c == '%') {
2788 do {
2789 nd = 0;
2790 while (isdigit(*p)) {
2791 nd = nd * 10 + *p++ - '0';
2792 }
2793 c = *p++;
2794 } while (isdigit(c));
2795
2796 switch(c) {
2797 case '%':
2798 goto addchar;
2799 case 'd':
2800 if (percentd_found)
2801 goto fail;
2802 percentd_found = 1;
2803 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2804 len = strlen(buf1);
2805 if ((q - buf + len) > buf_size - 1)
2806 goto fail;
2807 memcpy(q, buf1, len);
2808 q += len;
2809 break;
2810 default:
2811 goto fail;
2812 }
2813 } else {
2814 addchar:
2815 if ((q - buf) < buf_size - 1)
2816 *q++ = c;
2817 }
2818 }
2819 if (!percentd_found)
2820 goto fail;
2821 *q = '\0';
2822 return 0;
2823 fail:
2824 *q = '\0';
2825 return -1;
2826 }
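
/* Examples:
 *
 *     av_get_frame_filename(buf, sizeof(buf), "img%03d.jpg", 7); // buf = "img007.jpg", returns 0
 *     av_get_frame_filename(buf, sizeof(buf), "img.jpg",     7); // no %d            -> returns -1
 *     av_get_frame_filename(buf, sizeof(buf), "a%db%d.jpg",  7); // more than one %d -> returns -1
 */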
2827
2828 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
2829 {
2830 int len, i, j, c;
2831 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2832
2833 for(i=0;i<size;i+=16) {
2834 len = size - i;
2835 if (len > 16)
2836 len = 16;
2837 PRINT("%08x ", i);
2838 for(j=0;j<16;j++) {
2839 if (j < len)
2840 PRINT(" %02x", buf[i+j]);
2841 else
2842 PRINT(" ");
2843 }
2844 PRINT(" ");
2845 for(j=0;j<len;j++) {
2846 c = buf[i+j];
2847 if (c < ' ' || c > '~')
2848 c = '.';
2849 PRINT("%c", c);
2850 }
2851 PRINT("\n");
2852 }
2853 #undef PRINT
2854 }
2855
2856 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2857 {
2858 hex_dump_internal(NULL, f, 0, buf, size);
2859 }
2860
2861 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
2862 {
2863 hex_dump_internal(avcl, NULL, level, buf, size);
2864 }
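
/* Example: both variants print 16 bytes per line as offset, hex bytes and
 * printable ASCII; one goes to a stdio stream, the other through av_log():
 *
 *     av_hex_dump(stdout, pkt->data, pkt->size);
 *     av_hex_dump_log(NULL, AV_LOG_DEBUG, pkt->data, pkt->size);
 */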
2865
2866 //FIXME needs to know the time_base
2867 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
2868 {
2869 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2870 PRINT("stream #%d:\n", pkt->stream_index);
2871 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2872 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2873 /* DTS is _always_ valid after av_read_frame() */
2874 PRINT(" dts=");
2875 if (pkt->dts == AV_NOPTS_VALUE)
2876 PRINT("N/A");
2877 else
2878 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
2879 /* PTS may not be known if B-frames are present */
2880 PRINT(" pts=");
2881 if (pkt->pts == AV_NOPTS_VALUE)
2882 PRINT("N/A");
2883 else
2884 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
2885 PRINT("\n");
2886 PRINT(" size=%d\n", pkt->size);
2887 #undef PRINT
2888 if (dump_payload)
2889 av_hex_dump(f, pkt->data, pkt->size);
2890 }
2891
2892 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2893 {
2894 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
2895 }
2896
2897 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
2898 {
2899 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
2900 }
2901
2902 void url_split(char *proto, int proto_size,
2903 char *authorization, int authorization_size,
2904 char *hostname, int hostname_size,
2905 int *port_ptr,
2906 char *path, int path_size,
2907 const char *url)
2908 {
2909 const char *p, *ls, *at, *col, *brk, *q;
2910
2911 if (port_ptr) *port_ptr = -1;
2912 if (proto_size > 0) proto[0] = 0;
2913 if (authorization_size > 0) authorization[0] = 0;
2914 if (hostname_size > 0) hostname[0] = 0;
2915 if (path_size > 0) path[0] = 0;
2916
2917 /* parse protocol */
2918 if ((p = strchr(url, ':'))) {
2919 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
2920 p++; /* skip ':' */
2921 if (*p == '/') p++;
2922 if (*p == '/') p++;
2923 } else {
2924 /* no protocol means plain filename */
2925 av_strlcpy(path, url, path_size);
2926 return;
2927 }
2928
2929 /* separate path from hostname */
2930 if ((ls = strchr(p, '/'))) {
2931 if ((q = strchr(ls, '?')))
2932 av_strlcpy(path, ls, FFMIN(path_size, q - ls + 1));
2933 else
2934 av_strlcpy(path, ls, path_size);
2935 } else if (!(ls = strchr(p, '?')))
2936 ls = &p[strlen(p)]; // XXX
2937
2938 /* the rest is hostname, use that to parse auth/port */
2939 if (ls != p) {
2940 /* authorization (user[:pass]@hostname) */
2941 if ((at = strchr(p, '@')) && at < ls) {
2942 av_strlcpy(authorization, p,
2943 FFMIN(authorization_size, at + 1 - p));
2944 p = at + 1; /* skip '@' */
2945 }
2946
2947 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
2948 /* [host]:port */
2949 av_strlcpy(hostname, p + 1,
2950 FFMIN(hostname_size, brk - p));
2951 if (brk[1] == ':' && port_ptr)
2952 *port_ptr = atoi(brk + 2);
2953 } else if ((col = strchr(p, ':')) && col < ls) {
2954 av_strlcpy(hostname, p,
2955 FFMIN(col + 1 - p, hostname_size));
2956 if (port_ptr) *port_ptr = atoi(col + 1);
2957 } else
2958 av_strlcpy(hostname, p,
2959 FFMIN(ls + 1 - p, hostname_size));
2960 }
2961 }
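
/* Example: for "http://user:pass@server.example.com:8080/live/stream?tok=x"
 * url_split() fills in:
 *
 *     proto         = "http"
 *     authorization = "user:pass"
 *     hostname      = "server.example.com"
 *     *port_ptr     = 8080
 *     path          = "/live/stream"   (the query string after '?' is dropped)
 */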
2962
2963 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2964 int pts_num, int pts_den)
2965 {
2966 s->pts_wrap_bits = pts_wrap_bits;
2967 s->time_base.num = pts_num;
2968 s->time_base.den = pts_den;
2969 }
2970
2971 /* fraction handling */
2972
2973 /**
2974 * f = val + (num / den) + 0.5.
2975 *
2976 * 'num' is normalized so that 0 <= num < den.
2977 *
2978 * @param f fractional number
2979 * @param val integer value
2980 * @param num must be >= 0
2981 * @param den must be >= 1
2982 */
2983 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2984 {
2985 num += (den >> 1);
2986 if (num >= den) {
2987 val += num / den;
2988 num = num % den;
2989 }
2990 f->val = val;
2991 f->num = num;
2992 f->den = den;
2993 }
2994
2995 /**
2996 * Fractional addition to f: f = f + (incr / f->den).
2997 *
2998 * @param f fractional number
2999 * @param incr increment, can be positive or negative
3000 */
3001 static void av_frac_add(AVFrac *f, int64_t incr)
3002 {
3003 int64_t num, den;
3004
3005 num = f->num + incr;
3006 den = f->den;
3007 if (num < 0) {
3008 f->val += num / den;
3009 num = num % den;
3010 if (num < 0) {
3011 num += den;
3012 f->val--;
3013 }
3014 } else if (num >= den) {
3015 f->val += num / den;
3016 num = num % den;
3017 }
3018 f->num = num;
3019 }
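
/* Worked example: for an audio stream av_write_header() initializes the pts
 * fraction with den = time_base.num * sample_rate, and compute_pkt_fields2()
 * then calls av_frac_add(&st->pts, time_base.den * frame_size) per packet.
 * With time_base = 1/44100, sample_rate = 44100 and 1152-sample frames
 * (the numbers are illustrative):
 *
 *     av_frac_init(&st->pts, 0, 0, (int64_t)st->time_base.num * 44100); // den = 44100
 *     av_frac_add(&st->pts, (int64_t)st->time_base.den * 1152);         // pts.val += 1152
 *
 * so pts.val advances by exactly one tick per sample with no rounding drift.
 */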