correct initial timestamps which have AV_NOPTS_VALUE
1 /*
2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "allformats.h"
23 #include "opt.h"
24 #include "avstring.h"
25 #include "riff.h"
26 #include <sys/time.h>
27 #include <time.h>
28
29 #undef NDEBUG
30 #include <assert.h>
31
32 /**
33 * @file libavformat/utils.c
34 * Various utility functions for using the FFmpeg library.
35 */
36
37 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
38 static void av_frac_add(AVFrac *f, int64_t incr);
39
40 /** head of registered input format linked list. */
41 AVInputFormat *first_iformat = NULL;
42 /** head of registered output format linked list. */
43 AVOutputFormat *first_oformat = NULL;
44
45 void av_register_input_format(AVInputFormat *format)
46 {
47 AVInputFormat **p;
48 p = &first_iformat;
49 while (*p != NULL) p = &(*p)->next;
50 *p = format;
51 format->next = NULL;
52 }
53
54 void av_register_output_format(AVOutputFormat *format)
55 {
56 AVOutputFormat **p;
57 p = &first_oformat;
58 while (*p != NULL) p = &(*p)->next;
59 *p = format;
60 format->next = NULL;
61 }
62
63 int match_ext(const char *filename, const char *extensions)
64 {
65 const char *ext, *p;
66 char ext1[32], *q;
67
68 if(!filename)
69 return 0;
70
71 ext = strrchr(filename, '.');
72 if (ext) {
73 ext++;
74 p = extensions;
75 for(;;) {
76 q = ext1;
77 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
78 *q++ = *p++;
79 *q = '\0';
80 if (!strcasecmp(ext1, ext))
81 return 1;
82 if (*p == '\0')
83 break;
84 p++;
85 }
86 }
87 return 0;
88 }
89
90 AVOutputFormat *guess_format(const char *short_name, const char *filename,
91 const char *mime_type)
92 {
93 AVOutputFormat *fmt, *fmt_found;
94 int score_max, score;
95
96 /* specific test for image sequences */
97 #ifdef CONFIG_IMAGE2_MUXER
98 if (!short_name && filename &&
99 av_filename_number_test(filename) &&
100 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
101 return guess_format("image2", NULL, NULL);
102 }
103 #endif
104 /* find the proper file type */
105 fmt_found = NULL;
106 score_max = 0;
107 fmt = first_oformat;
108 while (fmt != NULL) {
109 score = 0;
110 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
111 score += 100;
112 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
113 score += 10;
114 if (filename && fmt->extensions &&
115 match_ext(filename, fmt->extensions)) {
116 score += 5;
117 }
118 if (score > score_max) {
119 score_max = score;
120 fmt_found = fmt;
121 }
122 fmt = fmt->next;
123 }
124 return fmt_found;
125 }
126
127 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
128 const char *mime_type)
129 {
130 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
131
132 if (fmt) {
133 AVOutputFormat *stream_fmt;
134 char stream_format_name[64];
135
136 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
137 stream_fmt = guess_format(stream_format_name, NULL, NULL);
138
139 if (stream_fmt)
140 fmt = stream_fmt;
141 }
142
143 return fmt;
144 }
145
146 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
147 const char *filename, const char *mime_type, enum CodecType type){
148 if(type == CODEC_TYPE_VIDEO){
149 enum CodecID codec_id= CODEC_ID_NONE;
150
151 #ifdef CONFIG_IMAGE2_MUXER
152 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
153 codec_id= av_guess_image2_codec(filename);
154 }
155 #endif
156 if(codec_id == CODEC_ID_NONE)
157 codec_id= fmt->video_codec;
158 return codec_id;
159 }else if(type == CODEC_TYPE_AUDIO)
160 return fmt->audio_codec;
161 else
162 return CODEC_ID_NONE;
163 }
164
165 AVInputFormat *av_find_input_format(const char *short_name)
166 {
167 AVInputFormat *fmt;
168 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
169 if (!strcmp(fmt->name, short_name))
170 return fmt;
171 }
172 return NULL;
173 }
174
175 /* memory handling */
176
177 void av_destruct_packet(AVPacket *pkt)
178 {
179 av_free(pkt->data);
180 pkt->data = NULL; pkt->size = 0;
181 }
182
183 void av_init_packet(AVPacket *pkt)
184 {
185 pkt->pts = AV_NOPTS_VALUE;
186 pkt->dts = AV_NOPTS_VALUE;
187 pkt->pos = -1;
188 pkt->duration = 0;
189 pkt->flags = 0;
190 pkt->stream_index = 0;
191 pkt->destruct= av_destruct_packet_nofree;
192 }
193
194 int av_new_packet(AVPacket *pkt, int size)
195 {
196 uint8_t *data;
197 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
198 return AVERROR(ENOMEM);
199 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
200 if (!data)
201 return AVERROR(ENOMEM);
202 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
203
204 av_init_packet(pkt);
205 pkt->data = data;
206 pkt->size = size;
207 pkt->destruct = av_destruct_packet;
208 return 0;
209 }
210
211 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
212 {
213 int ret= av_new_packet(pkt, size);
214
215 if(ret<0)
216 return ret;
217
218 pkt->pos= url_ftell(s);
219
220 ret= get_buffer(s, pkt->data, size);
221 if(ret<=0)
222 av_free_packet(pkt);
223 else
224 pkt->size= ret;
225
226 return ret;
227 }
228
229 int av_dup_packet(AVPacket *pkt)
230 {
231 if (pkt->destruct != av_destruct_packet) {
232 uint8_t *data;
233 /* we duplicate the packet and don't forget to put the padding
234 again */
235 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
236 return AVERROR(ENOMEM);
237 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
238 if (!data) {
239 return AVERROR(ENOMEM);
240 }
241 memcpy(data, pkt->data, pkt->size);
242 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
243 pkt->data = data;
244 pkt->destruct = av_destruct_packet;
245 }
246 return 0;
247 }
248
249 int av_filename_number_test(const char *filename)
250 {
251 char buf[1024];
252 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
253 }
254
255 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
256 {
257 AVInputFormat *fmt1, *fmt;
258 int score;
259
260 fmt = NULL;
261 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
262 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
263 continue;
264 score = 0;
265 if (fmt1->read_probe) {
266 score = fmt1->read_probe(pd);
267 } else if (fmt1->extensions) {
268 if (match_ext(pd->filename, fmt1->extensions)) {
269 score = 50;
270 }
271 }
272 if (score > *score_max) {
273 *score_max = score;
274 fmt = fmt1;
275 }
276 }
277 return fmt;
278 }
279
280 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
281 int score=0;
282 return av_probe_input_format2(pd, is_opened, &score);
283 }
284
285 /************************************************************/
286 /* input media file */
287
288 /**
289 * Open a media file from an IO stream. 'fmt' must be specified.
290 */
291 static const char* format_to_name(void* ptr)
292 {
293 AVFormatContext* fc = (AVFormatContext*) ptr;
294 if(fc->iformat) return fc->iformat->name;
295 else if(fc->oformat) return fc->oformat->name;
296 else return "NULL";
297 }
298
299 #define OFFSET(x) offsetof(AVFormatContext,x)
300 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
301 //these names are too long to be readable
302 #define E AV_OPT_FLAG_ENCODING_PARAM
303 #define D AV_OPT_FLAG_DECODING_PARAM
304
305 static const AVOption options[]={
306 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
307 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
308 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
309 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
310 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
311 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
312 {"track", "set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
313 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
314 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
315 {NULL},
316 };
317
318 #undef E
319 #undef D
320 #undef DEFAULT
321
322 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
323
324 static void avformat_get_context_defaults(AVFormatContext *s)
325 {
326 memset(s, 0, sizeof(AVFormatContext));
327
328 s->av_class = &av_format_context_class;
329
330 av_opt_set_defaults(s);
331 }
332
333 AVFormatContext *av_alloc_format_context(void)
334 {
335 AVFormatContext *ic;
336 ic = av_malloc(sizeof(AVFormatContext));
337 if (!ic) return ic;
338 avformat_get_context_defaults(ic);
339 ic->av_class = &av_format_context_class;
340 return ic;
341 }
342
343 int av_open_input_stream(AVFormatContext **ic_ptr,
344 ByteIOContext *pb, const char *filename,
345 AVInputFormat *fmt, AVFormatParameters *ap)
346 {
347 int err;
348 AVFormatContext *ic;
349 AVFormatParameters default_ap;
350
351 if(!ap){
352 ap=&default_ap;
353 memset(ap, 0, sizeof(default_ap));
354 }
355
356 if(!ap->prealloced_context)
357 ic = av_alloc_format_context();
358 else
359 ic = *ic_ptr;
360 if (!ic) {
361 err = AVERROR(ENOMEM);
362 goto fail;
363 }
364 ic->iformat = fmt;
365 if (pb)
366 ic->pb = *pb;
367 ic->duration = AV_NOPTS_VALUE;
368 ic->start_time = AV_NOPTS_VALUE;
369 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
370
371 /* allocate private data */
372 if (fmt->priv_data_size > 0) {
373 ic->priv_data = av_mallocz(fmt->priv_data_size);
374 if (!ic->priv_data) {
375 err = AVERROR(ENOMEM);
376 goto fail;
377 }
378 } else {
379 ic->priv_data = NULL;
380 }
381
382 err = ic->iformat->read_header(ic, ap);
383 if (err < 0)
384 goto fail;
385
386 if (pb && !ic->data_offset)
387 ic->data_offset = url_ftell(&ic->pb);
388
389 *ic_ptr = ic;
390 return 0;
391 fail:
392 if (ic) {
393 av_freep(&ic->priv_data);
394 }
395 av_free(ic);
396 *ic_ptr = NULL;
397 return err;
398 }
399
400 /** Size of probe buffer, for guessing file type from file contents. */
401 #define PROBE_BUF_MIN 2048
402 #define PROBE_BUF_MAX (1<<20)
403
404 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
405 AVInputFormat *fmt,
406 int buf_size,
407 AVFormatParameters *ap)
408 {
409 int err, must_open_file, file_opened, probe_size;
410 AVProbeData probe_data, *pd = &probe_data;
411 ByteIOContext pb1, *pb = &pb1;
412
413 file_opened = 0;
414 pd->filename = "";
415 if (filename)
416 pd->filename = filename;
417 pd->buf = NULL;
418 pd->buf_size = 0;
419
420 if (!fmt) {
421 /* guess format if no file can be opened */
422 fmt = av_probe_input_format(pd, 0);
423 }
424
425 /* do not open file if the format does not need it. XXX: specific
426 hack needed to handle RTSP/TCP */
427 must_open_file = 1;
428 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
429 must_open_file = 0;
430 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it is uninitialized
431 }
432
433 if (!fmt || must_open_file) {
434 /* if no file needed do not try to open one */
435 if ((err=url_fopen(pb, filename, URL_RDONLY)) < 0) {
436 goto fail;
437 }
438 file_opened = 1;
439 if (buf_size > 0) {
440 url_setbufsize(pb, buf_size);
441 }
442
443 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
444 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
445 /* read probe data */
446 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
447 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
448 if (url_fseek(pb, 0, SEEK_SET) < 0) {
449 url_fclose(pb);
450 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
451 file_opened = 0;
452 err = AVERROR(EIO);
453 goto fail;
454 }
455 }
456 /* guess file format */
457 fmt = av_probe_input_format2(pd, 1, &score);
458 }
459 av_freep(&pd->buf);
460 }
461
462 /* if still no format found, error */
463 if (!fmt) {
464 err = AVERROR_NOFMT;
465 goto fail;
466 }
467
468 /* XXX: suppress this hack for redirectors */
469 #ifdef CONFIG_REDIR_DEMUXER
470 if (fmt == &redir_demuxer) {
471 err = redir_open(ic_ptr, pb);
472 url_fclose(pb);
473 return err;
474 }
475 #endif
476
477 /* check filename in case an image number is expected */
478 if (fmt->flags & AVFMT_NEEDNUMBER) {
479 if (!av_filename_number_test(filename)) {
480 err = AVERROR_NUMEXPECTED;
481 goto fail;
482 }
483 }
484 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
485 if (err)
486 goto fail;
487 return 0;
488 fail:
489 av_freep(&pd->buf);
490 if (file_opened)
491 url_fclose(pb);
492 *ic_ptr = NULL;
493 return err;
494
495 }
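
/* Illustrative usage sketch (editor's addition, not part of the original
   file): the typical demuxing loop built from the functions declared in
   avformat.h and implemented in this file. The filename is hypothetical and
   error handling is reduced to the bare minimum; the block is kept inside
   "#if 0" so it does not affect the build. */
#if 0
static int example_demux(const char *filename)
{
    AVFormatContext *ic;
    AVPacket pkt;

    av_register_all();                      /* register all (de)muxers */
    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return -1;                          /* probing or I/O failed */
    if (av_find_stream_info(ic) < 0) {      /* fill in codec parameters */
        av_close_input_file(ic);
        return -1;
    }
    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.pts/pkt.dts are in ic->streams[pkt.stream_index]->time_base
           units; the packet must be freed after use */
        av_free_packet(&pkt);
    }
    av_close_input_file(ic);
    return 0;
}
#endif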
496
497 /*******************************************************/
498
499 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
500 {
501 av_init_packet(pkt);
502 return s->iformat->read_packet(s, pkt);
503 }
504
505 /**********************************************************/
506
507 /**
508 * Get the number of samples of an audio frame. Return -1 on error.
509 */
510 static int get_audio_frame_size(AVCodecContext *enc, int size)
511 {
512 int frame_size;
513
514 if (enc->frame_size <= 1) {
515 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
516
517 if (bits_per_sample) {
518 if (enc->channels == 0)
519 return -1;
520 frame_size = (size << 3) / (bits_per_sample * enc->channels);
521 } else {
522 /* used for example by ADPCM codecs */
523 if (enc->bit_rate == 0)
524 return -1;
525 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
526 }
527 } else {
528 frame_size = enc->frame_size;
529 }
530 return frame_size;
531 }
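
/* Worked example (editor's note): for 16-bit stereo PCM a 4096 byte packet
   gives bits_per_sample = 16 and channels = 2, hence
   frame_size = (4096 << 3) / (16 * 2) = 1024 samples. Codecs without a
   per-sample size (e.g. some ADPCM variants) instead get an estimate from
   bit_rate and sample_rate, and fixed-frame-size codecs simply report
   enc->frame_size. */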
532
533
534 /**
535 * Compute the frame duration as a num/den fraction of a second; both are set to 0 if it is not available.
536 */
537 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
538 AVCodecParserContext *pc, AVPacket *pkt)
539 {
540 int frame_size;
541
542 *pnum = 0;
543 *pden = 0;
544 switch(st->codec->codec_type) {
545 case CODEC_TYPE_VIDEO:
546 if(st->time_base.num*1000LL > st->time_base.den){
547 *pnum = st->time_base.num;
548 *pden = st->time_base.den;
549 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
550 *pnum = st->codec->time_base.num;
551 *pden = st->codec->time_base.den;
552 if (pc && pc->repeat_pict) {
553 *pden *= 2;
554 *pnum = (*pnum) * (2 + pc->repeat_pict);
555 }
556 }
557 break;
558 case CODEC_TYPE_AUDIO:
559 frame_size = get_audio_frame_size(st->codec, pkt->size);
560 if (frame_size < 0)
561 break;
562 *pnum = frame_size;
563 *pden = st->codec->sample_rate;
564 break;
565 default:
566 break;
567 }
568 }
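
/* Worked example (editor's note): for a video stream whose codec time base is
   1/25 (25 fps), the function above yields *pnum = 1, *pden = 25.
   compute_pkt_fields() below then rescales this into the stream time base;
   with st->time_base = 1/90000 the packet duration becomes
   av_rescale(1, 1 * 90000, 25 * 1) = 3600 ticks. */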
569
570 static int is_intra_only(AVCodecContext *enc){
571 if(enc->codec_type == CODEC_TYPE_AUDIO){
572 return 1;
573 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
574 switch(enc->codec_id){
575 case CODEC_ID_MJPEG:
576 case CODEC_ID_MJPEGB:
577 case CODEC_ID_LJPEG:
578 case CODEC_ID_RAWVIDEO:
579 case CODEC_ID_DVVIDEO:
580 case CODEC_ID_HUFFYUV:
581 case CODEC_ID_FFVHUFF:
582 case CODEC_ID_ASV1:
583 case CODEC_ID_ASV2:
584 case CODEC_ID_VCR1:
585 return 1;
586 default: break;
587 }
588 }
589 return 0;
590 }
591
592 static void update_initial_timestamps(AVFormatContext *s, int stream_index, int64_t dts){
593 AVStream *st= s->streams[stream_index];
594 AVPacketList *pktl= s->packet_buffer;
595
596 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE)
597 return;
598
599 st->first_dts= dts - st->cur_dts;
600 st->cur_dts= dts;
601
602 for(; pktl; pktl= pktl->next){
603 if(pktl->pkt.stream_index != stream_index)
604 continue;
605 //FIXME think more about this check
606 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
607 pktl->pkt.pts += st->first_dts;
608
609 if(pktl->pkt.dts != AV_NOPTS_VALUE)
610 pktl->pkt.dts += st->first_dts;
611 }
612 }
613
614 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
615 AVCodecParserContext *pc, AVPacket *pkt)
616 {
617 int num, den, presentation_delayed, delay, i;
618 int64_t offset;
619
620 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
621 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
622 pkt->dts -= 1LL<<st->pts_wrap_bits;
623 }
624
625 if (pkt->duration == 0) {
626 compute_frame_duration(&num, &den, st, pc, pkt);
627 if (den && num) {
628 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
629 }
630 }
631
632 /* correct timestamps with byte offset if demuxers only have timestamps on packet boundaries */
633 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
634 /* this will estimate bitrate based on this frame's duration and size */
635 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
636 if(pkt->pts != AV_NOPTS_VALUE)
637 pkt->pts += offset;
638 if(pkt->dts != AV_NOPTS_VALUE)
639 pkt->dts += offset;
640 }
641
642 if(is_intra_only(st->codec))
643 pkt->flags |= PKT_FLAG_KEY;
644
645 /* do we have a video B frame ? */
646 delay= st->codec->has_b_frames;
647 presentation_delayed = 0;
648 /* XXX: need has_b_frame, but cannot get it if the codec is
649 not initialized */
650 if (delay &&
651 pc && pc->pict_type != FF_B_TYPE)
652 presentation_delayed = 1;
653 /* This may be redundant, but it should not hurt. */
654 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
655 presentation_delayed = 1;
656
657 if(st->cur_dts == AV_NOPTS_VALUE){
658 st->cur_dts = 0; //FIXME maybe set it to 0 during init
659 }
660
661 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
662 /* interpolate PTS and DTS if they are not present */
663 if(delay <=1){
664 if (presentation_delayed) {
665 /* DTS = decompression time stamp */
666 /* PTS = presentation time stamp */
667 if (pkt->dts == AV_NOPTS_VALUE)
668 pkt->dts = st->last_IP_pts;
669 update_initial_timestamps(s, pkt->stream_index, pkt->dts);
670 if (pkt->dts == AV_NOPTS_VALUE)
671 pkt->dts = st->cur_dts;
672
673 /* this is tricky: the dts must be incremented by the duration
674 of the frame we are displaying, i.e. the last I or P frame */
675 if (st->last_IP_duration == 0)
676 st->last_IP_duration = pkt->duration;
677 st->cur_dts = pkt->dts + st->last_IP_duration;
678 st->last_IP_duration = pkt->duration;
679 st->last_IP_pts= pkt->pts;
680 /* cannot compute PTS if not present (we can compute it only
681 by knowing the future) */
682 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
683 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
684 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
685 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
686 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
687 pkt->pts += pkt->duration;
688 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
689 }
690 }
691
692 /* presentation is not delayed: PTS and DTS are the same */
693 if(pkt->pts == AV_NOPTS_VALUE)
694 pkt->pts = pkt->dts;
695 update_initial_timestamps(s, pkt->stream_index, pkt->pts);
696 if(pkt->pts == AV_NOPTS_VALUE)
697 pkt->pts = st->cur_dts;
698 pkt->dts = pkt->pts;
699 st->cur_dts = pkt->pts + pkt->duration;
700 }
701 }
702
703 if(pkt->pts != AV_NOPTS_VALUE){
704 st->pts_buffer[0]= pkt->pts;
705 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
706 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
707 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
708 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
709 if(pkt->dts == AV_NOPTS_VALUE)
710 pkt->dts= st->pts_buffer[0];
711 if(delay>1){
712 update_initial_timestamps(s, pkt->stream_index, pkt->dts); // this should happen on the first packet
713 }
714 if(pkt->dts > st->cur_dts)
715 st->cur_dts = pkt->dts;
716 }
717
718 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
719
720 /* update flags */
721 if (pc) {
722 pkt->flags = 0;
723 /* key frame computation */
724 if (pc->pict_type == FF_I_TYPE)
725 pkt->flags |= PKT_FLAG_KEY;
726 }
727 }
728
729 void av_destruct_packet_nofree(AVPacket *pkt)
730 {
731 pkt->data = NULL; pkt->size = 0;
732 }
733
734 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
735 {
736 AVStream *st;
737 int len, ret, i;
738
739 av_init_packet(pkt);
740
741 for(;;) {
742 /* select current input stream component */
743 st = s->cur_st;
744 if (st) {
745 if (!st->need_parsing || !st->parser) {
746 /* no parsing needed: we just output the packet as is */
747 /* raw data support */
748 *pkt = s->cur_pkt;
749 compute_pkt_fields(s, st, NULL, pkt);
750 s->cur_st = NULL;
751 break;
752 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
753 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
754 s->cur_ptr, s->cur_len,
755 s->cur_pkt.pts, s->cur_pkt.dts);
756 s->cur_pkt.pts = AV_NOPTS_VALUE;
757 s->cur_pkt.dts = AV_NOPTS_VALUE;
758 /* increment read pointer */
759 s->cur_ptr += len;
760 s->cur_len -= len;
761
762 /* return packet if any */
763 if (pkt->size) {
764 got_packet:
765 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
766 pkt->duration = 0;
767 pkt->stream_index = st->index;
768 pkt->pts = st->parser->pts;
769 pkt->dts = st->parser->dts;
770 pkt->destruct = av_destruct_packet_nofree;
771 compute_pkt_fields(s, st, st->parser, pkt);
772
773 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
774 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
775 0, 0, AVINDEX_KEYFRAME);
776 }
777
778 break;
779 }
780 } else {
781 /* free packet */
782 av_free_packet(&s->cur_pkt);
783 s->cur_st = NULL;
784 }
785 } else {
786 /* read next packet */
787 ret = av_read_packet(s, &s->cur_pkt);
788 if (ret < 0) {
789 if (ret == AVERROR(EAGAIN))
790 return ret;
791 /* return the last frames, if any */
792 for(i = 0; i < s->nb_streams; i++) {
793 st = s->streams[i];
794 if (st->parser && st->need_parsing) {
795 av_parser_parse(st->parser, st->codec,
796 &pkt->data, &pkt->size,
797 NULL, 0,
798 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
799 if (pkt->size)
800 goto got_packet;
801 }
802 }
803 /* no more packets: really terminates parsing */
804 return ret;
805 }
806
807 st = s->streams[s->cur_pkt.stream_index];
808 if(st->codec->debug & FF_DEBUG_PTS)
809 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
810 s->cur_pkt.stream_index,
811 s->cur_pkt.pts,
812 s->cur_pkt.dts,
813 s->cur_pkt.size);
814
815 s->cur_st = st;
816 s->cur_ptr = s->cur_pkt.data;
817 s->cur_len = s->cur_pkt.size;
818 if (st->need_parsing && !st->parser) {
819 st->parser = av_parser_init(st->codec->codec_id);
820 if (!st->parser) {
821 /* no parser available : just output the raw packets */
822 st->need_parsing = AVSTREAM_PARSE_NONE;
823 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
824 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
825 }
826 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
827 st->parser->last_frame_offset=
828 st->parser->cur_offset= s->cur_pkt.pos;
829 }
830 }
831 }
832 }
833 if(st->codec->debug & FF_DEBUG_PTS)
834 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
835 pkt->stream_index,
836 pkt->pts,
837 pkt->dts,
838 pkt->size);
839
840 return 0;
841 }
842
843 static AVPacket *add_to_pktbuf(AVFormatContext *s, AVPacket *pkt){
844 AVPacketList *pktl= s->packet_buffer;
845 AVPacketList **plast_pktl= &s->packet_buffer;
846
847 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
848
849 pktl = av_mallocz(sizeof(AVPacketList));
850 if (!pktl)
851 return NULL;
852
853 /* add the packet in the buffered packet list */
854 *plast_pktl = pktl;
855 pktl->pkt= *pkt;
856 return &pktl->pkt;
857 }
858
859 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
860 {
861 AVPacketList *pktl;
862 int eof=0;
863 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
864
865 for(;;){
866 pktl = s->packet_buffer;
867 if (pktl) {
868 AVPacket *next_pkt= &pktl->pkt;
869
870 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
871 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
872 if( pktl->pkt.stream_index == next_pkt->stream_index
873 && next_pkt->dts < pktl->pkt.dts
874 && pktl->pkt.pts != pktl->pkt.dts //not b frame
875 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
876 next_pkt->pts= pktl->pkt.dts;
877 }
878 pktl= pktl->next;
879 }
880 pktl = s->packet_buffer;
881 }
882
883 if( next_pkt->pts != AV_NOPTS_VALUE
884 || next_pkt->dts == AV_NOPTS_VALUE
885 || !genpts || eof){
886 /* read packet from packet buffer, if there is data */
887 *pkt = *next_pkt;
888 s->packet_buffer = pktl->next;
889 av_free(pktl);
890 return 0;
891 }
892 }
893 if(genpts){
894 int ret= av_read_frame_internal(s, pkt);
895 if(ret<0){
896 if(pktl && ret != AVERROR(EAGAIN)){
897 eof=1;
898 continue;
899 }else
900 return ret;
901 }
902
903 if(av_dup_packet(add_to_pktbuf(s, pkt)) < 0)
904 return AVERROR(ENOMEM);
905 }else{
906 assert(!s->packet_buffer);
907 return av_read_frame_internal(s, pkt);
908 }
909 }
910 }
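
/* Usage note (editor's addition): a demuxer that only delivers DTS can have
   missing PTS filled in from later packets by enabling the "genpts" flag
   before the read loop, e.g.

       ic->flags |= AVFMT_FLAG_GENPTS;
       while (av_read_frame(ic, &pkt) >= 0) {
           ...
           av_free_packet(&pkt);
       }

   With the flag set, packets are held in s->packet_buffer until a usable PTS
   is found or EOF is reached, which can increase memory use on streams with
   long B-frame runs. */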
911
912 /* XXX: remove the packet queue */
913 static void flush_packet_queue(AVFormatContext *s)
914 {
915 AVPacketList *pktl;
916
917 for(;;) {
918 pktl = s->packet_buffer;
919 if (!pktl)
920 break;
921 s->packet_buffer = pktl->next;
922 av_free_packet(&pktl->pkt);
923 av_free(pktl);
924 }
925 }
926
927 /*******************************************************/
928 /* seek support */
929
930 int av_find_default_stream_index(AVFormatContext *s)
931 {
932 int i;
933 AVStream *st;
934
935 if (s->nb_streams <= 0)
936 return -1;
937 for(i = 0; i < s->nb_streams; i++) {
938 st = s->streams[i];
939 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
940 return i;
941 }
942 }
943 return 0;
944 }
945
946 /**
947 * Flush the frame reader.
948 */
949 static void av_read_frame_flush(AVFormatContext *s)
950 {
951 AVStream *st;
952 int i;
953
954 flush_packet_queue(s);
955
956 /* free previous packet */
957 if (s->cur_st) {
958 if (s->cur_st->parser)
959 av_free_packet(&s->cur_pkt);
960 s->cur_st = NULL;
961 }
962 /* fail safe */
963 s->cur_ptr = NULL;
964 s->cur_len = 0;
965
966 /* for each stream, reset read state */
967 for(i = 0; i < s->nb_streams; i++) {
968 st = s->streams[i];
969
970 if (st->parser) {
971 av_parser_close(st->parser);
972 st->parser = NULL;
973 }
974 st->last_IP_pts = AV_NOPTS_VALUE;
975 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
976 }
977 }
978
979 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
980 int i;
981
982 for(i = 0; i < s->nb_streams; i++) {
983 AVStream *st = s->streams[i];
984
985 st->cur_dts = av_rescale(timestamp,
986 st->time_base.den * (int64_t)ref_st->time_base.num,
987 st->time_base.num * (int64_t)ref_st->time_base.den);
988 }
989 }
990
991 int av_add_index_entry(AVStream *st,
992 int64_t pos, int64_t timestamp, int size, int distance, int flags)
993 {
994 AVIndexEntry *entries, *ie;
995 int index;
996
997 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
998 return -1;
999
1000 entries = av_fast_realloc(st->index_entries,
1001 &st->index_entries_allocated_size,
1002 (st->nb_index_entries + 1) *
1003 sizeof(AVIndexEntry));
1004 if(!entries)
1005 return -1;
1006
1007 st->index_entries= entries;
1008
1009 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1010
1011 if(index<0){
1012 index= st->nb_index_entries++;
1013 ie= &entries[index];
1014 assert(index==0 || ie[-1].timestamp < timestamp);
1015 }else{
1016 ie= &entries[index];
1017 if(ie->timestamp != timestamp){
1018 if(ie->timestamp <= timestamp)
1019 return -1;
1020 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1021 st->nb_index_entries++;
1022 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1023 distance= ie->min_distance;
1024 }
1025
1026 ie->pos = pos;
1027 ie->timestamp = timestamp;
1028 ie->min_distance= distance;
1029 ie->size= size;
1030 ie->flags = flags;
1031
1032 return index;
1033 }
1034
1035 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1036 int flags)
1037 {
1038 AVIndexEntry *entries= st->index_entries;
1039 int nb_entries= st->nb_index_entries;
1040 int a, b, m;
1041 int64_t timestamp;
1042
1043 a = - 1;
1044 b = nb_entries;
1045
1046 while (b - a > 1) {
1047 m = (a + b) >> 1;
1048 timestamp = entries[m].timestamp;
1049 if(timestamp >= wanted_timestamp)
1050 b = m;
1051 if(timestamp <= wanted_timestamp)
1052 a = m;
1053 }
1054 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1055
1056 if(!(flags & AVSEEK_FLAG_ANY)){
1057 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1058 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1059 }
1060 }
1061
1062 if(m == nb_entries)
1063 return -1;
1064 return m;
1065 }
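
/* Editor's note: the binary search above picks the last entry with
   timestamp <= wanted_timestamp when AVSEEK_FLAG_BACKWARD is set and the
   first entry with timestamp >= wanted_timestamp otherwise; unless
   AVSEEK_FLAG_ANY is given, the result is then moved in the corresponding
   direction until an entry flagged AVINDEX_KEYFRAME is found. */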
1066
1067 #define DEBUG_SEEK
1068
1069 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1070 AVInputFormat *avif= s->iformat;
1071 int64_t pos_min, pos_max, pos, pos_limit;
1072 int64_t ts_min, ts_max, ts;
1073 int index;
1074 AVStream *st;
1075
1076 if (stream_index < 0)
1077 return -1;
1078
1079 #ifdef DEBUG_SEEK
1080 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1081 #endif
1082
1083 ts_max=
1084 ts_min= AV_NOPTS_VALUE;
1085 pos_limit= -1; //gcc falsely says it may be uninitialized
1086
1087 st= s->streams[stream_index];
1088 if(st->index_entries){
1089 AVIndexEntry *e;
1090
1091 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1092 index= FFMAX(index, 0);
1093 e= &st->index_entries[index];
1094
1095 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1096 pos_min= e->pos;
1097 ts_min= e->timestamp;
1098 #ifdef DEBUG_SEEK
1099 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1100 pos_min,ts_min);
1101 #endif
1102 }else{
1103 assert(index==0);
1104 }
1105
1106 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1107 assert(index < st->nb_index_entries);
1108 if(index >= 0){
1109 e= &st->index_entries[index];
1110 assert(e->timestamp >= target_ts);
1111 pos_max= e->pos;
1112 ts_max= e->timestamp;
1113 pos_limit= pos_max - e->min_distance;
1114 #ifdef DEBUG_SEEK
1115 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1116 pos_max,pos_limit, ts_max);
1117 #endif
1118 }
1119 }
1120
1121 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1122 if(pos<0)
1123 return -1;
1124
1125 /* do the seek */
1126 url_fseek(&s->pb, pos, SEEK_SET);
1127
1128 av_update_cur_dts(s, st, ts);
1129
1130 return 0;
1131 }
1132
1133 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1134 int64_t pos, ts;
1135 int64_t start_pos, filesize;
1136 int no_change;
1137
1138 #ifdef DEBUG_SEEK
1139 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1140 #endif
1141
1142 if(ts_min == AV_NOPTS_VALUE){
1143 pos_min = s->data_offset;
1144 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1145 if (ts_min == AV_NOPTS_VALUE)
1146 return -1;
1147 }
1148
1149 if(ts_max == AV_NOPTS_VALUE){
1150 int step= 1024;
1151 filesize = url_fsize(&s->pb);
1152 pos_max = filesize - 1;
1153 do{
1154 pos_max -= step;
1155 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1156 step += step;
1157 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1158 if (ts_max == AV_NOPTS_VALUE)
1159 return -1;
1160
1161 for(;;){
1162 int64_t tmp_pos= pos_max + 1;
1163 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1164 if(tmp_ts == AV_NOPTS_VALUE)
1165 break;
1166 ts_max= tmp_ts;
1167 pos_max= tmp_pos;
1168 if(tmp_pos >= filesize)
1169 break;
1170 }
1171 pos_limit= pos_max;
1172 }
1173
1174 if(ts_min > ts_max){
1175 return -1;
1176 }else if(ts_min == ts_max){
1177 pos_limit= pos_min;
1178 }
1179
1180 no_change=0;
1181 while (pos_min < pos_limit) {
1182 #ifdef DEBUG_SEEK
1183 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1184 pos_min, pos_max,
1185 ts_min, ts_max);
1186 #endif
1187 assert(pos_limit <= pos_max);
1188
1189 if(no_change==0){
1190 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1191 // interpolate position (better than dichotomy)
1192 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1193 + pos_min - approximate_keyframe_distance;
1194 }else if(no_change==1){
1195 // bisection, if interpolation failed to change min or max pos last time
1196 pos = (pos_min + pos_limit)>>1;
1197 }else{
1198 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1199 pos=pos_min;
1200 }
1201 if(pos <= pos_min)
1202 pos= pos_min + 1;
1203 else if(pos > pos_limit)
1204 pos= pos_limit;
1205 start_pos= pos;
1206
1207 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1208 if(pos == pos_max)
1209 no_change++;
1210 else
1211 no_change=0;
1212 #ifdef DEBUG_SEEK
1213 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1214 #endif
1215 if(ts == AV_NOPTS_VALUE){
1216 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1217 return -1;
1218 }
1219 assert(ts != AV_NOPTS_VALUE);
1220 if (target_ts <= ts) {
1221 pos_limit = start_pos - 1;
1222 pos_max = pos;
1223 ts_max = ts;
1224 }
1225 if (target_ts >= ts) {
1226 pos_min = pos;
1227 ts_min = ts;
1228 }
1229 }
1230
1231 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1232 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1233 #ifdef DEBUG_SEEK
1234 pos_min = pos;
1235 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1236 pos_min++;
1237 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1238 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1239 pos, ts_min, target_ts, ts_max);
1240 #endif
1241 *ts_ret= ts;
1242 return pos;
1243 }
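
/* Editor's note on the search above: the first guess assumes a roughly
   constant bit rate, i.e.

       pos ~= pos_min + (target_ts - ts_min) / (ts_max - ts_min)
                        * (pos_max - pos_min) - approximate_keyframe_distance

   For example, with ts_min = 0, ts_max = 100 s, pos_min = 0,
   pos_max = 100 MiB and target_ts = 50 s, the first probe lands near the
   50 MiB mark (minus the keyframe distance). If a probe fails to narrow the
   interval the code falls back to plain bisection, and then to a linear
   scan from pos_min. */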
1244
1245 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1246 int64_t pos_min, pos_max;
1247 #if 0
1248 AVStream *st;
1249
1250 if (stream_index < 0)
1251 return -1;
1252
1253 st= s->streams[stream_index];
1254 #endif
1255
1256 pos_min = s->data_offset;
1257 pos_max = url_fsize(&s->pb) - 1;
1258
1259 if (pos < pos_min) pos= pos_min;
1260 else if(pos > pos_max) pos= pos_max;
1261
1262 url_fseek(&s->pb, pos, SEEK_SET);
1263
1264 #if 0
1265 av_update_cur_dts(s, st, ts);
1266 #endif
1267 return 0;
1268 }
1269
1270 static int av_seek_frame_generic(AVFormatContext *s,
1271 int stream_index, int64_t timestamp, int flags)
1272 {
1273 int index;
1274 AVStream *st;
1275 AVIndexEntry *ie;
1276
1277 st = s->streams[stream_index];
1278
1279 index = av_index_search_timestamp(st, timestamp, flags);
1280
1281 if(index < 0 || index==st->nb_index_entries-1){
1282 int i;
1283 AVPacket pkt;
1284
1285 if(st->index_entries && st->nb_index_entries){
1286 ie= &st->index_entries[st->nb_index_entries-1];
1287 url_fseek(&s->pb, ie->pos, SEEK_SET);
1288 av_update_cur_dts(s, st, ie->timestamp);
1289 }else
1290 url_fseek(&s->pb, 0, SEEK_SET);
1291
1292 for(i=0;; i++) {
1293 int ret = av_read_frame(s, &pkt);
1294 if(ret<0)
1295 break;
1296 av_free_packet(&pkt);
1297 if(stream_index == pkt.stream_index){
1298 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1299 break;
1300 }
1301 }
1302 index = av_index_search_timestamp(st, timestamp, flags);
1303 }
1304 if (index < 0)
1305 return -1;
1306
1307 av_read_frame_flush(s);
1308 if (s->iformat->read_seek){
1309 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1310 return 0;
1311 }
1312 ie = &st->index_entries[index];
1313 url_fseek(&s->pb, ie->pos, SEEK_SET);
1314
1315 av_update_cur_dts(s, st, ie->timestamp);
1316
1317 return 0;
1318 }
1319
1320 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1321 {
1322 int ret;
1323 AVStream *st;
1324
1325 av_read_frame_flush(s);
1326
1327 if(flags & AVSEEK_FLAG_BYTE)
1328 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1329
1330 if(stream_index < 0){
1331 stream_index= av_find_default_stream_index(s);
1332 if(stream_index < 0)
1333 return -1;
1334
1335 st= s->streams[stream_index];
1336 /* timestamp for the default stream must be expressed in AV_TIME_BASE units */
1337 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1338 }
1339 st= s->streams[stream_index];
1340
1341 /* first, we try the format specific seek */
1342 if (s->iformat->read_seek)
1343 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1344 else
1345 ret = -1;
1346 if (ret >= 0) {
1347 return 0;
1348 }
1349
1350 if(s->iformat->read_timestamp)
1351 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1352 else
1353 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1354 }
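
/* Usage sketch (editor's addition): seek the default stream to the 10 second
   mark, landing on the closest preceding keyframe. With stream_index == -1
   the timestamp is expressed in AV_TIME_BASE units, as noted above.

       av_seek_frame(ic, -1, 10 * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
 */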
1355
1356 /*******************************************************/
1357
1358 /**
1359 * Check whether the file has accurate timings in any of its streams.
1360 *
1361 * @return TRUE if at least one stream has accurate timings.
1362 */
1363 static int av_has_timings(AVFormatContext *ic)
1364 {
1365 int i;
1366 AVStream *st;
1367
1368 for(i = 0;i < ic->nb_streams; i++) {
1369 st = ic->streams[i];
1370 if (st->start_time != AV_NOPTS_VALUE &&
1371 st->duration != AV_NOPTS_VALUE)
1372 return 1;
1373 }
1374 return 0;
1375 }
1376
1377 /**
1378 * Estimate the file's global timings from those of the individual streams.
1379 *
1380 * Also computes the global bitrate if possible.
1381 */
1382 static void av_update_stream_timings(AVFormatContext *ic)
1383 {
1384 int64_t start_time, start_time1, end_time, end_time1;
1385 int i;
1386 AVStream *st;
1387
1388 start_time = INT64_MAX;
1389 end_time = INT64_MIN;
1390 for(i = 0;i < ic->nb_streams; i++) {
1391 st = ic->streams[i];
1392 if (st->start_time != AV_NOPTS_VALUE) {
1393 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1394 if (start_time1 < start_time)
1395 start_time = start_time1;
1396 if (st->duration != AV_NOPTS_VALUE) {
1397 end_time1 = start_time1
1398 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1399 if (end_time1 > end_time)
1400 end_time = end_time1;
1401 }
1402 }
1403 }
1404 if (start_time != INT64_MAX) {
1405 ic->start_time = start_time;
1406 if (end_time != INT64_MIN) {
1407 ic->duration = end_time - start_time;
1408 if (ic->file_size > 0) {
1409 /* compute the bit rate */
1410 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1411 (double)ic->duration;
1412 }
1413 }
1414 }
1415
1416 }
1417
1418 static void fill_all_stream_timings(AVFormatContext *ic)
1419 {
1420 int i;
1421 AVStream *st;
1422
1423 av_update_stream_timings(ic);
1424 for(i = 0;i < ic->nb_streams; i++) {
1425 st = ic->streams[i];
1426 if (st->start_time == AV_NOPTS_VALUE) {
1427 if(ic->start_time != AV_NOPTS_VALUE)
1428 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1429 if(ic->duration != AV_NOPTS_VALUE)
1430 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1431 }
1432 }
1433 }
1434
1435 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1436 {
1437 int64_t filesize, duration;
1438 int bit_rate, i;
1439 AVStream *st;
1440
1441 /* if bit_rate is already set, we believe it */
1442 if (ic->bit_rate == 0) {
1443 bit_rate = 0;
1444 for(i=0;i<ic->nb_streams;i++) {
1445 st = ic->streams[i];
1446 bit_rate += st->codec->bit_rate;
1447 }
1448 ic->bit_rate = bit_rate;
1449 }
1450
1451 /* if duration is already set, we believe it */
1452 if (ic->duration == AV_NOPTS_VALUE &&
1453 ic->bit_rate != 0 &&
1454 ic->file_size != 0) {
1455 filesize = ic->file_size;
1456 if (filesize > 0) {
1457 for(i = 0; i < ic->nb_streams; i++) {
1458 st = ic->streams[i];
1459 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1460 if (st->start_time == AV_NOPTS_VALUE ||
1461 st->duration == AV_NOPTS_VALUE) {
1462 st->start_time = 0;
1463 st->duration = duration;
1464 }
1465 }
1466 }
1467 }
1468 }
1469
1470 #define DURATION_MAX_READ_SIZE 250000
1471
1472 /* only usable for MPEG-PS streams */
1473 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1474 {
1475 AVPacket pkt1, *pkt = &pkt1;
1476 AVStream *st;
1477 int read_size, i, ret;
1478 int64_t end_time;
1479 int64_t filesize, offset, duration;
1480
1481 /* free previous packet */
1482 if (ic->cur_st && ic->cur_st->parser)
1483 av_free_packet(&ic->cur_pkt);
1484 ic->cur_st = NULL;
1485
1486 /* flush packet queue */
1487 flush_packet_queue(ic);
1488
1489 for(i=0;i<ic->nb_streams;i++) {
1490 st = ic->streams[i];
1491 if (st->parser) {
1492 av_parser_close(st->parser);
1493 st->parser= NULL;
1494 }
1495 }
1496
1497 /* we read the first packets to get the first PTS (not fully
1498 accurate, but it is enough now) */
1499 url_fseek(&ic->pb, 0, SEEK_SET);
1500 read_size = 0;
1501 for(;;) {
1502 if (read_size >= DURATION_MAX_READ_SIZE)
1503 break;
1504 /* if all info is available, we can stop */
1505 for(i = 0;i < ic->nb_streams; i++) {
1506 st = ic->streams[i];
1507 if (st->start_time == AV_NOPTS_VALUE)
1508 break;
1509 }
1510 if (i == ic->nb_streams)
1511 break;
1512
1513 ret = av_read_packet(ic, pkt);
1514 if (ret != 0)
1515 break;
1516 read_size += pkt->size;
1517 st = ic->streams[pkt->stream_index];
1518 if (pkt->pts != AV_NOPTS_VALUE) {
1519 if (st->start_time == AV_NOPTS_VALUE)
1520 st->start_time = pkt->pts;
1521 }
1522 av_free_packet(pkt);
1523 }
1524
1525 /* estimate the end time (duration) */
1526 /* XXX: may need to support wrapping */
1527 filesize = ic->file_size;
1528 offset = filesize - DURATION_MAX_READ_SIZE;
1529 if (offset < 0)
1530 offset = 0;
1531
1532 url_fseek(&ic->pb, offset, SEEK_SET);
1533 read_size = 0;
1534 for(;;) {
1535 if (read_size >= DURATION_MAX_READ_SIZE)
1536 break;
1537 /* if all info is available, we can stop */
1538 for(i = 0;i < ic->nb_streams; i++) {
1539 st = ic->streams[i];
1540 if (st->duration == AV_NOPTS_VALUE)
1541 break;
1542 }
1543 if (i == ic->nb_streams)
1544 break;
1545
1546 ret = av_read_packet(ic, pkt);
1547 if (ret != 0)
1548 break;
1549 read_size += pkt->size;
1550 st = ic->streams[pkt->stream_index];
1551 if (pkt->pts != AV_NOPTS_VALUE) {
1552 end_time = pkt->pts;
1553 duration = end_time - st->start_time;
1554 if (duration > 0) {
1555 if (st->duration == AV_NOPTS_VALUE ||
1556 st->duration < duration)
1557 st->duration = duration;
1558 }
1559 }
1560 av_free_packet(pkt);
1561 }
1562
1563 fill_all_stream_timings(ic);
1564
1565 url_fseek(&ic->pb, old_offset, SEEK_SET);
1566 }
1567
1568 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1569 {
1570 int64_t file_size;
1571
1572 /* get the file size, if possible */
1573 if (ic->iformat->flags & AVFMT_NOFILE) {
1574 file_size = 0;
1575 } else {
1576 file_size = url_fsize(&ic->pb);
1577 if (file_size < 0)
1578 file_size = 0;
1579 }
1580 ic->file_size = file_size;
1581
1582 if ((!strcmp(ic->iformat->name, "mpeg") ||
1583 !strcmp(ic->iformat->name, "mpegts")) &&
1584 file_size && !ic->pb.is_streamed) {
1585 /* get accurate estimate from the PTSes */
1586 av_estimate_timings_from_pts(ic, old_offset);
1587 } else if (av_has_timings(ic)) {
1588 /* at least one component has timings - we use them for all
1589 the components */
1590 fill_all_stream_timings(ic);
1591 } else {
1592 /* less precise: use bit rate info */
1593 av_estimate_timings_from_bit_rate(ic);
1594 }
1595 av_update_stream_timings(ic);
1596
1597 #if 0
1598 {
1599 int i;
1600 AVStream *st;
1601 for(i = 0;i < ic->nb_streams; i++) {
1602 st = ic->streams[i];
1603 printf("%d: start_time: %0.3f duration: %0.3f\n",
1604 i, (double)st->start_time / AV_TIME_BASE,
1605 (double)st->duration / AV_TIME_BASE);
1606 }
1607 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1608 (double)ic->start_time / AV_TIME_BASE,
1609 (double)ic->duration / AV_TIME_BASE,
1610 ic->bit_rate / 1000);
1611 }
1612 #endif
1613 }
1614
1615 static int has_codec_parameters(AVCodecContext *enc)
1616 {
1617 int val;
1618 switch(enc->codec_type) {
1619 case CODEC_TYPE_AUDIO:
1620 val = enc->sample_rate;
1621 break;
1622 case CODEC_TYPE_VIDEO:
1623 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1624 break;
1625 default:
1626 val = 1;
1627 break;
1628 }
1629 return (val != 0);
1630 }
1631
1632 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1633 {
1634 int16_t *samples;
1635 AVCodec *codec;
1636 int got_picture, data_size, ret=0;
1637 AVFrame picture;
1638
1639 if(!st->codec->codec){
1640 codec = avcodec_find_decoder(st->codec->codec_id);
1641 if (!codec)
1642 return -1;
1643 ret = avcodec_open(st->codec, codec);
1644 if (ret < 0)
1645 return ret;
1646 }
1647
1648 if(!has_codec_parameters(st->codec)){
1649 switch(st->codec->codec_type) {
1650 case CODEC_TYPE_VIDEO:
1651 ret = avcodec_decode_video(st->codec, &picture,
1652 &got_picture, (uint8_t *)data, size);
1653 break;
1654 case CODEC_TYPE_AUDIO:
1655 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1656 samples = av_malloc(data_size);
1657 if (!samples)
1658 goto fail;
1659 ret = avcodec_decode_audio2(st->codec, samples,
1660 &data_size, (uint8_t *)data, size);
1661 av_free(samples);
1662 break;
1663 default:
1664 break;
1665 }
1666 }
1667 fail:
1668 return ret;
1669 }
1670
1671 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
1672 {
1673 AVInputFormat *fmt;
1674 fmt = av_probe_input_format2(pd, 1, &score);
1675
1676 if (fmt) {
1677 if (strncmp(fmt->name, "mp3", 3) == 0)
1678 st->codec->codec_id = CODEC_ID_MP3;
1679 else if (strncmp(fmt->name, "ac3", 3) == 0)
1680 st->codec->codec_id = CODEC_ID_AC3;
1681 }
1682 return !!fmt;
1683 }
1684
1685 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1686 {
1687 while (tags->id != CODEC_ID_NONE) {
1688 if (tags->id == id)
1689 return tags->tag;
1690 tags++;
1691 }
1692 return 0;
1693 }
1694
1695 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1696 {
1697 int i;
1698 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1699 if(tag == tags[i].tag)
1700 return tags[i].id;
1701 }
1702 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1703 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1704 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1705 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1706 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1707 return tags[i].id;
1708 }
1709 return CODEC_ID_NONE;
1710 }
1711
1712 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1713 {
1714 int i;
1715 for(i=0; tags && tags[i]; i++){
1716 int tag= codec_get_tag(tags[i], id);
1717 if(tag) return tag;
1718 }
1719 return 0;
1720 }
1721
1722 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1723 {
1724 int i;
1725 for(i=0; tags && tags[i]; i++){
1726 enum CodecID id= codec_get_id(tags[i], tag);
1727 if(id!=CODEC_ID_NONE) return id;
1728 }
1729 return CODEC_ID_NONE;
1730 }
1731
1732 /* absolute maximum size we read until we abort */
1733 #define MAX_READ_SIZE 5000000
1734
1735 #define MAX_STD_TIMEBASES (60*12+5)
1736 static int get_std_framerate(int i){
1737 if(i<60*12) return i*1001;
1738 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1739 }
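
/* Editor's note: together with the 1001*12 reference used in
   av_find_stream_info() below, indices below 60*12 cover frame rates from
   1/12 up to roughly 59.9 fps in steps of 1/12 fps, while the last five
   entries add the exact NTSC-style rates 24000/1001, 30000/1001, 60000/1001,
   12000/1001 and 15000/1001. */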
1740
1741 int av_find_stream_info(AVFormatContext *ic)
1742 {
1743 int i, count, ret, read_size, j;
1744 AVStream *st;
1745 AVPacket pkt1, *pkt;
1746 int64_t last_dts[MAX_STREAMS];
1747 int duration_count[MAX_STREAMS]={0};
1748 double (*duration_error)[MAX_STD_TIMEBASES];
1749 offset_t old_offset = url_ftell(&ic->pb);
1750 int64_t codec_info_duration[MAX_STREAMS]={0};
1751 int codec_info_nb_frames[MAX_STREAMS]={0};
1752 AVProbeData probe_data[MAX_STREAMS];
1753 int codec_identified[MAX_STREAMS]={0};
1754
1755 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1756 if (!duration_error) return AVERROR(ENOMEM);
1757
1758 for(i=0;i<ic->nb_streams;i++) {
1759 st = ic->streams[i];
1760 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1761 /* if(!st->time_base.num)
1762 st->time_base= */
1763 if(!st->codec->time_base.num)
1764 st->codec->time_base= st->time_base;
1765 }
1766 //only for the split stuff
1767 if (!st->parser) {
1768 st->parser = av_parser_init(st->codec->codec_id);
1769 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
1770 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1771 }
1772 }
1773 }
1774
1775 for(i=0;i<MAX_STREAMS;i++){
1776 last_dts[i]= AV_NOPTS_VALUE;
1777 }
1778
1779 memset(probe_data, 0, sizeof(probe_data));
1780 count = 0;
1781 read_size = 0;
1782 for(;;) {
1783 /* check if one codec still needs to be handled */
1784 for(i=0;i<ic->nb_streams;i++) {
1785 st = ic->streams[i];
1786 if (!has_codec_parameters(st->codec))
1787 break;
1788 /* variable fps and no guess at the real fps */
1789 if( (st->codec->time_base.den >= 101LL*st->codec->time_base.num || st->codec->codec_id == CODEC_ID_MPEG2VIDEO)
1790 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1791 break;
1792 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1793 break;
1794 if (st->codec->codec_type == CODEC_TYPE_AUDIO &&
1795 st->codec->codec_id == CODEC_ID_NONE)
1796 break;
1797 if(st->first_dts == AV_NOPTS_VALUE)
1798 break;
1799 }
1800 if (i == ic->nb_streams) {
1801 /* NOTE: if the format has no header, then we need to read
1802 some packets to get most of the streams, so we cannot
1803 stop here */
1804 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1805 /* if we found the info for all the codecs, we can stop */
1806 ret = count;
1807 break;
1808 }
1809 }
1810 /* we did not get all the codec info, but we read too much data */
1811 if (read_size >= MAX_READ_SIZE) {
1812 ret = count;
1813 break;
1814 }
1815
1816 /* NOTE: a new stream can be added here if the file has no header
1817 (AVFMTCTX_NOHEADER) */
1818 ret = av_read_frame_internal(ic, &pkt1);
1819 if (ret < 0) {
1820 /* EOF or error */
1821 ret = -1; /* we could not get all the codec parameters before EOF */
1822 for(i=0;i<ic->nb_streams;i++) {
1823 st = ic->streams[i];
1824 if (!has_codec_parameters(st->codec)){
1825 char buf[256];
1826 avcodec_string(buf, sizeof(buf), st->codec, 0);
1827 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1828 } else {
1829 ret = 0;
1830 }
1831 }
1832 break;
1833 }
1834
1835 pkt= add_to_pktbuf(ic, &pkt1);
1836 if(av_dup_packet(pkt) < 0)
1837 return AVERROR(ENOMEM);
1838
1839 read_size += pkt->size;
1840
1841 st = ic->streams[pkt->stream_index];
1842 if(codec_info_nb_frames[st->index]>1)
1843 codec_info_duration[st->index] += pkt->duration;
1844 if (pkt->duration != 0)
1845 codec_info_nb_frames[st->index]++;
1846
1847 {
1848 int index= pkt->stream_index;
1849 int64_t last= last_dts[index];
1850 int64_t duration= pkt->dts - last;
1851
1852 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1853 double dur= duration * av_q2d(st->time_base);
1854
1855 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1856 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
1857 if(duration_count[index] < 2)
1858 memset(duration_error, 0, MAX_STREAMS * sizeof(*duration_error));
1859 for(i=1; i<MAX_STD_TIMEBASES; i++){
1860 int framerate= get_std_framerate(i);
1861 int ticks= lrintf(dur*framerate/(1001*12));
1862 double error= dur - ticks*1001*12/(double)framerate;
1863 duration_error[index][i] += error*error;
1864 }
1865 duration_count[index]++;
1866 }
1867 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
1868 last_dts[pkt->stream_index]= pkt->dts;
1869
1870 if (st->codec->codec_id == CODEC_ID_NONE) {
1871 AVProbeData *pd = &(probe_data[st->index]);
1872 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size);
1873 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
1874 pd->buf_size += pkt->size;
1875 }
1876 }
1877 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1878 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1879 if(i){
1880 st->codec->extradata_size= i;
1881 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1882 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1883 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1884 }
1885 }
1886
1887 /* if still no information, we try to open the codec and to
1888 decompress the frame. We try to avoid that in most cases as
1889 it takes longer and uses more memory. For MPEG4, we need to
1890 decompress for Quicktime. */
1891 if (!has_codec_parameters(st->codec) /*&&
1892 (st->codec->codec_id == CODEC_ID_FLV1 ||
1893 st->codec->codec_id == CODEC_ID_H264 ||
1894 st->codec->codec_id == CODEC_ID_H263 ||
1895 st->codec->codec_id == CODEC_ID_H261 ||
1896 st->codec->codec_id == CODEC_ID_VORBIS ||
1897 st->codec->codec_id == CODEC_ID_MJPEG ||
1898 st->codec->codec_id == CODEC_ID_PNG ||
1899 st->codec->codec_id == CODEC_ID_PAM ||
1900 st->codec->codec_id == CODEC_ID_PGM ||
1901 st->codec->codec_id == CODEC_ID_PGMYUV ||
1902 st->codec->codec_id == CODEC_ID_PBM ||
1903 st->codec->codec_id == CODEC_ID_PPM ||
1904 st->codec->codec_id == CODEC_ID_SHORTEN ||
1905 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1906 try_decode_frame(st, pkt->data, pkt->size);
1907
1908 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
1909 break;
1910 }
1911 count++;
1912 }
1913
1914 // close codecs which were opened in try_decode_frame()
1915 for(i=0;i<ic->nb_streams;i++) {
1916 st = ic->streams[i];
1917 if(st->codec->codec)
1918 avcodec_close(st->codec);
1919 }
1920 for(i=0;i<ic->nb_streams;i++) {
1921 st = ic->streams[i];
1922 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1923 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
1924 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
1925
1926 if(duration_count[i]
1927 && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) /*&&
1928 //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
1929 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
1930 double best_error= 2*av_q2d(st->time_base);
1931 best_error= best_error*best_error*duration_count[i]*1000*12*30;
1932
1933 for(j=1; j<MAX_STD_TIMEBASES; j++){
1934 double error= duration_error[i][j] * get_std_framerate(j);
1935 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1936 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
1937 if(error < best_error){
1938 best_error= error;
1939 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
1940 }
1941 }
1942 }
1943
1944 if (!st->r_frame_rate.num){
1945 if( st->codec->time_base.den * (int64_t)st->time_base.num
1946 <= st->codec->time_base.num * (int64_t)st->time_base.den){
1947 st->r_frame_rate.num = st->codec->time_base.den;
1948 st->r_frame_rate.den = st->codec->time_base.num;
1949 }else{
1950 st->r_frame_rate.num = st->time_base.den;
1951 st->r_frame_rate.den = st->time_base.num;
1952 }
1953 }
1954 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
1955 if (st->codec->codec_id == CODEC_ID_NONE && probe_data[st->index].buf_size > 0) {
1956 codec_identified[st->index] = set_codec_from_probe_data(st, &(probe_data[st->index]), 1);
1957 if (codec_identified[st->index]) {
1958 st->need_parsing = AVSTREAM_PARSE_FULL;
1959 }
1960 }
1961 if(!st->codec->bits_per_sample)
1962 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
1963 }
1964 }
1965
1966 av_estimate_timings(ic, old_offset);
1967
1968 for(i=0;i<ic->nb_streams;i++) {
1969 st = ic->streams[i];
1970 if (codec_identified[st->index]) {
1971 av_read_frame_flush(ic);
1972 av_seek_frame(ic, st->index, 0.0, 0);
1973 url_fseek(&ic->pb, ic->data_offset, SEEK_SET);
1974 }
1975 }
1976
1977 #if 0
1978 /* correct DTS for b frame streams with no timestamps */
1979 for(i=0;i<ic->nb_streams;i++) {
1980 st = ic->streams[i];
1981 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1982 if(b-frames){
1983 ppktl = &ic->packet_buffer;
1984 while(ppkt1){
1985 if(ppkt1->stream_index != i)
1986 continue;
1987 if(ppkt1->pkt->dts < 0)
1988 break;
1989 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
1990 break;
1991 ppkt1->pkt->dts -= delta;
1992 ppkt1= ppkt1->next;
1993 }
1994 if(ppkt1)
1995 continue;
1996 st->cur_dts -= delta;
1997 }
1998 }
1999 }
2000 #endif
2001
2002 av_free(duration_error);
2003 for(i=0;i<MAX_STREAMS;i++){
2004 av_freep(&(probe_data[i].buf));
2005 }
2006
2007 return ret;
2008 }
2009
2010 /*******************************************************/
2011
2012 int av_read_play(AVFormatContext *s)
2013 {
2014 if (!s->iformat->read_play)
2015 return AVERROR(ENOSYS);
2016 return s->iformat->read_play(s);
2017 }
2018
2019 int av_read_pause(AVFormatContext *s)
2020 {
2021 if (!s->iformat->read_pause)
2022 return AVERROR(ENOSYS);
2023 return s->iformat->read_pause(s);
2024 }
2025
2026 void av_close_input_file(AVFormatContext *s)
2027 {
2028 int i, must_open_file;
2029 AVStream *st;
2030
2031 /* free previous packet */
2032 if (s->cur_st && s->cur_st->parser)
2033 av_free_packet(&s->cur_pkt);
2034
2035 if (s->iformat->read_close)
2036 s->iformat->read_close(s);
2037 for(i=0;i<s->nb_streams;i++) {
2038 /* free all data in a stream component */
2039 st = s->streams[i];
2040 if (st->parser) {
2041 av_parser_close(st->parser);
2042 }
2043 av_free(st->index_entries);
2044 av_free(st->codec->extradata);
2045 av_free(st->codec);
2046 av_free(st);
2047 }
2048 flush_packet_queue(s);
2049 must_open_file = 1;
2050 if (s->iformat->flags & AVFMT_NOFILE) {
2051 must_open_file = 0;
2052 }
2053 if (must_open_file) {
2054 url_fclose(&s->pb);
2055 }
2056 av_freep(&s->priv_data);
2057 av_free(s);
2058 }
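/* A minimal demuxing sketch (not part of the library, kept disabled): it
 * assumes av_register_all() has been called once at program start and uses
 * av_open_input_file(), av_find_stream_info() and av_read_frame(), which are
 * defined elsewhere in libavformat; the name example_demux is invented for
 * illustration. */
#if 0
static int example_demux(const char *filename)
{
    AVFormatContext *ic;
    AVPacket pkt;

    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return -1;
    if (av_find_stream_info(ic) < 0) {
        av_close_input_file(ic);
        return -1;
    }
    dump_format(ic, 0, filename, 0);

    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.stream_index selects the stream; pkt.pts/pkt.dts are expressed
           in ic->streams[pkt.stream_index]->time_base units */
        av_free_packet(&pkt);
    }

    av_close_input_file(ic);
    return 0;
}
#endif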
2059
2060 AVStream *av_new_stream(AVFormatContext *s, int id)
2061 {
2062 AVStream *st;
2063 int i;
2064
2065 if (s->nb_streams >= MAX_STREAMS)
2066 return NULL;
2067
2068 st = av_mallocz(sizeof(AVStream));
2069 if (!st)
2070 return NULL;
2071
2072 st->codec= avcodec_alloc_context();
2073 if (s->iformat) {
2074 /* no default bitrate if decoding */
2075 st->codec->bit_rate = 0;
2076 }
2077 st->index = s->nb_streams;
2078 st->id = id;
2079 st->start_time = AV_NOPTS_VALUE;
2080 st->duration = AV_NOPTS_VALUE;
2081 st->cur_dts = AV_NOPTS_VALUE;
2082 st->first_dts = AV_NOPTS_VALUE;
2083
2084     /* default pts settings are MPEG-like */
2085 av_set_pts_info(st, 33, 1, 90000);
2086 st->last_IP_pts = AV_NOPTS_VALUE;
2087 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2088 st->pts_buffer[i]= AV_NOPTS_VALUE;
2089
2090 s->streams[s->nb_streams++] = st;
2091 return st;
2092 }
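/* Sketch of how a demuxer's read_header() callback typically uses
 * av_new_stream() together with av_set_pts_info() (defined near the end of
 * this file); the name my_read_header and the 8 kHz mono PCM parameters are
 * invented for illustration. Kept disabled. */
#if 0
static int my_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVStream *st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type  = CODEC_TYPE_AUDIO;
    st->codec->codec_id    = CODEC_ID_PCM_S16LE;
    st->codec->sample_rate = 8000;
    st->codec->channels    = 1;
    /* timestamps of this stream will be expressed in 1/8000 s units */
    av_set_pts_info(st, 64, 1, 8000);
    return 0;
}
#endif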
2093
2094 /************************************************************/
2095 /* output media file */
2096
2097 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2098 {
2099 int ret;
2100
2101 if (s->oformat->priv_data_size > 0) {
2102 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2103 if (!s->priv_data)
2104 return AVERROR(ENOMEM);
2105 } else
2106 s->priv_data = NULL;
2107
2108 if (s->oformat->set_parameters) {
2109 ret = s->oformat->set_parameters(s, ap);
2110 if (ret < 0)
2111 return ret;
2112 }
2113 return 0;
2114 }
2115
2116 int av_write_header(AVFormatContext *s)
2117 {
2118 int ret, i;
2119 AVStream *st;
2120
2121 // some sanity checks
2122 for(i=0;i<s->nb_streams;i++) {
2123 st = s->streams[i];
2124
2125 switch (st->codec->codec_type) {
2126 case CODEC_TYPE_AUDIO:
2127 if(st->codec->sample_rate<=0){
2128 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2129 return -1;
2130 }
2131 break;
2132 case CODEC_TYPE_VIDEO:
2133 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2134 av_log(s, AV_LOG_ERROR, "time base not set\n");
2135 return -1;
2136 }
2137 if(st->codec->width<=0 || st->codec->height<=0){
2138 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2139 return -1;
2140 }
2141 break;
2142 }
2143
2144 if(s->oformat->codec_tag){
2145 if(st->codec->codec_tag){
2146 //FIXME
2147 //check that tag + id is in the table
2148 //if neither is in the table -> ok
2149 //if tag is in the table with another id -> FAIL
2150 //if id is in the table with another tag -> FAIL unless strict < ?
2151 }else
2152 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2153 }
2154 }
2155
2156 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2157 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2158 if (!s->priv_data)
2159 return AVERROR(ENOMEM);
2160 }
2161
2162 if(s->oformat->write_header){
2163 ret = s->oformat->write_header(s);
2164 if (ret < 0)
2165 return ret;
2166 }
2167
2168 /* init PTS generation */
2169 for(i=0;i<s->nb_streams;i++) {
2170 int64_t den = AV_NOPTS_VALUE;
2171 st = s->streams[i];
2172
2173 switch (st->codec->codec_type) {
2174 case CODEC_TYPE_AUDIO:
2175 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2176 break;
2177 case CODEC_TYPE_VIDEO:
2178 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2179 break;
2180 default:
2181 break;
2182 }
2183 if (den != AV_NOPTS_VALUE) {
2184 if (den <= 0)
2185 return AVERROR_INVALIDDATA;
2186 av_frac_init(&st->pts, 0, 0, den);
2187 }
2188 }
2189 return 0;
2190 }
2191
2192 //FIXME merge with compute_pkt_fields
2193 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2194 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2195 int num, den, frame_size, i;
2196
2197 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2198
2199 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2200 return -1;*/
2201
2202 /* duration field */
2203 if (pkt->duration == 0) {
2204 compute_frame_duration(&num, &den, st, NULL, pkt);
2205 if (den && num) {
2206 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2207 }
2208 }
2209
2210 //XXX/FIXME this is a temporary hack until all encoders output pts
2211 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2212 pkt->dts=
2213 // pkt->pts= st->cur_dts;
2214 pkt->pts= st->pts.val;
2215 }
2216
2217 //calculate dts from pts
2218 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2219 st->pts_buffer[0]= pkt->pts;
2220 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2221 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2222 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2223 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2224
2225 pkt->dts= st->pts_buffer[0];
2226 }
2227
2228 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2229         av_log(NULL, AV_LOG_ERROR, "error, non-monotonic timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2230 return -1;
2231 }
2232 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2233 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2234 return -1;
2235 }
2236
2237 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2238 st->cur_dts= pkt->dts;
2239 st->pts.val= pkt->dts;
2240
2241 /* update pts */
2242 switch (st->codec->codec_type) {
2243 case CODEC_TYPE_AUDIO:
2244 frame_size = get_audio_frame_size(st->codec, pkt->size);
2245
2246 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2247 but it would be better if we had the real timestamps from the encoder */
2248 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2249 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2250 }
2251 break;
2252 case CODEC_TYPE_VIDEO:
2253 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2254 break;
2255 default:
2256 break;
2257 }
2258 return 0;
2259 }
2260
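/* Worked example: with the MPEG-like default of 33 pts_wrap_bits set in
 * av_new_stream() above, pts_mask = (2LL << 32) - 1 = 0x1FFFFFFFF, i.e. the
 * low 33 bits; writing the shift as 2LL << (bits-1) keeps the shift count
 * below 64 even for pts_wrap_bits == 64. */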
2261 static void truncate_ts(AVStream *st, AVPacket *pkt){
2262 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2263
2264 // if(pkt->dts < 0)
2265 //    pkt->dts= 0;  //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2266
2267 if (pkt->pts != AV_NOPTS_VALUE)
2268 pkt->pts &= pts_mask;
2269 if (pkt->dts != AV_NOPTS_VALUE)
2270 pkt->dts &= pts_mask;
2271 }
2272
2273 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2274 {
2275 int ret;
2276
2277 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2278 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2279 return ret;
2280
2281 truncate_ts(s->streams[pkt->stream_index], pkt);
2282
2283 ret= s->oformat->write_packet(s, pkt);
2284 if(!ret)
2285 ret= url_ferror(&s->pb);
2286 return ret;
2287 }
2288
2289 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2290 AVPacketList *pktl, **next_point, *this_pktl;
2291 int stream_count=0;
2292 int streams[MAX_STREAMS];
2293
2294 if(pkt){
2295 AVStream *st= s->streams[ pkt->stream_index];
2296
2297 // assert(pkt->destruct != av_destruct_packet); //FIXME
2298
2299 this_pktl = av_mallocz(sizeof(AVPacketList));
2300 this_pktl->pkt= *pkt;
2301 if(pkt->destruct == av_destruct_packet)
2302 pkt->destruct= NULL; // non shared -> must keep original from being freed
2303 else
2304 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2305
2306 next_point = &s->packet_buffer;
2307 while(*next_point){
2308 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2309 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2310 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2311 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2312 break;
2313 next_point= &(*next_point)->next;
2314 }
2315 this_pktl->next= *next_point;
2316 *next_point= this_pktl;
2317 }
2318
2319 memset(streams, 0, sizeof(streams));
2320 pktl= s->packet_buffer;
2321 while(pktl){
2322 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2323 if(streams[ pktl->pkt.stream_index ] == 0)
2324 stream_count++;
2325 streams[ pktl->pkt.stream_index ]++;
2326 pktl= pktl->next;
2327 }
2328
2329 if(s->nb_streams == stream_count || (flush && stream_count)){
2330 pktl= s->packet_buffer;
2331 *out= pktl->pkt;
2332
2333 s->packet_buffer= pktl->next;
2334 av_freep(&pktl);
2335 return 1;
2336 }else{
2337 av_init_packet(out);
2338 return 0;
2339 }
2340 }
2341
2342 /**
2343  * Interleaves an AVPacket correctly so it can be muxed.
2344 * @param out the interleaved packet will be output here
2345 * @param in the input packet
2346 * @param flush 1 if no further packets are available as input and all
2347 * remaining packets should be output
2348 * @return 1 if a packet was output, 0 if no packet could be output,
2349  *         < 0 if an error occurred
2350 */
2351 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2352 if(s->oformat->interleave_packet)
2353 return s->oformat->interleave_packet(s, out, in, flush);
2354 else
2355 return av_interleave_packet_per_dts(s, out, in, flush);
2356 }
2357
2358 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2359 AVStream *st= s->streams[ pkt->stream_index];
2360
2361 //FIXME/XXX/HACK drop zero sized packets
2362 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2363 return 0;
2364
2365 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2366 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2367 return -1;
2368
2369 if(pkt->dts == AV_NOPTS_VALUE)
2370 return -1;
2371
2372 for(;;){
2373 AVPacket opkt;
2374 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2375 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2376 return ret;
2377
2378 truncate_ts(s->streams[opkt.stream_index], &opkt);
2379 ret= s->oformat->write_packet(s, &opkt);
2380
2381 av_free_packet(&opkt);
2382 pkt= NULL;
2383
2384 if(ret<0)
2385 return ret;
2386 if(url_ferror(&s->pb))
2387 return url_ferror(&s->pb);
2388 }
2389 }
2390
2391 int av_write_trailer(AVFormatContext *s)
2392 {
2393 int ret, i;
2394
2395 for(;;){
2396 AVPacket pkt;
2397 ret= av_interleave_packet(s, &pkt, NULL, 1);
2398 if(ret<0) //FIXME cleanup needed for ret<0 ?
2399 goto fail;
2400 if(!ret)
2401 break;
2402
2403 truncate_ts(s->streams[pkt.stream_index], &pkt);
2404 ret= s->oformat->write_packet(s, &pkt);
2405
2406 av_free_packet(&pkt);
2407
2408 if(ret<0)
2409 goto fail;
2410 if(url_ferror(&s->pb))
2411 goto fail;
2412 }
2413
2414 if(s->oformat->write_trailer)
2415 ret = s->oformat->write_trailer(s);
2416 fail:
2417 if(ret == 0)
2418 ret=url_ferror(&s->pb);
2419 for(i=0;i<s->nb_streams;i++)
2420 av_freep(&s->streams[i]->priv_data);
2421 av_freep(&s->priv_data);
2422 return ret;
2423 }
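/* A minimal muxing sketch showing the call order expected by this API:
 * av_write_header(), av_interleaved_write_frame() for each packet, then
 * av_write_trailer(). The context oc is assumed to be fully prepared by the
 * caller (oformat set, streams created, ByteIOContext opened); the name
 * example_mux and the pkts/nb_pkts parameters are invented. Kept disabled. */
#if 0
static int example_mux(AVFormatContext *oc, AVPacket *pkts, int nb_pkts)
{
    int i;

    if (av_write_header(oc) < 0)
        return -1;
    for (i = 0; i < nb_pkts; i++) {
        /* pts/dts must be in the destination stream's time_base; the
           interleaving code above keeps or duplicates the packet data */
        if (av_interleaved_write_frame(oc, &pkts[i]) < 0)
            return -1;
    }
    return av_write_trailer(oc);
}
#endif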
2424
2425 /* "user interface" functions */
2426
2427 void dump_format(AVFormatContext *ic,
2428 int index,
2429 const char *url,
2430 int is_output)
2431 {
2432 int i, flags;
2433 char buf[256];
2434
2435 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2436 is_output ? "Output" : "Input",
2437 index,
2438 is_output ? ic->oformat->name : ic->iformat->name,
2439 is_output ? "to" : "from", url);
2440 if (!is_output) {
2441 av_log(NULL, AV_LOG_INFO, " Duration: ");
2442 if (ic->duration != AV_NOPTS_VALUE) {
2443 int hours, mins, secs, us;
2444 secs = ic->duration / AV_TIME_BASE;
2445 us = ic->duration % AV_TIME_BASE;
2446 mins = secs / 60;
2447 secs %= 60;
2448 hours = mins / 60;
2449 mins %= 60;
2450 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2451 (10 * us) / AV_TIME_BASE);
2452 } else {
2453 av_log(NULL, AV_LOG_INFO, "N/A");
2454 }
2455 if (ic->start_time != AV_NOPTS_VALUE) {
2456 int secs, us;
2457 av_log(NULL, AV_LOG_INFO, ", start: ");
2458 secs = ic->start_time / AV_TIME_BASE;
2459 us = ic->start_time % AV_TIME_BASE;
2460 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2461 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2462 }
2463 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2464 if (ic->bit_rate) {
2465 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2466 } else {
2467 av_log(NULL, AV_LOG_INFO, "N/A");
2468 }
2469 av_log(NULL, AV_LOG_INFO, "\n");
2470 }
2471 for(i=0;i<ic->nb_streams;i++) {
2472 AVStream *st = ic->streams[i];
2473 int g= ff_gcd(st->time_base.num, st->time_base.den);
2474 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2475 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2476         /* the PID is an important piece of information, so we display it */
2477 /* XXX: add a generic system */
2478 if (is_output)
2479 flags = ic->oformat->flags;
2480 else
2481 flags = ic->iformat->flags;
2482 if (flags & AVFMT_SHOW_IDS) {
2483 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2484 }
2485 if (strlen(st->language) > 0) {
2486 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2487 }
2488 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2489 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2490 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2491 if(st->r_frame_rate.den && st->r_frame_rate.num)
2492 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
2493 /* else if(st->time_base.den && st->time_base.num)
2494 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
2495 else
2496 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
2497 }
2498 av_log(NULL, AV_LOG_INFO, "\n");
2499 }
2500 }
2501
2502 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2503 {
2504 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2505 }
2506
2507 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2508 {
2509 AVRational frame_rate;
2510 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2511 *frame_rate_num= frame_rate.num;
2512 *frame_rate_den= frame_rate.den;
2513 return ret;
2514 }
2515
2516 /**
2517  * Gets the current time in microseconds.
2518 */
2519 int64_t av_gettime(void)
2520 {
2521 struct timeval tv;
2522 gettimeofday(&tv,NULL);
2523 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2524 }
2525
2526 int64_t parse_date(const char *datestr, int duration)
2527 {
2528 const char *p;
2529 int64_t t;
2530 struct tm dt;
2531 int i;
2532 static const char *date_fmt[] = {
2533 "%Y-%m-%d",
2534 "%Y%m%d",
2535 };
2536 static const char *time_fmt[] = {
2537 "%H:%M:%S",
2538 "%H%M%S",
2539 };
2540 const char *q;
2541 int is_utc, len;
2542 char lastch;
2543 int negative = 0;
2544
2545 #undef time
2546 time_t now = time(0);
2547
2548 len = strlen(datestr);
2549 if (len > 0)
2550 lastch = datestr[len - 1];
2551 else
2552 lastch = '\0';
2553 is_utc = (lastch == 'z' || lastch == 'Z');
2554
2555 memset(&dt, 0, sizeof(dt));
2556
2557 p = datestr;
2558 q = NULL;
2559 if (!duration) {
2560 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2561 q = small_strptime(p, date_fmt[i], &dt);
2562 if (q) {
2563 break;
2564 }
2565 }
2566
2567 if (!q) {
2568 if (is_utc) {
2569 dt = *gmtime(&now);
2570 } else {
2571 dt = *localtime(&now);
2572 }
2573 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2574 } else {
2575 p = q;
2576 }
2577
2578 if (*p == 'T' || *p == 't' || *p == ' ')
2579 p++;
2580
2581 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2582 q = small_strptime(p, time_fmt[i], &dt);
2583 if (q) {
2584 break;
2585 }
2586 }
2587 } else {
2588 if (p[0] == '-') {
2589 negative = 1;
2590 ++p;
2591 }
2592 q = small_strptime(p, time_fmt[0], &dt);
2593 if (!q) {
2594 dt.tm_sec = strtol(p, (char **)&q, 10);
2595 dt.tm_min = 0;
2596 dt.tm_hour = 0;
2597 }
2598 }
2599
2600 /* Now we have all the fields that we can get */
2601 if (!q) {
2602 if (duration)
2603 return 0;
2604 else
2605 return now * INT64_C(1000000);
2606 }
2607
2608 if (duration) {
2609 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2610 } else {
2611 dt.tm_isdst = -1; /* unknown */
2612 if (is_utc) {
2613 t = mktimegm(&dt);
2614 } else {
2615 t = mktime(&dt);
2616 }
2617 }
2618
2619 t *= 1000000;
2620
2621 if (*q == '.') {
2622 int val, n;
2623 q++;
2624 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2625 if (!isdigit(*q))
2626 break;
2627 val += n * (*q - '0');
2628 }
2629 t += val;
2630 }
2631 return negative ? -t : t;
2632 }
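/* Illustrative results (traced from the code above; absolute dates use the
 * local time zone unless the string ends in 'Z'):
 *   parse_date("2007-03-04T12:30:00Z", 0)  -> microseconds since 1970 (UTC)
 *   parse_date("01:30:00", 1)              -> 5400000000  (1h30m as a duration)
 *   parse_date("-00:00:01.5", 1)           -> -1500000    (negative duration)
 */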
2633
2634 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2635 {
2636 const char *p;
2637 char tag[128], *q;
2638
2639 p = info;
2640 if (*p == '?')
2641 p++;
2642 for(;;) {
2643 q = tag;
2644 while (*p != '\0' && *p != '=' && *p != '&') {
2645 if ((q - tag) < sizeof(tag) - 1)
2646 *q++ = *p;
2647 p++;
2648 }
2649 *q = '\0';
2650 q = arg;
2651 if (*p == '=') {
2652 p++;
2653 while (*p != '&' && *p != '\0') {
2654 if ((q - arg) < arg_size - 1) {
2655 if (*p == '+')
2656 *q++ = ' ';
2657 else
2658 *q++ = *p;
2659 }
2660 p++;
2661 }
2662 *q = '\0';
2663 }
2664 if (!strcmp(tag, tag1))
2665 return 1;
2666 if (*p != '&')
2667 break;
2668 p++;
2669 }
2670 return 0;
2671 }
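/* Example: with info = "?bitrate=128&mode=mono",
 * find_info_tag(buf, sizeof(buf), "bitrate", info) returns 1 and stores
 * "128" in buf; a '+' inside a value is decoded as a space. */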
2672
2673 int av_get_frame_filename(char *buf, int buf_size,
2674 const char *path, int number)
2675 {
2676 const char *p;
2677 char *q, buf1[20], c;
2678 int nd, len, percentd_found;
2679
2680 q = buf;
2681 p = path;
2682 percentd_found = 0;
2683 for(;;) {
2684 c = *p++;
2685 if (c == '\0')
2686 break;
2687 if (c == '%') {
2688 do {
2689 nd = 0;
2690 while (isdigit(*p)) {
2691 nd = nd * 10 + *p++ - '0';
2692 }
2693 c = *p++;
2694 } while (isdigit(c));
2695
2696 switch(c) {
2697 case '%':
2698 goto addchar;
2699 case 'd':
2700 if (percentd_found)
2701 goto fail;
2702 percentd_found = 1;
2703 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2704 len = strlen(buf1);
2705 if ((q - buf + len) > buf_size - 1)
2706 goto fail;
2707 memcpy(q, buf1, len);
2708 q += len;
2709 break;
2710 default:
2711 goto fail;
2712 }
2713 } else {
2714 addchar:
2715 if ((q - buf) < buf_size - 1)
2716 *q++ = c;
2717 }
2718 }
2719 if (!percentd_found)
2720 goto fail;
2721 *q = '\0';
2722 return 0;
2723 fail:
2724 *q = '\0';
2725 return -1;
2726 }
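/* Example: av_get_frame_filename(buf, sizeof(buf), "frame%04d.png", 7)
 * stores "frame0007.png" in buf and returns 0; it fails with -1 if the
 * pattern contains no %d or more than one %d. */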
2727
2728 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
2729 {
2730 int len, i, j, c;
2731 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2732
2733 for(i=0;i<size;i+=16) {
2734 len = size - i;
2735 if (len > 16)
2736 len = 16;
2737 PRINT("%08x ", i);
2738 for(j=0;j<16;j++) {
2739 if (j < len)
2740 PRINT(" %02x", buf[i+j]);
2741 else
2742 PRINT(" ");
2743 }
2744 PRINT(" ");
2745 for(j=0;j<len;j++) {
2746 c = buf[i+j];
2747 if (c < ' ' || c > '~')
2748 c = '.';
2749 PRINT("%c", c);
2750 }
2751 PRINT("\n");
2752 }
2753 #undef PRINT
2754 }
2755
2756 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2757 {
2758 hex_dump_internal(NULL, f, 0, buf, size);
2759 }
2760
2761 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
2762 {
2763 hex_dump_internal(avcl, NULL, level, buf, size);
2764 }
2765
2766 //FIXME needs to know the time_base
2767 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
2768 {
2769 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2770 PRINT("stream #%d:\n", pkt->stream_index);
2771 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2772 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2773 /* DTS is _always_ valid after av_read_frame() */
2774 PRINT(" dts=");
2775 if (pkt->dts == AV_NOPTS_VALUE)
2776 PRINT("N/A");
2777 else
2778 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
2779     /* PTS may not be known if B-frames are present */
2780 PRINT(" pts=");
2781 if (pkt->pts == AV_NOPTS_VALUE)
2782 PRINT("N/A");
2783 else
2784 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
2785 PRINT("\n");
2786 PRINT(" size=%d\n", pkt->size);
2787 #undef PRINT
2788 if (dump_payload)
2789 av_hex_dump(f, pkt->data, pkt->size);
2790 }
2791
2792 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2793 {
2794 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
2795 }
2796
2797 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
2798 {
2799 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
2800 }
2801
2802 void url_split(char *proto, int proto_size,
2803 char *authorization, int authorization_size,
2804 char *hostname, int hostname_size,
2805 int *port_ptr,
2806 char *path, int path_size,
2807 const char *url)
2808 {
2809 const char *p;
2810 char *q;
2811 int port;
2812
2813 port = -1;
2814
2815 p = url;
2816 q = proto;
2817 while (*p != ':' && *p != '\0') {
2818 if ((q - proto) < proto_size - 1)
2819 *q++ = *p;
2820 p++;
2821 }
2822 if (proto_size > 0)
2823 *q = '\0';
2824 if (authorization_size > 0)
2825 authorization[0] = '\0';
2826 if (*p == '\0') {
2827 if (proto_size > 0)
2828 proto[0] = '\0';
2829 if (hostname_size > 0)
2830 hostname[0] = '\0';
2831 p = url;
2832 } else {
2833 char *at,*slash; // PETR: position of '@' character and '/' character
2834
2835 p++;
2836 if (*p == '/')
2837 p++;
2838 if (*p == '/')
2839 p++;
2840 at = strchr(p,'@'); // PETR: get the position of '@'
2841 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
2842 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
2843
2844         q = at ? authorization : hostname;  // PETR: if '@' exists, start filling the authorization buffer, else the hostname
2845
2846 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
2847 if (*p == '@') { // PETR: passed '@'
2848 if (authorization_size > 0)
2849 *q = '\0';
2850 q = hostname;
2851 at = NULL;
2852 } else if (!at) { // PETR: hostname
2853 if ((q - hostname) < hostname_size - 1)
2854 *q++ = *p;
2855 } else {
2856 if ((q - authorization) < authorization_size - 1)
2857 *q++ = *p;
2858 }
2859 p++;
2860 }
2861 if (hostname_size > 0)
2862 *q = '\0';
2863 if (*p == ':') {
2864 p++;
2865 port = strtoul(p, (char **)&p, 10);
2866 }
2867 }
2868 if (port_ptr)
2869 *port_ptr = port;
2870 av_strlcpy(path, p, path_size);
2871 }
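/* Example: for "rtsp://user:pass@host.example.com:554/stream",
 * url_split() yields proto="rtsp", authorization="user:pass",
 * hostname="host.example.com", port 554 and path="/stream";
 * *port_ptr is set to -1 when the URL carries no port. */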
2872
2873 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2874 int pts_num, int pts_den)
2875 {
2876 s->pts_wrap_bits = pts_wrap_bits;
2877 s->time_base.num = pts_num;
2878 s->time_base.den = pts_den;
2879 }
2880
2881 /* fraction handling */
2882
2883 /**
2884 * f = val + (num / den) + 0.5.
2885 *
2886  * 'num' is normalized so that 0 <= num < den.
2887 *
2888 * @param f fractional number
2889 * @param val integer value
2890 * @param num must be >= 0
2891 * @param den must be >= 1
2892 */
2893 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2894 {
2895 num += (den >> 1);
2896 if (num >= den) {
2897 val += num / den;
2898 num = num % den;
2899 }
2900 f->val = val;
2901 f->num = num;
2902 f->den = den;
2903 }
2904
2905 /**
2906  * Fractional addition to f: f = f + (incr / f->den).
2907 *
2908 * @param f fractional number
2909 * @param incr increment, can be positive or negative
2910 */
2911 static void av_frac_add(AVFrac *f, int64_t incr)
2912 {
2913 int64_t num, den;
2914
2915 num = f->num + incr;
2916 den = f->den;
2917 if (num < 0) {
2918 f->val += num / den;
2919 num = num % den;
2920 if (num < 0) {
2921 num += den;
2922 f->val--;
2923 }
2924 } else if (num >= den) {
2925 f->val += num / den;
2926 num = num % den;
2927 }
2928 f->num = num;
2929 }
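/* Worked example of the fraction accumulator as used for audio PTS
 * generation in av_write_header() and compute_pkt_fields2(): with
 * time_base = 1/90000 and sample_rate = 44100, av_frac_init(&pts, 0, 0,
 * 44100) is followed by av_frac_add(&pts, 90000 * frame_size) per packet.
 * For frame_size = 1152 the first add advances pts.val from 0 to 2351
 * (1152/44100 s is about 2351.02 ticks of 1/90000 s); the remainder
 * 22950/44100 stays in pts.num, so no rounding error accumulates. */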