libavformat/utils.c
1 /*
2 * Various utilities for the FFmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "allformats.h"
23 #include "opt.h"
24 #include "avstring.h"
25 #include "riff.h"
26 #include <sys/time.h>
27 #include <time.h>
28
29 #undef NDEBUG
30 #include <assert.h>
31
32 /**
33 * @file libavformat/utils.c
34 * Various utility functions for using the FFmpeg library.
35 */
36
37 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
38 static void av_frac_add(AVFrac *f, int64_t incr);
39
40 /** head of registered input format linked list. */
41 AVInputFormat *first_iformat = NULL;
42 /** head of registered output format linked list. */
43 AVOutputFormat *first_oformat = NULL;
44
45 void av_register_input_format(AVInputFormat *format)
46 {
47 AVInputFormat **p;
48 p = &first_iformat;
49 while (*p != NULL) p = &(*p)->next;
50 *p = format;
51 format->next = NULL;
52 }
53
54 void av_register_output_format(AVOutputFormat *format)
55 {
56 AVOutputFormat **p;
57 p = &first_oformat;
58 while (*p != NULL) p = &(*p)->next;
59 *p = format;
60 format->next = NULL;
61 }
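
/* Illustrative sketch (not part of the original file): how formats end up on
 * the two lists above. Real (de)muxers are registered centrally from
 * av_register_all() in allformats.c; "my_demuxer" and "my_muxer" are
 * hypothetical names. */
#if 0
extern AVInputFormat  my_demuxer;
extern AVOutputFormat my_muxer;

static void register_my_formats(void)
{
    av_register_input_format(&my_demuxer);
    av_register_output_format(&my_muxer);
}
#endif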
62
63 int match_ext(const char *filename, const char *extensions)
64 {
65 const char *ext, *p;
66 char ext1[32], *q;
67
68 if(!filename)
69 return 0;
70
71 ext = strrchr(filename, '.');
72 if (ext) {
73 ext++;
74 p = extensions;
75 for(;;) {
76 q = ext1;
77 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
78 *q++ = *p++;
79 *q = '\0';
80 if (!strcasecmp(ext1, ext))
81 return 1;
82 if (*p == '\0')
83 break;
84 p++;
85 }
86 }
87 return 0;
88 }
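
/* Example (added for clarity, not in the original source): with the extension
 * list "mp4,mov,m4a", match_ext("clip.MOV", ...) returns 1 because the
 * comparison is case-insensitive, while match_ext("clip.mkv", ...) returns 0. */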
89
90 AVOutputFormat *guess_format(const char *short_name, const char *filename,
91 const char *mime_type)
92 {
93 AVOutputFormat *fmt, *fmt_found;
94 int score_max, score;
95
96 /* specific test for image sequences */
97 #ifdef CONFIG_IMAGE2_MUXER
98 if (!short_name && filename &&
99 av_filename_number_test(filename) &&
100 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
101 return guess_format("image2", NULL, NULL);
102 }
103 #endif
104 /* find the proper file type */
105 fmt_found = NULL;
106 score_max = 0;
107 fmt = first_oformat;
108 while (fmt != NULL) {
109 score = 0;
110 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
111 score += 100;
112 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
113 score += 10;
114 if (filename && fmt->extensions &&
115 match_ext(filename, fmt->extensions)) {
116 score += 5;
117 }
118 if (score > score_max) {
119 score_max = score;
120 fmt_found = fmt;
121 }
122 fmt = fmt->next;
123 }
124 return fmt_found;
125 }
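
/* Illustrative sketch (not part of the original file): picking a muxer from a
 * filename, with a fall-back default. The "mpeg" fall-back mirrors common
 * usage but is an arbitrary choice here. */
#if 0
static AVOutputFormat *pick_output_format(const char *filename)
{
    AVOutputFormat *fmt = guess_format(NULL, filename, NULL); /* match by extension */
    if (!fmt)
        fmt = guess_format("mpeg", NULL, NULL);               /* arbitrary default */
    return fmt;
}
#endif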
126
127 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
128 const char *mime_type)
129 {
130 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
131
132 if (fmt) {
133 AVOutputFormat *stream_fmt;
134 char stream_format_name[64];
135
136 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
137 stream_fmt = guess_format(stream_format_name, NULL, NULL);
138
139 if (stream_fmt)
140 fmt = stream_fmt;
141 }
142
143 return fmt;
144 }
145
146 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
147 const char *filename, const char *mime_type, enum CodecType type){
148 if(type == CODEC_TYPE_VIDEO){
149 enum CodecID codec_id= CODEC_ID_NONE;
150
151 #ifdef CONFIG_IMAGE2_MUXER
152 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
153 codec_id= av_guess_image2_codec(filename);
154 }
155 #endif
156 if(codec_id == CODEC_ID_NONE)
157 codec_id= fmt->video_codec;
158 return codec_id;
159 }else if(type == CODEC_TYPE_AUDIO)
160 return fmt->audio_codec;
161 else
162 return CODEC_ID_NONE;
163 }
164
165 AVInputFormat *av_find_input_format(const char *short_name)
166 {
167 AVInputFormat *fmt;
168 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
169 if (!strcmp(fmt->name, short_name))
170 return fmt;
171 }
172 return NULL;
173 }
174
175 /* memory handling */
176
177 void av_destruct_packet(AVPacket *pkt)
178 {
179 av_free(pkt->data);
180 pkt->data = NULL; pkt->size = 0;
181 }
182
183 void av_init_packet(AVPacket *pkt)
184 {
185 pkt->pts = AV_NOPTS_VALUE;
186 pkt->dts = AV_NOPTS_VALUE;
187 pkt->pos = -1;
188 pkt->duration = 0;
189 pkt->flags = 0;
190 pkt->stream_index = 0;
191 pkt->destruct= av_destruct_packet_nofree;
192 }
193
194 int av_new_packet(AVPacket *pkt, int size)
195 {
196 uint8_t *data;
197 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
198 return AVERROR(ENOMEM);
199 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
200 if (!data)
201 return AVERROR(ENOMEM);
202 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
203
204 av_init_packet(pkt);
205 pkt->data = data;
206 pkt->size = size;
207 pkt->destruct = av_destruct_packet;
208 return 0;
209 }
210
211 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
212 {
213 int ret= av_new_packet(pkt, size);
214
215 if(ret<0)
216 return ret;
217
218 pkt->pos= url_ftell(s);
219
220 ret= get_buffer(s, pkt->data, size);
221 if(ret<=0)
222 av_free_packet(pkt);
223 else
224 pkt->size= ret;
225
226 return ret;
227 }
228
229 int av_dup_packet(AVPacket *pkt)
230 {
231 if (pkt->destruct != av_destruct_packet) {
232 uint8_t *data;
233 /* we duplicate the packet and don't forget to put the padding
234 again */
235 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
236 return AVERROR(ENOMEM);
237 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
238 if (!data) {
239 return AVERROR(ENOMEM);
240 }
241 memcpy(data, pkt->data, pkt->size);
242 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
243 pkt->data = data;
244 pkt->destruct = av_destruct_packet;
245 }
246 return 0;
247 }
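
/* Illustrative sketch (not part of the original file): typical lifetime of a
 * packet allocated with av_new_packet(); the 1024-byte payload size is
 * arbitrary. */
#if 0
static int packet_example(void)
{
    AVPacket pkt;

    if (av_new_packet(&pkt, 1024) < 0)   /* allocates size + padding bytes */
        return AVERROR(ENOMEM);
    memset(pkt.data, 0, pkt.size);       /* fill the payload somehow */
    /* av_dup_packet() is only needed for packets whose data is still owned by
       the demuxer (destruct == av_destruct_packet_nofree) and must outlive the
       next read call. */
    av_free_packet(&pkt);                /* releases the buffer */
    return 0;
}
#endif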
248
249 int av_filename_number_test(const char *filename)
250 {
251 char buf[1024];
252 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
253 }
254
255 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
256 {
257 AVInputFormat *fmt1, *fmt;
258 int score;
259
260 fmt = NULL;
261 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
262 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
263 continue;
264 score = 0;
265 if (fmt1->read_probe) {
266 score = fmt1->read_probe(pd);
267 } else if (fmt1->extensions) {
268 if (match_ext(pd->filename, fmt1->extensions)) {
269 score = 50;
270 }
271 }
272 if (score > *score_max) {
273 *score_max = score;
274 fmt = fmt1;
275 }
276 }
277 return fmt;
278 }
279
280 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
281 int score=0;
282 return av_probe_input_format2(pd, is_opened, &score);
283 }
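
/* Illustrative sketch (not part of the original file): probing an in-memory
 * buffer read from some hypothetical source. */
#if 0
static AVInputFormat *probe_buffer(const char *filename, uint8_t *buf, int buf_size)
{
    AVProbeData pd;

    pd.filename = filename;   /* may be "" if unknown */
    pd.buf      = buf;        /* ideally followed by AVPROBE_PADDING_SIZE zero bytes */
    pd.buf_size = buf_size;
    return av_probe_input_format(&pd, 1);
}
#endif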
284
285 /************************************************************/
286 /* input media file */
287
288 /**
289 * Open a media file from an IO stream. 'fmt' must be specified.
290 */
291 static const char* format_to_name(void* ptr)
292 {
293 AVFormatContext* fc = (AVFormatContext*) ptr;
294 if(fc->iformat) return fc->iformat->name;
295 else if(fc->oformat) return fc->oformat->name;
296 else return "NULL";
297 }
298
299 #define OFFSET(x) offsetof(AVFormatContext,x)
300 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
301 //these names are too long to be readable
302 #define E AV_OPT_FLAG_ENCODING_PARAM
303 #define D AV_OPT_FLAG_DECODING_PARAM
304
305 static const AVOption options[]={
306 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
307 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
308 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
309 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
310 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
311 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
312 {"track", "set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
313 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
314 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
315 {NULL},
316 };
317
318 #undef E
319 #undef D
320 #undef DEFAULT
321
322 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
323
324 static void avformat_get_context_defaults(AVFormatContext *s)
325 {
326 memset(s, 0, sizeof(AVFormatContext));
327
328 s->av_class = &av_format_context_class;
329
330 av_opt_set_defaults(s);
331 }
332
333 AVFormatContext *av_alloc_format_context(void)
334 {
335 AVFormatContext *ic;
336 ic = av_malloc(sizeof(AVFormatContext));
337 if (!ic) return ic;
338 avformat_get_context_defaults(ic);
339 ic->av_class = &av_format_context_class;
340 return ic;
341 }
342
343 int av_open_input_stream(AVFormatContext **ic_ptr,
344 ByteIOContext *pb, const char *filename,
345 AVInputFormat *fmt, AVFormatParameters *ap)
346 {
347 int err;
348 AVFormatContext *ic;
349 AVFormatParameters default_ap;
350
351 if(!ap){
352 ap=&default_ap;
353 memset(ap, 0, sizeof(default_ap));
354 }
355
356 if(!ap->prealloced_context)
357 ic = av_alloc_format_context();
358 else
359 ic = *ic_ptr;
360 if (!ic) {
361 err = AVERROR(ENOMEM);
362 goto fail;
363 }
364 ic->iformat = fmt;
365 if (pb)
366 ic->pb = *pb;
367 ic->duration = AV_NOPTS_VALUE;
368 ic->start_time = AV_NOPTS_VALUE;
369 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
370
371 /* allocate private data */
372 if (fmt->priv_data_size > 0) {
373 ic->priv_data = av_mallocz(fmt->priv_data_size);
374 if (!ic->priv_data) {
375 err = AVERROR(ENOMEM);
376 goto fail;
377 }
378 } else {
379 ic->priv_data = NULL;
380 }
381
382 err = ic->iformat->read_header(ic, ap);
383 if (err < 0)
384 goto fail;
385
386 if (pb && !ic->data_offset)
387 ic->data_offset = url_ftell(&ic->pb);
388
389 *ic_ptr = ic;
390 return 0;
391 fail:
392 if (ic) {
393 av_freep(&ic->priv_data);
394 }
395 av_free(ic);
396 *ic_ptr = NULL;
397 return err;
398 }
399
400 /** Size of probe buffer, for guessing file type from file contents. */
401 #define PROBE_BUF_MIN 2048
402 #define PROBE_BUF_MAX (1<<20)
403
404 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
405 AVInputFormat *fmt,
406 int buf_size,
407 AVFormatParameters *ap)
408 {
409 int err, must_open_file, file_opened, probe_size;
410 AVProbeData probe_data, *pd = &probe_data;
411 ByteIOContext pb1, *pb = &pb1;
412
413 file_opened = 0;
414 pd->filename = "";
415 if (filename)
416 pd->filename = filename;
417 pd->buf = NULL;
418 pd->buf_size = 0;
419
420 if (!fmt) {
421 /* guess format if no file can be opened */
422 fmt = av_probe_input_format(pd, 0);
423 }
424
425 /* do not open file if the format does not need it. XXX: specific
426 hack needed to handle RTSP/TCP */
427 must_open_file = 1;
428 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
429 must_open_file = 0;
430 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it is uninitialized
431 }
432
433 if (!fmt || must_open_file) {
434 /* if no file needed do not try to open one */
435 if ((err=url_fopen(pb, filename, URL_RDONLY)) < 0) {
436 goto fail;
437 }
438 file_opened = 1;
439 if (buf_size > 0) {
440 url_setbufsize(pb, buf_size);
441 }
442
443 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
444 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
445 /* read probe data */
446 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
447 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
448 if (url_fseek(pb, 0, SEEK_SET) < 0) {
449 url_fclose(pb);
450 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
451 file_opened = 0;
452 err = AVERROR(EIO);
453 goto fail;
454 }
455 }
456 /* guess file format */
457 fmt = av_probe_input_format2(pd, 1, &score);
458 }
459 av_freep(&pd->buf);
460 }
461
462 /* if still no format found, error */
463 if (!fmt) {
464 err = AVERROR_NOFMT;
465 goto fail;
466 }
467
468 /* XXX: remove this hack for redirectors */
469 #ifdef CONFIG_REDIR_DEMUXER
470 if (fmt == &redir_demuxer) {
471 err = redir_open(ic_ptr, pb);
472 url_fclose(pb);
473 return err;
474 }
475 #endif
476
477 /* check the filename in case an image number is expected */
478 if (fmt->flags & AVFMT_NEEDNUMBER) {
479 if (!av_filename_number_test(filename)) {
480 err = AVERROR_NUMEXPECTED;
481 goto fail;
482 }
483 }
484 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
485 if (err)
486 goto fail;
487 return 0;
488 fail:
489 av_freep(&pd->buf);
490 if (file_opened)
491 url_fclose(pb);
492 *ic_ptr = NULL;
493 return err;
494
495 }
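
/* Illustrative sketch (not part of the original file): the usual demuxing
 * sequence built on the functions above. The filename is supplied by the
 * caller and error handling is reduced to a minimum. */
#if 0
static int demux_example(const char *filename)
{
    AVFormatContext *ic;
    AVPacket pkt;

    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return -1;
    if (av_find_stream_info(ic) < 0) {   /* fills codec parameters, duration, ... */
        av_close_input_file(ic);
        return -1;
    }
    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.stream_index selects ic->streams[...]; decode or remux here */
        av_free_packet(&pkt);
    }
    av_close_input_file(ic);
    return 0;
}
#endif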
496
497 /*******************************************************/
498
499 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
500 {
501 av_init_packet(pkt);
502 return s->iformat->read_packet(s, pkt);
503 }
504
505 /**********************************************************/
506
507 /**
508 * Get the number of samples of an audio frame. Return -1 on error.
509 */
510 static int get_audio_frame_size(AVCodecContext *enc, int size)
511 {
512 int frame_size;
513
514 if (enc->frame_size <= 1) {
515 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
516
517 if (bits_per_sample) {
518 if (enc->channels == 0)
519 return -1;
520 frame_size = (size << 3) / (bits_per_sample * enc->channels);
521 } else {
522 /* used for example by ADPCM codecs */
523 if (enc->bit_rate == 0)
524 return -1;
525 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
526 }
527 } else {
528 frame_size = enc->frame_size;
529 }
530 return frame_size;
531 }
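
/* Worked example (added for clarity, not in the original source): for 16-bit
 * stereo PCM av_get_bits_per_sample() gives 16, so a 4096-byte packet holds
 * (4096 * 8) / (16 * 2) = 1024 samples per channel. For a CBR codec without a
 * fixed frame_size, e.g. 4096 bytes at 128 kbit/s and 44100 Hz, the estimate
 * is (4096 * 8 * 44100) / 128000 = 11289 samples. */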
532
533
534 /**
535 * Compute the frame duration, in seconds, expressed as the fraction *pnum / *pden; both are set to 0 if it is not available.
536 */
537 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
538 AVCodecParserContext *pc, AVPacket *pkt)
539 {
540 int frame_size;
541
542 *pnum = 0;
543 *pden = 0;
544 switch(st->codec->codec_type) {
545 case CODEC_TYPE_VIDEO:
546 if(st->time_base.num*1000LL > st->time_base.den){
547 *pnum = st->time_base.num;
548 *pden = st->time_base.den;
549 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
550 *pnum = st->codec->time_base.num;
551 *pden = st->codec->time_base.den;
552 if (pc && pc->repeat_pict) {
553 *pden *= 2;
554 *pnum = (*pnum) * (2 + pc->repeat_pict);
555 }
556 }
557 break;
558 case CODEC_TYPE_AUDIO:
559 frame_size = get_audio_frame_size(st->codec, pkt->size);
560 if (frame_size < 0)
561 break;
562 *pnum = frame_size;
563 *pden = st->codec->sample_rate;
564 break;
565 default:
566 break;
567 }
568 }
569
570 static int is_intra_only(AVCodecContext *enc){
571 if(enc->codec_type == CODEC_TYPE_AUDIO){
572 return 1;
573 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
574 switch(enc->codec_id){
575 case CODEC_ID_MJPEG:
576 case CODEC_ID_MJPEGB:
577 case CODEC_ID_LJPEG:
578 case CODEC_ID_RAWVIDEO:
579 case CODEC_ID_DVVIDEO:
580 case CODEC_ID_HUFFYUV:
581 case CODEC_ID_FFVHUFF:
582 case CODEC_ID_ASV1:
583 case CODEC_ID_ASV2:
584 case CODEC_ID_VCR1:
585 return 1;
586 default: break;
587 }
588 }
589 return 0;
590 }
591
592 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
593 AVCodecParserContext *pc, AVPacket *pkt)
594 {
595 int num, den, presentation_delayed, delay, i;
596 int64_t offset;
597
598 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
599 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
600 pkt->dts -= 1LL<<st->pts_wrap_bits;
601 }
602
603 if (pkt->duration == 0) {
604 compute_frame_duration(&num, &den, st, pc, pkt);
605 if (den && num) {
606 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
607 }
608 }
609
610 /* correct timestamps with byte offset if demuxers only have timestamps on packet boundaries */
611 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
612 /* this will estimate bitrate based on this frame's duration and size */
613 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
614 if(pkt->pts != AV_NOPTS_VALUE)
615 pkt->pts += offset;
616 if(pkt->dts != AV_NOPTS_VALUE)
617 pkt->dts += offset;
618 }
619
620 if(is_intra_only(st->codec))
621 pkt->flags |= PKT_FLAG_KEY;
622
623 /* do we have a video B-frame? */
624 delay= st->codec->has_b_frames;
625 presentation_delayed = 0;
626 /* XXX: need has_b_frame, but cannot get it if the codec is
627 not initialized */
628 if (delay &&
629 pc && pc->pict_type != FF_B_TYPE)
630 presentation_delayed = 1;
631 /* This may be redundant, but it should not hurt. */
632 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
633 presentation_delayed = 1;
634
635 if(st->cur_dts == AV_NOPTS_VALUE){
636 st->cur_dts = -delay * pkt->duration;
637 }
638
639 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
640 /* interpolate PTS and DTS if they are not present */
641 if(delay <=1){
642 if (presentation_delayed) {
643 /* DTS = decompression time stamp */
644 /* PTS = presentation time stamp */
645 if (pkt->dts == AV_NOPTS_VALUE)
646 pkt->dts = st->last_IP_pts;
647 if (pkt->dts == AV_NOPTS_VALUE)
648 pkt->dts = st->cur_dts;
649
650 /* this is tricky: the dts must be incremented by the duration
651 of the frame we are displaying, i.e. the last I or P frame */
652 if (st->last_IP_duration == 0)
653 st->last_IP_duration = pkt->duration;
654 st->cur_dts = pkt->dts + st->last_IP_duration;
655 st->last_IP_duration = pkt->duration;
656 st->last_IP_pts= pkt->pts;
657 /* cannot compute PTS if not present (we can compute it only
658 by knowing the future) */
659 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
660 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
661 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
662 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
663 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
664 pkt->pts += pkt->duration;
665 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
666 }
667 }
668
669 /* presentation is not delayed: PTS and DTS are the same */
670 if(pkt->pts == AV_NOPTS_VALUE)
671 pkt->pts = pkt->dts;
672 if(pkt->pts == AV_NOPTS_VALUE)
673 pkt->pts = st->cur_dts;
674 pkt->dts = pkt->pts;
675 st->cur_dts = pkt->pts + pkt->duration;
676 }
677 }
678
679 if(pkt->pts != AV_NOPTS_VALUE){
680 st->pts_buffer[0]= pkt->pts;
681 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
682 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
683 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
684 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
685 if(pkt->dts == AV_NOPTS_VALUE)
686 pkt->dts= st->pts_buffer[0];
687 if(pkt->dts > st->cur_dts)
688 st->cur_dts = pkt->dts;
689 }
690
691 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
692
693 /* update flags */
694 if (pc) {
695 pkt->flags = 0;
696 /* key frame computation */
697 if (pc->pict_type == FF_I_TYPE)
698 pkt->flags |= PKT_FLAG_KEY;
699 }
700 }
701
702 void av_destruct_packet_nofree(AVPacket *pkt)
703 {
704 pkt->data = NULL; pkt->size = 0;
705 }
706
707 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
708 {
709 AVStream *st;
710 int len, ret, i;
711
712 av_init_packet(pkt);
713
714 for(;;) {
715 /* select current input stream component */
716 st = s->cur_st;
717 if (st) {
718 if (!st->need_parsing || !st->parser) {
719 /* no parsing needed: we just output the packet as is */
720 /* raw data support */
721 *pkt = s->cur_pkt;
722 compute_pkt_fields(s, st, NULL, pkt);
723 s->cur_st = NULL;
724 break;
725 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
726 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
727 s->cur_ptr, s->cur_len,
728 s->cur_pkt.pts, s->cur_pkt.dts);
729 s->cur_pkt.pts = AV_NOPTS_VALUE;
730 s->cur_pkt.dts = AV_NOPTS_VALUE;
731 /* increment read pointer */
732 s->cur_ptr += len;
733 s->cur_len -= len;
734
735 /* return packet if any */
736 if (pkt->size) {
737 got_packet:
738 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
739 pkt->duration = 0;
740 pkt->stream_index = st->index;
741 pkt->pts = st->parser->pts;
742 pkt->dts = st->parser->dts;
743 pkt->destruct = av_destruct_packet_nofree;
744 compute_pkt_fields(s, st, st->parser, pkt);
745
746 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
747 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
748 0, 0, AVINDEX_KEYFRAME);
749 }
750
751 break;
752 }
753 } else {
754 /* free packet */
755 av_free_packet(&s->cur_pkt);
756 s->cur_st = NULL;
757 }
758 } else {
759 /* read next packet */
760 ret = av_read_packet(s, &s->cur_pkt);
761 if (ret < 0) {
762 if (ret == AVERROR(EAGAIN))
763 return ret;
764 /* return the last frames, if any */
765 for(i = 0; i < s->nb_streams; i++) {
766 st = s->streams[i];
767 if (st->parser && st->need_parsing) {
768 av_parser_parse(st->parser, st->codec,
769 &pkt->data, &pkt->size,
770 NULL, 0,
771 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
772 if (pkt->size)
773 goto got_packet;
774 }
775 }
776 /* no more packets: really terminates parsing */
777 return ret;
778 }
779
780 st = s->streams[s->cur_pkt.stream_index];
781 if(st->codec->debug & FF_DEBUG_PTS)
782 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
783 s->cur_pkt.stream_index,
784 s->cur_pkt.pts,
785 s->cur_pkt.dts,
786 s->cur_pkt.size);
787
788 s->cur_st = st;
789 s->cur_ptr = s->cur_pkt.data;
790 s->cur_len = s->cur_pkt.size;
791 if (st->need_parsing && !st->parser) {
792 st->parser = av_parser_init(st->codec->codec_id);
793 if (!st->parser) {
794 /* no parser available: just output the raw packets */
795 st->need_parsing = AVSTREAM_PARSE_NONE;
796 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
797 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
798 }
799 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
800 st->parser->last_frame_offset=
801 st->parser->cur_offset= s->cur_pkt.pos;
802 }
803 }
804 }
805 }
806 if(st->codec->debug & FF_DEBUG_PTS)
807 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
808 pkt->stream_index,
809 pkt->pts,
810 pkt->dts,
811 pkt->size);
812
813 return 0;
814 }
815
816 static AVPacket *add_to_pktbuf(AVFormatContext *s, AVPacket *pkt){
817 AVPacketList *pktl= s->packet_buffer;
818 AVPacketList **plast_pktl= &s->packet_buffer;
819
820 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
821
822 pktl = av_mallocz(sizeof(AVPacketList));
823 if (!pktl)
824 return NULL;
825
826 /* add the packet in the buffered packet list */
827 *plast_pktl = pktl;
828 pktl->pkt= *pkt;
829 return &pktl->pkt;
830 }
831
832 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
833 {
834 AVPacketList *pktl;
835 int eof=0;
836 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
837
838 for(;;){
839 pktl = s->packet_buffer;
840 if (pktl) {
841 AVPacket *next_pkt= &pktl->pkt;
842
843 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
844 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
845 if( pktl->pkt.stream_index == next_pkt->stream_index
846 && next_pkt->dts < pktl->pkt.dts
847 && pktl->pkt.pts != pktl->pkt.dts //not b frame
848 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
849 next_pkt->pts= pktl->pkt.dts;
850 }
851 pktl= pktl->next;
852 }
853 pktl = s->packet_buffer;
854 }
855
856 if( next_pkt->pts != AV_NOPTS_VALUE
857 || next_pkt->dts == AV_NOPTS_VALUE
858 || !genpts || eof){
859 /* read packet from packet buffer, if there is data */
860 *pkt = *next_pkt;
861 s->packet_buffer = pktl->next;
862 av_free(pktl);
863 return 0;
864 }
865 }
866 if(genpts){
867 int ret= av_read_frame_internal(s, pkt);
868 if(ret<0){
869 if(pktl && ret != AVERROR(EAGAIN)){
870 eof=1;
871 continue;
872 }else
873 return ret;
874 }
875
876 if(av_dup_packet(add_to_pktbuf(s, pkt)) < 0)
877 return AVERROR(ENOMEM);
878 }else{
879 assert(!s->packet_buffer);
880 return av_read_frame_internal(s, pkt);
881 }
882 }
883 }
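
/* Usage note (added, not in the original source): setting
 *     ic->flags |= AVFMT_FLAG_GENPTS;
 * on an opened AVFormatContext before the first av_read_frame() call makes
 * the loop above buffer packets and fill a missing pts from the dts of a
 * later packet of the same stream; without the flag such packets are returned
 * with pts == AV_NOPTS_VALUE. */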
884
885 /* XXX: suppress the packet queue */
886 static void flush_packet_queue(AVFormatContext *s)
887 {
888 AVPacketList *pktl;
889
890 for(;;) {
891 pktl = s->packet_buffer;
892 if (!pktl)
893 break;
894 s->packet_buffer = pktl->next;
895 av_free_packet(&pktl->pkt);
896 av_free(pktl);
897 }
898 }
899
900 /*******************************************************/
901 /* seek support */
902
903 int av_find_default_stream_index(AVFormatContext *s)
904 {
905 int i;
906 AVStream *st;
907
908 if (s->nb_streams <= 0)
909 return -1;
910 for(i = 0; i < s->nb_streams; i++) {
911 st = s->streams[i];
912 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
913 return i;
914 }
915 }
916 return 0;
917 }
918
919 /**
920 * Flush the frame reader.
921 */
922 static void av_read_frame_flush(AVFormatContext *s)
923 {
924 AVStream *st;
925 int i;
926
927 flush_packet_queue(s);
928
929 /* free previous packet */
930 if (s->cur_st) {
931 if (s->cur_st->parser)
932 av_free_packet(&s->cur_pkt);
933 s->cur_st = NULL;
934 }
935 /* fail safe */
936 s->cur_ptr = NULL;
937 s->cur_len = 0;
938
939 /* for each stream, reset read state */
940 for(i = 0; i < s->nb_streams; i++) {
941 st = s->streams[i];
942
943 if (st->parser) {
944 av_parser_close(st->parser);
945 st->parser = NULL;
946 }
947 st->last_IP_pts = AV_NOPTS_VALUE;
948 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
949 }
950 }
951
952 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
953 int i;
954
955 for(i = 0; i < s->nb_streams; i++) {
956 AVStream *st = s->streams[i];
957
958 st->cur_dts = av_rescale(timestamp,
959 st->time_base.den * (int64_t)ref_st->time_base.num,
960 st->time_base.num * (int64_t)ref_st->time_base.den);
961 }
962 }
963
964 int av_add_index_entry(AVStream *st,
965 int64_t pos, int64_t timestamp, int size, int distance, int flags)
966 {
967 AVIndexEntry *entries, *ie;
968 int index;
969
970 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
971 return -1;
972
973 entries = av_fast_realloc(st->index_entries,
974 &st->index_entries_allocated_size,
975 (st->nb_index_entries + 1) *
976 sizeof(AVIndexEntry));
977 if(!entries)
978 return -1;
979
980 st->index_entries= entries;
981
982 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
983
984 if(index<0){
985 index= st->nb_index_entries++;
986 ie= &entries[index];
987 assert(index==0 || ie[-1].timestamp < timestamp);
988 }else{
989 ie= &entries[index];
990 if(ie->timestamp != timestamp){
991 if(ie->timestamp <= timestamp)
992 return -1;
993 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
994 st->nb_index_entries++;
995 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
996 distance= ie->min_distance;
997 }
998
999 ie->pos = pos;
1000 ie->timestamp = timestamp;
1001 ie->min_distance= distance;
1002 ie->size= size;
1003 ie->flags = flags;
1004
1005 return index;
1006 }
1007
1008 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1009 int flags)
1010 {
1011 AVIndexEntry *entries= st->index_entries;
1012 int nb_entries= st->nb_index_entries;
1013 int a, b, m;
1014 int64_t timestamp;
1015
1016 a = - 1;
1017 b = nb_entries;
1018
1019 while (b - a > 1) {
1020 m = (a + b) >> 1;
1021 timestamp = entries[m].timestamp;
1022 if(timestamp >= wanted_timestamp)
1023 b = m;
1024 if(timestamp <= wanted_timestamp)
1025 a = m;
1026 }
1027 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1028
1029 if(!(flags & AVSEEK_FLAG_ANY)){
1030 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1031 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1032 }
1033 }
1034
1035 if(m == nb_entries)
1036 return -1;
1037 return m;
1038 }
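
/* Illustrative sketch (not part of the original file): finding the byte
 * position of the last indexed keyframe at or before a given timestamp, as a
 * demuxer's read_seek() callback might do. */
#if 0
static int64_t keyframe_pos_for(AVStream *st, int64_t ts)
{
    int index = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
    if (index < 0)
        return -1;                          /* no usable entry in the index */
    return st->index_entries[index].pos;
}
#endif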
1039
1040 #define DEBUG_SEEK
1041
1042 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1043 AVInputFormat *avif= s->iformat;
1044 int64_t pos_min, pos_max, pos, pos_limit;
1045 int64_t ts_min, ts_max, ts;
1046 int index;
1047 AVStream *st;
1048
1049 if (stream_index < 0)
1050 return -1;
1051
1052 #ifdef DEBUG_SEEK
1053 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1054 #endif
1055
1056 ts_max=
1057 ts_min= AV_NOPTS_VALUE;
1058 pos_limit= -1; //gcc falsely says it may be uninitialized
1059
1060 st= s->streams[stream_index];
1061 if(st->index_entries){
1062 AVIndexEntry *e;
1063
1064 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1065 index= FFMAX(index, 0);
1066 e= &st->index_entries[index];
1067
1068 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1069 pos_min= e->pos;
1070 ts_min= e->timestamp;
1071 #ifdef DEBUG_SEEK
1072 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1073 pos_min,ts_min);
1074 #endif
1075 }else{
1076 assert(index==0);
1077 }
1078
1079 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1080 assert(index < st->nb_index_entries);
1081 if(index >= 0){
1082 e= &st->index_entries[index];
1083 assert(e->timestamp >= target_ts);
1084 pos_max= e->pos;
1085 ts_max= e->timestamp;
1086 pos_limit= pos_max - e->min_distance;
1087 #ifdef DEBUG_SEEK
1088 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1089 pos_max,pos_limit, ts_max);
1090 #endif
1091 }
1092 }
1093
1094 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1095 if(pos<0)
1096 return -1;
1097
1098 /* do the seek */
1099 url_fseek(&s->pb, pos, SEEK_SET);
1100
1101 av_update_cur_dts(s, st, ts);
1102
1103 return 0;
1104 }
1105
1106 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1107 int64_t pos, ts;
1108 int64_t start_pos, filesize;
1109 int no_change;
1110
1111 #ifdef DEBUG_SEEK
1112 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1113 #endif
1114
1115 if(ts_min == AV_NOPTS_VALUE){
1116 pos_min = s->data_offset;
1117 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1118 if (ts_min == AV_NOPTS_VALUE)
1119 return -1;
1120 }
1121
1122 if(ts_max == AV_NOPTS_VALUE){
1123 int step= 1024;
1124 filesize = url_fsize(&s->pb);
1125 pos_max = filesize - 1;
1126 do{
1127 pos_max -= step;
1128 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1129 step += step;
1130 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1131 if (ts_max == AV_NOPTS_VALUE)
1132 return -1;
1133
1134 for(;;){
1135 int64_t tmp_pos= pos_max + 1;
1136 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1137 if(tmp_ts == AV_NOPTS_VALUE)
1138 break;
1139 ts_max= tmp_ts;
1140 pos_max= tmp_pos;
1141 if(tmp_pos >= filesize)
1142 break;
1143 }
1144 pos_limit= pos_max;
1145 }
1146
1147 if(ts_min > ts_max){
1148 return -1;
1149 }else if(ts_min == ts_max){
1150 pos_limit= pos_min;
1151 }
1152
1153 no_change=0;
1154 while (pos_min < pos_limit) {
1155 #ifdef DEBUG_SEEK
1156 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1157 pos_min, pos_max,
1158 ts_min, ts_max);
1159 #endif
1160 assert(pos_limit <= pos_max);
1161
1162 if(no_change==0){
1163 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1164 // interpolate position (better than dichotomy)
1165 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1166 + pos_min - approximate_keyframe_distance;
1167 }else if(no_change==1){
1168 // bisection, if interpolation failed to change min or max pos last time
1169 pos = (pos_min + pos_limit)>>1;
1170 }else{
1171 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1172 pos=pos_min;
1173 }
1174 if(pos <= pos_min)
1175 pos= pos_min + 1;
1176 else if(pos > pos_limit)
1177 pos= pos_limit;
1178 start_pos= pos;
1179
1180 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1181 if(pos == pos_max)
1182 no_change++;
1183 else
1184 no_change=0;
1185 #ifdef DEBUG_SEEK
1186 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1187 #endif
1188 if(ts == AV_NOPTS_VALUE){
1189 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1190 return -1;
1191 }
1192 assert(ts != AV_NOPTS_VALUE);
1193 if (target_ts <= ts) {
1194 pos_limit = start_pos - 1;
1195 pos_max = pos;
1196 ts_max = ts;
1197 }
1198 if (target_ts >= ts) {
1199 pos_min = pos;
1200 ts_min = ts;
1201 }
1202 }
1203
1204 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1205 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1206 #ifdef DEBUG_SEEK
1207 pos_min = pos;
1208 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1209 pos_min++;
1210 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1211 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1212 pos, ts_min, target_ts, ts_max);
1213 #endif
1214 *ts_ret= ts;
1215 return pos;
1216 }
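
/* Worked example (added for clarity, not in the original source): with
 * ts_min=0 at pos_min=0, ts_max=1000 at pos_max=1000000 and pos_limit=990000,
 * a request for target_ts=250 first tries
 *     pos = 250*1000000/1000 + 0 - (1000000 - 990000) = 240000,
 * i.e. linear interpolation backed off by the approximate keyframe distance.
 * Only when such a guess stops narrowing the interval does the loop fall back
 * to bisection, and then to a linear scan from pos_min. */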
1217
1218 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1219 int64_t pos_min, pos_max;
1220 #if 0
1221 AVStream *st;
1222
1223 if (stream_index < 0)
1224 return -1;
1225
1226 st= s->streams[stream_index];
1227 #endif
1228
1229 pos_min = s->data_offset;
1230 pos_max = url_fsize(&s->pb) - 1;
1231
1232 if (pos < pos_min) pos= pos_min;
1233 else if(pos > pos_max) pos= pos_max;
1234
1235 url_fseek(&s->pb, pos, SEEK_SET);
1236
1237 #if 0
1238 av_update_cur_dts(s, st, ts);
1239 #endif
1240 return 0;
1241 }
1242
1243 static int av_seek_frame_generic(AVFormatContext *s,
1244 int stream_index, int64_t timestamp, int flags)
1245 {
1246 int index;
1247 AVStream *st;
1248 AVIndexEntry *ie;
1249
1250 st = s->streams[stream_index];
1251
1252 index = av_index_search_timestamp(st, timestamp, flags);
1253
1254 if(index < 0 || index==st->nb_index_entries-1){
1255 int i;
1256 AVPacket pkt;
1257
1258 if(st->index_entries && st->nb_index_entries){
1259 ie= &st->index_entries[st->nb_index_entries-1];
1260 url_fseek(&s->pb, ie->pos, SEEK_SET);
1261 av_update_cur_dts(s, st, ie->timestamp);
1262 }else
1263 url_fseek(&s->pb, 0, SEEK_SET);
1264
1265 for(i=0;; i++) {
1266 int ret = av_read_frame(s, &pkt);
1267 if(ret<0)
1268 break;
1269 av_free_packet(&pkt);
1270 if(stream_index == pkt.stream_index){
1271 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1272 break;
1273 }
1274 }
1275 index = av_index_search_timestamp(st, timestamp, flags);
1276 }
1277 if (index < 0)
1278 return -1;
1279
1280 av_read_frame_flush(s);
1281 if (s->iformat->read_seek){
1282 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1283 return 0;
1284 }
1285 ie = &st->index_entries[index];
1286 url_fseek(&s->pb, ie->pos, SEEK_SET);
1287
1288 av_update_cur_dts(s, st, ie->timestamp);
1289
1290 return 0;
1291 }
1292
1293 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1294 {
1295 int ret;
1296 AVStream *st;
1297
1298 av_read_frame_flush(s);
1299
1300 if(flags & AVSEEK_FLAG_BYTE)
1301 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1302
1303 if(stream_index < 0){
1304 stream_index= av_find_default_stream_index(s);
1305 if(stream_index < 0)
1306 return -1;
1307
1308 st= s->streams[stream_index];
1309 /* timestamp for default must be expressed in AV_TIME_BASE units */
1310 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1311 }
1312 st= s->streams[stream_index];
1313
1314 /* first, we try the format specific seek */
1315 if (s->iformat->read_seek)
1316 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1317 else
1318 ret = -1;
1319 if (ret >= 0) {
1320 return 0;
1321 }
1322
1323 if(s->iformat->read_timestamp)
1324 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1325 else
1326 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1327 }
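
/* Illustrative sketch (not part of the original file): seeking an opened
 * context to roughly the 60 second mark. With stream_index == -1 the
 * timestamp is in AV_TIME_BASE units and AVSEEK_FLAG_BACKWARD requests the
 * closest point at or before it. */
#if 0
static void seek_to_one_minute(AVFormatContext *ic)
{
    if (av_seek_frame(ic, -1, 60 * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_BACKWARD) < 0)
        av_log(ic, AV_LOG_ERROR, "seek to 60s failed\n");
}
#endif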
1328
1329 /*******************************************************/
1330
1331 /**
1332 * Return TRUE if the file has accurate timings in any stream.
1333 *
1334 * @return TRUE if the file has accurate timings for at least one component.
1335 */
1336 static int av_has_timings(AVFormatContext *ic)
1337 {
1338 int i;
1339 AVStream *st;
1340
1341 for(i = 0;i < ic->nb_streams; i++) {
1342 st = ic->streams[i];
1343 if (st->start_time != AV_NOPTS_VALUE &&
1344 st->duration != AV_NOPTS_VALUE)
1345 return 1;
1346 }
1347 return 0;
1348 }
1349
1350 /**
1351 * Estimate the stream timings from the ones of each component.
1352 *
1353 * Also computes the global bitrate if possible.
1354 */
1355 static void av_update_stream_timings(AVFormatContext *ic)
1356 {
1357 int64_t start_time, start_time1, end_time, end_time1;
1358 int i;
1359 AVStream *st;
1360
1361 start_time = INT64_MAX;
1362 end_time = INT64_MIN;
1363 for(i = 0;i < ic->nb_streams; i++) {
1364 st = ic->streams[i];
1365 if (st->start_time != AV_NOPTS_VALUE) {
1366 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1367 if (start_time1 < start_time)
1368 start_time = start_time1;
1369 if (st->duration != AV_NOPTS_VALUE) {
1370 end_time1 = start_time1
1371 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1372 if (end_time1 > end_time)
1373 end_time = end_time1;
1374 }
1375 }
1376 }
1377 if (start_time != INT64_MAX) {
1378 ic->start_time = start_time;
1379 if (end_time != INT64_MIN) {
1380 ic->duration = end_time - start_time;
1381 if (ic->file_size > 0) {
1382 /* compute the bit rate */
1383 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1384 (double)ic->duration;
1385 }
1386 }
1387 }
1388
1389 }
1390
1391 static void fill_all_stream_timings(AVFormatContext *ic)
1392 {
1393 int i;
1394 AVStream *st;
1395
1396 av_update_stream_timings(ic);
1397 for(i = 0;i < ic->nb_streams; i++) {
1398 st = ic->streams[i];
1399 if (st->start_time == AV_NOPTS_VALUE) {
1400 if(ic->start_time != AV_NOPTS_VALUE)
1401 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1402 if(ic->duration != AV_NOPTS_VALUE)
1403 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1404 }
1405 }
1406 }
1407
1408 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1409 {
1410 int64_t filesize, duration;
1411 int bit_rate, i;
1412 AVStream *st;
1413
1414 /* if bit_rate is already set, we believe it */
1415 if (ic->bit_rate == 0) {
1416 bit_rate = 0;
1417 for(i=0;i<ic->nb_streams;i++) {
1418 st = ic->streams[i];
1419 bit_rate += st->codec->bit_rate;
1420 }
1421 ic->bit_rate = bit_rate;
1422 }
1423
1424 /* if duration is already set, we believe it */
1425 if (ic->duration == AV_NOPTS_VALUE &&
1426 ic->bit_rate != 0 &&
1427 ic->file_size != 0) {
1428 filesize = ic->file_size;
1429 if (filesize > 0) {
1430 for(i = 0; i < ic->nb_streams; i++) {
1431 st = ic->streams[i];
1432 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1433 if (st->start_time == AV_NOPTS_VALUE ||
1434 st->duration == AV_NOPTS_VALUE) {
1435 st->start_time = 0;
1436 st->duration = duration;
1437 }
1438 }
1439 }
1440 }
1441 }
1442
1443 #define DURATION_MAX_READ_SIZE 250000
1444
1445 /* only usable for MPEG-PS streams */
1446 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1447 {
1448 AVPacket pkt1, *pkt = &pkt1;
1449 AVStream *st;
1450 int read_size, i, ret;
1451 int64_t end_time;
1452 int64_t filesize, offset, duration;
1453
1454 /* free previous packet */
1455 if (ic->cur_st && ic->cur_st->parser)
1456 av_free_packet(&ic->cur_pkt);
1457 ic->cur_st = NULL;
1458
1459 /* flush packet queue */
1460 flush_packet_queue(ic);
1461
1462 for(i=0;i<ic->nb_streams;i++) {
1463 st = ic->streams[i];
1464 if (st->parser) {
1465 av_parser_close(st->parser);
1466 st->parser= NULL;
1467 }
1468 }
1469
1470 /* we read the first packets to get the first PTS (not fully
1471 accurate, but it is enough for now) */
1472 url_fseek(&ic->pb, 0, SEEK_SET);
1473 read_size = 0;
1474 for(;;) {
1475 if (read_size >= DURATION_MAX_READ_SIZE)
1476 break;
1477 /* if all info is available, we can stop */
1478 for(i = 0;i < ic->nb_streams; i++) {
1479 st = ic->streams[i];
1480 if (st->start_time == AV_NOPTS_VALUE)
1481 break;
1482 }
1483 if (i == ic->nb_streams)
1484 break;
1485
1486 ret = av_read_packet(ic, pkt);
1487 if (ret != 0)
1488 break;
1489 read_size += pkt->size;
1490 st = ic->streams[pkt->stream_index];
1491 if (pkt->pts != AV_NOPTS_VALUE) {
1492 if (st->start_time == AV_NOPTS_VALUE)
1493 st->start_time = pkt->pts;
1494 }
1495 av_free_packet(pkt);
1496 }
1497
1498 /* estimate the end time (duration) */
1499 /* XXX: may need to support wrapping */
1500 filesize = ic->file_size;
1501 offset = filesize - DURATION_MAX_READ_SIZE;
1502 if (offset < 0)
1503 offset = 0;
1504
1505 url_fseek(&ic->pb, offset, SEEK_SET);
1506 read_size = 0;
1507 for(;;) {
1508 if (read_size >= DURATION_MAX_READ_SIZE)
1509 break;
1510 /* if all info is available, we can stop */
1511 for(i = 0;i < ic->nb_streams; i++) {
1512 st = ic->streams[i];
1513 if (st->duration == AV_NOPTS_VALUE)
1514 break;
1515 }
1516 if (i == ic->nb_streams)
1517 break;
1518
1519 ret = av_read_packet(ic, pkt);
1520 if (ret != 0)
1521 break;
1522 read_size += pkt->size;
1523 st = ic->streams[pkt->stream_index];
1524 if (pkt->pts != AV_NOPTS_VALUE) {
1525 end_time = pkt->pts;
1526 duration = end_time - st->start_time;
1527 if (duration > 0) {
1528 if (st->duration == AV_NOPTS_VALUE ||
1529 st->duration < duration)
1530 st->duration = duration;
1531 }
1532 }
1533 av_free_packet(pkt);
1534 }
1535
1536 fill_all_stream_timings(ic);
1537
1538 url_fseek(&ic->pb, old_offset, SEEK_SET);
1539 }
1540
1541 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1542 {
1543 int64_t file_size;
1544
1545 /* get the file size, if possible */
1546 if (ic->iformat->flags & AVFMT_NOFILE) {
1547 file_size = 0;
1548 } else {
1549 file_size = url_fsize(&ic->pb);
1550 if (file_size < 0)
1551 file_size = 0;
1552 }
1553 ic->file_size = file_size;
1554
1555 if ((!strcmp(ic->iformat->name, "mpeg") ||
1556 !strcmp(ic->iformat->name, "mpegts")) &&
1557 file_size && !ic->pb.is_streamed) {
1558 /* get accurate estimate from the PTSes */
1559 av_estimate_timings_from_pts(ic, old_offset);
1560 } else if (av_has_timings(ic)) {
1561 /* at least one component has timings - we use them for all
1562 the components */
1563 fill_all_stream_timings(ic);
1564 } else {
1565 /* less precise: use bit rate info */
1566 av_estimate_timings_from_bit_rate(ic);
1567 }
1568 av_update_stream_timings(ic);
1569
1570 #if 0
1571 {
1572 int i;
1573 AVStream *st;
1574 for(i = 0;i < ic->nb_streams; i++) {
1575 st = ic->streams[i];
1576 printf("%d: start_time: %0.3f duration: %0.3f\n",
1577 i, (double)st->start_time / AV_TIME_BASE,
1578 (double)st->duration / AV_TIME_BASE);
1579 }
1580 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1581 (double)ic->start_time / AV_TIME_BASE,
1582 (double)ic->duration / AV_TIME_BASE,
1583 ic->bit_rate / 1000);
1584 }
1585 #endif
1586 }
1587
1588 static int has_codec_parameters(AVCodecContext *enc)
1589 {
1590 int val;
1591 switch(enc->codec_type) {
1592 case CODEC_TYPE_AUDIO:
1593 val = enc->sample_rate;
1594 break;
1595 case CODEC_TYPE_VIDEO:
1596 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1597 break;
1598 default:
1599 val = 1;
1600 break;
1601 }
1602 return (val != 0);
1603 }
1604
1605 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1606 {
1607 int16_t *samples;
1608 AVCodec *codec;
1609 int got_picture, data_size, ret=0;
1610 AVFrame picture;
1611
1612 if(!st->codec->codec){
1613 codec = avcodec_find_decoder(st->codec->codec_id);
1614 if (!codec)
1615 return -1;
1616 ret = avcodec_open(st->codec, codec);
1617 if (ret < 0)
1618 return ret;
1619 }
1620
1621 if(!has_codec_parameters(st->codec)){
1622 switch(st->codec->codec_type) {
1623 case CODEC_TYPE_VIDEO:
1624 ret = avcodec_decode_video(st->codec, &picture,
1625 &got_picture, (uint8_t *)data, size);
1626 break;
1627 case CODEC_TYPE_AUDIO:
1628 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1629 samples = av_malloc(data_size);
1630 if (!samples)
1631 goto fail;
1632 ret = avcodec_decode_audio2(st->codec, samples,
1633 &data_size, (uint8_t *)data, size);
1634 av_free(samples);
1635 break;
1636 default:
1637 break;
1638 }
1639 }
1640 fail:
1641 return ret;
1642 }
1643
1644 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
1645 {
1646 AVInputFormat *fmt;
1647 fmt = av_probe_input_format2(pd, 1, &score);
1648
1649 if (fmt) {
1650 if (strncmp(fmt->name, "mp3", 3) == 0)
1651 st->codec->codec_id = CODEC_ID_MP3;
1652 else if (strncmp(fmt->name, "ac3", 3) == 0)
1653 st->codec->codec_id = CODEC_ID_AC3;
1654 }
1655 return !!fmt;
1656 }
1657
1658 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1659 {
1660 while (tags->id != CODEC_ID_NONE) {
1661 if (tags->id == id)
1662 return tags->tag;
1663 tags++;
1664 }
1665 return 0;
1666 }
1667
1668 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1669 {
1670 int i;
1671 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1672 if(tag == tags[i].tag)
1673 return tags[i].id;
1674 }
1675 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1676 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1677 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1678 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1679 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1680 return tags[i].id;
1681 }
1682 return CODEC_ID_NONE;
1683 }
1684
1685 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1686 {
1687 int i;
1688 for(i=0; tags && tags[i]; i++){
1689 int tag= codec_get_tag(tags[i], id);
1690 if(tag) return tag;
1691 }
1692 return 0;
1693 }
1694
1695 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1696 {
1697 int i;
1698 for(i=0; tags && tags[i]; i++){
1699 enum CodecID id= codec_get_id(tags[i], tag);
1700 if(id!=CODEC_ID_NONE) return id;
1701 }
1702 return CODEC_ID_NONE;
1703 }
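
/* Illustrative sketch (not part of the original file): mapping between a
 * container tag (e.g. an AVI FourCC) and a CodecID through a muxer's codec_tag
 * tables; whether a given muxer provides such tables depends on the format. */
#if 0
static void tag_mapping_example(AVOutputFormat *ofmt)
{
    unsigned int tag = av_codec_get_tag(ofmt->codec_tag, CODEC_ID_MPEG4);
    enum CodecID id  = av_codec_get_id(ofmt->codec_tag, MKTAG('D','I','V','X'));
    av_log(NULL, AV_LOG_DEBUG, "tag=0x%08X id=%d\n", tag, (int)id);
}
#endif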
1704
1705 /* absolute maximum size we read until we abort */
1706 #define MAX_READ_SIZE 5000000
1707
1708 #define MAX_STD_TIMEBASES (60*12+5)
1709 static int get_std_framerate(int i){
1710 if(i<60*12) return i*1001;
1711 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1712 }
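
/* Worked values (added for clarity, not in the original source): the returned
 * number divided by 1001*12 is the candidate frame rate in Hz. For i < 60*12
 * this is simply i/12 Hz (1/12 Hz steps up to 60 Hz); the last five entries
 * yield the NTSC-style rates 24000/1001, 30000/1001, 60000/1001, 12000/1001
 * and 15000/1001 Hz (about 23.976, 29.97, 59.94, 11.988 and 14.985). */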
1713
1714 int av_find_stream_info(AVFormatContext *ic)
1715 {
1716 int i, count, ret, read_size, j;
1717 AVStream *st;
1718 AVPacket pkt1, *pkt;
1719 int64_t last_dts[MAX_STREAMS];
1720 int duration_count[MAX_STREAMS]={0};
1721 double (*duration_error)[MAX_STD_TIMEBASES];
1722 offset_t old_offset = url_ftell(&ic->pb);
1723 int64_t codec_info_duration[MAX_STREAMS]={0};
1724 int codec_info_nb_frames[MAX_STREAMS]={0};
1725 AVProbeData probe_data[MAX_STREAMS];
1726 int codec_identified[MAX_STREAMS]={0};
1727
1728 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1729 if (!duration_error) return AVERROR(ENOMEM);
1730
1731 for(i=0;i<ic->nb_streams;i++) {
1732 st = ic->streams[i];
1733 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1734 /* if(!st->time_base.num)
1735 st->time_base= */
1736 if(!st->codec->time_base.num)
1737 st->codec->time_base= st->time_base;
1738 }
1739 //only for the split stuff
1740 if (!st->parser) {
1741 st->parser = av_parser_init(st->codec->codec_id);
1742 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
1743 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1744 }
1745 }
1746 }
1747
1748 for(i=0;i<MAX_STREAMS;i++){
1749 last_dts[i]= AV_NOPTS_VALUE;
1750 }
1751
1752 memset(probe_data, 0, sizeof(probe_data));
1753 count = 0;
1754 read_size = 0;
1755 for(;;) {
1756 /* check if one codec still needs to be handled */
1757 for(i=0;i<ic->nb_streams;i++) {
1758 st = ic->streams[i];
1759 if (!has_codec_parameters(st->codec))
1760 break;
1761 /* variable fps and no guess at the real fps */
1762 if( (st->codec->time_base.den >= 101LL*st->codec->time_base.num || st->codec->codec_id == CODEC_ID_MPEG2VIDEO)
1763 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1764 break;
1765 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1766 break;
1767 if (st->codec->codec_type == CODEC_TYPE_AUDIO &&
1768 st->codec->codec_id == CODEC_ID_NONE)
1769 break;
1770 }
1771 if (i == ic->nb_streams) {
1772 /* NOTE: if the format has no header, then we need to read
1773 some packets to get most of the streams, so we cannot
1774 stop here */
1775 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1776 /* if we found the info for all the codecs, we can stop */
1777 ret = count;
1778 break;
1779 }
1780 }
1781 /* we did not get all the codec info, but we read too much data */
1782 if (read_size >= MAX_READ_SIZE) {
1783 ret = count;
1784 break;
1785 }
1786
1787 /* NOTE: a new stream can be added here if the file has no header
1788 (AVFMTCTX_NOHEADER) */
1789 ret = av_read_frame_internal(ic, &pkt1);
1790 if (ret < 0) {
1791 /* EOF or error */
1792 ret = -1; /* we could not get all the codec parameters before EOF */
1793 for(i=0;i<ic->nb_streams;i++) {
1794 st = ic->streams[i];
1795 if (!has_codec_parameters(st->codec)){
1796 char buf[256];
1797 avcodec_string(buf, sizeof(buf), st->codec, 0);
1798 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1799 } else {
1800 ret = 0;
1801 }
1802 }
1803 break;
1804 }
1805
1806 pkt= add_to_pktbuf(ic, &pkt1);
1807 if(av_dup_packet(pkt) < 0)
1808 return AVERROR(ENOMEM);
1809
1810 read_size += pkt->size;
1811
1812 st = ic->streams[pkt->stream_index];
1813 if(codec_info_nb_frames[st->index]>1)
1814 codec_info_duration[st->index] += pkt->duration;
1815 if (pkt->duration != 0)
1816 codec_info_nb_frames[st->index]++;
1817
1818 {
1819 int index= pkt->stream_index;
1820 int64_t last= last_dts[index];
1821 int64_t duration= pkt->dts - last;
1822
1823 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1824 double dur= duration * av_q2d(st->time_base);
1825
1826 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1827 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
1828 if(duration_count[index] < 2)
1829 memset(duration_error, 0, MAX_STREAMS * sizeof(*duration_error));
1830 for(i=1; i<MAX_STD_TIMEBASES; i++){
1831 int framerate= get_std_framerate(i);
1832 int ticks= lrintf(dur*framerate/(1001*12));
1833 double error= dur - ticks*1001*12/(double)framerate;
1834 duration_error[index][i] += error*error;
1835 }
1836 duration_count[index]++;
1837 }
1838 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
1839 last_dts[pkt->stream_index]= pkt->dts;
1840
1841 if (st->codec->codec_id == CODEC_ID_NONE) {
1842 AVProbeData *pd = &(probe_data[st->index]);
1843 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size);
1844 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
1845 pd->buf_size += pkt->size;
1846 }
1847 }
1848 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1849 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1850 if(i){
1851 st->codec->extradata_size= i;
1852 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1853 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1854 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1855 }
1856 }
1857
1858 /* if still no information, we try to open the codec and to
1859 decompress the frame. We try to avoid that in most cases as
1860 it takes longer and uses more memory. For MPEG4, we need to
1861 decompress for Quicktime. */
1862 if (!has_codec_parameters(st->codec) /*&&
1863 (st->codec->codec_id == CODEC_ID_FLV1 ||
1864 st->codec->codec_id == CODEC_ID_H264 ||
1865 st->codec->codec_id == CODEC_ID_H263 ||
1866 st->codec->codec_id == CODEC_ID_H261 ||
1867 st->codec->codec_id == CODEC_ID_VORBIS ||
1868 st->codec->codec_id == CODEC_ID_MJPEG ||
1869 st->codec->codec_id == CODEC_ID_PNG ||
1870 st->codec->codec_id == CODEC_ID_PAM ||
1871 st->codec->codec_id == CODEC_ID_PGM ||
1872 st->codec->codec_id == CODEC_ID_PGMYUV ||
1873 st->codec->codec_id == CODEC_ID_PBM ||
1874 st->codec->codec_id == CODEC_ID_PPM ||
1875 st->codec->codec_id == CODEC_ID_SHORTEN ||
1876 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1877 try_decode_frame(st, pkt->data, pkt->size);
1878
1879 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
1880 break;
1881 }
1882 count++;
1883 }
1884
1885 // close codecs which were opened in try_decode_frame()
1886 for(i=0;i<ic->nb_streams;i++) {
1887 st = ic->streams[i];
1888 if(st->codec->codec)
1889 avcodec_close(st->codec);
1890 }
1891 for(i=0;i<ic->nb_streams;i++) {
1892 st = ic->streams[i];
1893 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1894 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
1895 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
1896
1897 if(duration_count[i]
1898 && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) /*&&
1899 //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
1900 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
1901 double best_error= 2*av_q2d(st->time_base);
1902 best_error= best_error*best_error*duration_count[i]*1000*12*30;
1903
1904 for(j=1; j<MAX_STD_TIMEBASES; j++){
1905 double error= duration_error[i][j] * get_std_framerate(j);
1906 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1907 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
1908 if(error < best_error){
1909 best_error= error;
1910 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
1911 }
1912 }
1913 }
1914
1915 if (!st->r_frame_rate.num){
1916 if( st->codec->time_base.den * (int64_t)st->time_base.num
1917 <= st->codec->time_base.num * (int64_t)st->time_base.den){
1918 st->r_frame_rate.num = st->codec->time_base.den;
1919 st->r_frame_rate.den = st->codec->time_base.num;
1920 }else{
1921 st->r_frame_rate.num = st->time_base.den;
1922 st->r_frame_rate.den = st->time_base.num;
1923 }
1924 }
1925 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
1926 if (st->codec->codec_id == CODEC_ID_NONE && probe_data[st->index].buf_size > 0) {
1927 codec_identified[st->index] = set_codec_from_probe_data(st, &(probe_data[st->index]), 1);
1928 if (codec_identified[st->index]) {
1929 st->need_parsing = AVSTREAM_PARSE_FULL;
1930 }
1931 }
1932 if(!st->codec->bits_per_sample)
1933 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
1934 }
1935 }
1936
1937 av_estimate_timings(ic, old_offset);
1938
1939 for(i=0;i<ic->nb_streams;i++) {
1940 st = ic->streams[i];
1941 if (codec_identified[st->index]) {
1942 av_read_frame_flush(ic);
1943 av_seek_frame(ic, st->index, 0.0, 0);
1944 url_fseek(&ic->pb, ic->data_offset, SEEK_SET);
1945 }
1946 }
1947
1948 #if 0
1949 /* correct DTS for B-frame streams with no timestamps */
1950 for(i=0;i<ic->nb_streams;i++) {
1951 st = ic->streams[i];
1952 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1953 if(b-frames){
1954 ppktl = &ic->packet_buffer;
1955 while(ppkt1){
1956 if(ppkt1->stream_index != i)
1957 continue;
1958 if(ppkt1->pkt->dts < 0)
1959 break;
1960 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
1961 break;
1962 ppkt1->pkt->dts -= delta;
1963 ppkt1= ppkt1->next;
1964 }
1965 if(ppkt1)
1966 continue;
1967 st->cur_dts -= delta;
1968 }
1969 }
1970 }
1971 #endif
1972
1973 av_free(duration_error);
1974 for(i=0;i<MAX_STREAMS;i++){
1975 av_freep(&(probe_data[i].buf));
1976 }
1977
1978 return ret;
1979 }
1980
1981 /*******************************************************/
1982
1983 int av_read_play(AVFormatContext *s)
1984 {
1985 if (!s->iformat->read_play)
1986 return AVERROR(ENOSYS);
1987 return s->iformat->read_play(s);
1988 }
1989
1990 int av_read_pause(AVFormatContext *s)
1991 {
1992 if (!s->iformat->read_pause)
1993 return AVERROR(ENOSYS);
1994 return s->iformat->read_pause(s);
1995 }
1996
1997 void av_close_input_file(AVFormatContext *s)
1998 {
1999 int i, must_open_file;
2000 AVStream *st;
2001
2002 /* free previous packet */
2003 if (s->cur_st && s->cur_st->parser)
2004 av_free_packet(&s->cur_pkt);
2005
2006 if (s->iformat->read_close)
2007 s->iformat->read_close(s);
2008 for(i=0;i<s->nb_streams;i++) {
2009 /* free all data in a stream component */
2010 st = s->streams[i];
2011 if (st->parser) {
2012 av_parser_close(st->parser);
2013 }
2014 av_free(st->index_entries);
2015 av_free(st->codec->extradata);
2016 av_free(st->codec);
2017 av_free(st);
2018 }
2019 flush_packet_queue(s);
2020 must_open_file = 1;
2021 if (s->iformat->flags & AVFMT_NOFILE) {
2022 must_open_file = 0;
2023 }
2024 if (must_open_file) {
2025 url_fclose(&s->pb);
2026 }
2027 av_freep(&s->priv_data);
2028 av_free(s);
2029 }
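/* A minimal demuxing sketch, illustrative only and not part of this file.
 * It assumes the era-appropriate entry points av_open_input_file(),
 * av_find_stream_info() and av_read_frame(); "input.avi" is a placeholder
 * and error handling is reduced to early returns.
 *
 *     AVFormatContext *ic;
 *     AVPacket pkt;
 *
 *     if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) < 0)
 *         return -1;
 *     if (av_find_stream_info(ic) < 0)
 *         return -1;
 *     dump_format(ic, 0, "input.avi", 0);
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // pkt.data/pkt.size belong to stream pkt.stream_index
 *         av_free_packet(&pkt);
 *     }
 *     av_close_input_file(ic);
 */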
2030
2031 AVStream *av_new_stream(AVFormatContext *s, int id)
2032 {
2033 AVStream *st;
2034 int i;
2035
2036 if (s->nb_streams >= MAX_STREAMS)
2037 return NULL;
2038
2039 st = av_mallocz(sizeof(AVStream));
2040 if (!st)
2041 return NULL;
2042
2043 st->codec= avcodec_alloc_context();
2044 if (s->iformat) {
2045 /* no default bitrate if decoding */
2046 st->codec->bit_rate = 0;
2047 }
2048 st->index = s->nb_streams;
2049 st->id = id;
2050 st->start_time = AV_NOPTS_VALUE;
2051 st->duration = AV_NOPTS_VALUE;
2052 st->cur_dts = AV_NOPTS_VALUE;
2053
2054 /* default pts setting is MPEG-like */
2055 av_set_pts_info(st, 33, 1, 90000);
2056 st->last_IP_pts = AV_NOPTS_VALUE;
2057 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2058 st->pts_buffer[i]= AV_NOPTS_VALUE;
2059
2060 s->streams[s->nb_streams++] = st;
2061 return st;
2062 }
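/* Usage sketch, illustrative only: a demuxer's read_header() typically
 * allocates one AVStream per elementary stream and fills in whatever codec
 * parameters it already knows; the concrete values below are assumptions.
 *
 *     AVStream *st = av_new_stream(s, 0);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->codec->codec_type  = CODEC_TYPE_AUDIO;
 *     st->codec->codec_id    = CODEC_ID_MP2;
 *     st->codec->sample_rate = 44100;
 *     st->codec->channels    = 2;
 */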
2063
2064 /************************************************************/
2065 /* output media file */
2066
2067 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2068 {
2069 int ret;
2070
2071 if (s->oformat->priv_data_size > 0) {
2072 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2073 if (!s->priv_data)
2074 return AVERROR(ENOMEM);
2075 } else
2076 s->priv_data = NULL;
2077
2078 if (s->oformat->set_parameters) {
2079 ret = s->oformat->set_parameters(s, ap);
2080 if (ret < 0)
2081 return ret;
2082 }
2083 return 0;
2084 }
2085
2086 int av_write_header(AVFormatContext *s)
2087 {
2088 int ret, i;
2089 AVStream *st;
2090
2091 // some sanity checks
2092 for(i=0;i<s->nb_streams;i++) {
2093 st = s->streams[i];
2094
2095 switch (st->codec->codec_type) {
2096 case CODEC_TYPE_AUDIO:
2097 if(st->codec->sample_rate<=0){
2098 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2099 return -1;
2100 }
2101 break;
2102 case CODEC_TYPE_VIDEO:
2103 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2104 av_log(s, AV_LOG_ERROR, "time base not set\n");
2105 return -1;
2106 }
2107 if(st->codec->width<=0 || st->codec->height<=0){
2108 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2109 return -1;
2110 }
2111 break;
2112 }
2113
2114 if(s->oformat->codec_tag){
2115 if(st->codec->codec_tag){
2116 //FIXME
2117 //check that tag + id is in the table
2118 //if neither is in the table -> ok
2119 //if tag is in the table with another id -> FAIL
2120 //if id is in the table with another tag -> FAIL unless strict < ?
2121 }else
2122 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2123 }
2124 }
2125
2126 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2127 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2128 if (!s->priv_data)
2129 return AVERROR(ENOMEM);
2130 }
2131
2132 if(s->oformat->write_header){
2133 ret = s->oformat->write_header(s);
2134 if (ret < 0)
2135 return ret;
2136 }
2137
2138 /* init PTS generation */
2139 for(i=0;i<s->nb_streams;i++) {
2140 int64_t den = AV_NOPTS_VALUE;
2141 st = s->streams[i];
2142
2143 switch (st->codec->codec_type) {
2144 case CODEC_TYPE_AUDIO:
2145 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2146 break;
2147 case CODEC_TYPE_VIDEO:
2148 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2149 break;
2150 default:
2151 break;
2152 }
2153 if (den != AV_NOPTS_VALUE) {
2154 if (den <= 0)
2155 return AVERROR_INVALIDDATA;
2156 av_frac_init(&st->pts, 0, 0, den);
2157 }
2158 }
2159 return 0;
2160 }
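/* Illustrative sketch of the per-stream fields the sanity checks above
 * require before muxing; "oc" and the concrete values are assumptions.
 *
 *     AVStream *video = av_new_stream(oc, 0);
 *     video->codec->codec_type    = CODEC_TYPE_VIDEO;
 *     video->codec->codec_id      = CODEC_ID_MPEG4;
 *     video->codec->width         = 640;   // dimensions must be set
 *     video->codec->height        = 480;
 *     video->codec->time_base.num = 1;     // time base must be set
 *     video->codec->time_base.den = 25;
 *
 *     AVStream *audio = av_new_stream(oc, 1);
 *     audio->codec->codec_type  = CODEC_TYPE_AUDIO;
 *     audio->codec->codec_id    = CODEC_ID_MP2;
 *     audio->codec->sample_rate = 44100;   // must be > 0
 *
 *     if (av_write_header(oc) < 0)
 *         return -1;
 */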
2161
2162 //FIXME merge with compute_pkt_fields
2163 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2164 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2165 int num, den, frame_size, i;
2166
2167 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2168
2169 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2170 return -1;*/
2171
2172 /* duration field */
2173 if (pkt->duration == 0) {
2174 compute_frame_duration(&num, &den, st, NULL, pkt);
2175 if (den && num) {
2176 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2177 }
2178 }
2179
2180 //XXX/FIXME this is a temporary hack until all encoders output pts
2181 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2182 pkt->dts=
2183 // pkt->pts= st->cur_dts;
2184 pkt->pts= st->pts.val;
2185 }
2186
2187 //calculate dts from pts
2188 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2189 st->pts_buffer[0]= pkt->pts;
2190 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2191 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2192 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2193 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2194
2195 pkt->dts= st->pts_buffer[0];
2196 }
2197
2198 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2199 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2200 return -1;
2201 }
2202 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2203 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2204 return -1;
2205 }
2206
2207 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2208 st->cur_dts= pkt->dts;
2209 st->pts.val= pkt->dts;
2210
2211 /* update pts */
2212 switch (st->codec->codec_type) {
2213 case CODEC_TYPE_AUDIO:
2214 frame_size = get_audio_frame_size(st->codec, pkt->size);
2215
2216 /* HACK/FIXME: we skip the initial 0-size packets as they most likely correspond to the encoder delay,
2217 but it would be better if we had the real timestamps from the encoder */
2218 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2219 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2220 }
2221 break;
2222 case CODEC_TYPE_VIDEO:
2223 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2224 break;
2225 default:
2226 break;
2227 }
2228 return 0;
2229 }
2230
2231 static void truncate_ts(AVStream *st, AVPacket *pkt){
2232 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2233
2234 // if(pkt->dts < 0)
2235 // pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2236
2237 if (pkt->pts != AV_NOPTS_VALUE)
2238 pkt->pts &= pts_mask;
2239 if (pkt->dts != AV_NOPTS_VALUE)
2240 pkt->dts &= pts_mask;
2241 }
2242
2243 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2244 {
2245 int ret;
2246
2247 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2248 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2249 return ret;
2250
2251 truncate_ts(s->streams[pkt->stream_index], pkt);
2252
2253 ret= s->oformat->write_packet(s, pkt);
2254 if(!ret)
2255 ret= url_ferror(&s->pb);
2256 return ret;
2257 }
2258
2259 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2260 AVPacketList *pktl, **next_point, *this_pktl;
2261 int stream_count=0;
2262 int streams[MAX_STREAMS];
2263
2264 if(pkt){
2265 AVStream *st= s->streams[ pkt->stream_index];
2266
2267 // assert(pkt->destruct != av_destruct_packet); //FIXME
2268
2269 this_pktl = av_mallocz(sizeof(AVPacketList));
2270 this_pktl->pkt= *pkt;
2271 if(pkt->destruct == av_destruct_packet)
2272 pkt->destruct= NULL; // non shared -> must keep original from being freed
2273 else
2274 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2275
2276 next_point = &s->packet_buffer;
2277 while(*next_point){
2278 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2279 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2280 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2281 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2282 break;
2283 next_point= &(*next_point)->next;
2284 }
2285 this_pktl->next= *next_point;
2286 *next_point= this_pktl;
2287 }
2288
2289 memset(streams, 0, sizeof(streams));
2290 pktl= s->packet_buffer;
2291 while(pktl){
2292 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2293 if(streams[ pktl->pkt.stream_index ] == 0)
2294 stream_count++;
2295 streams[ pktl->pkt.stream_index ]++;
2296 pktl= pktl->next;
2297 }
2298
2299 if(s->nb_streams == stream_count || (flush && stream_count)){
2300 pktl= s->packet_buffer;
2301 *out= pktl->pkt;
2302
2303 s->packet_buffer= pktl->next;
2304 av_freep(&pktl);
2305 return 1;
2306 }else{
2307 av_init_packet(out);
2308 return 0;
2309 }
2310 }
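/* Worked example of the dts ordering above, illustrative only: with two
 * streams sharing a 1/1000 time base and buffered packets
 *     stream 0: dts 0, 40, 80
 *     stream 1: dts 0, 26, 52
 * the cross-multiplied comparison keeps the buffer sorted by dts. Once
 * every stream has contributed at least one packet (or flush is set), the
 * head, i.e. the packet with the smallest dts, is returned, so over a whole
 * mux run the output dts sequence is 0, 0, 26, 40, 52, 80.
 */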
2311
2312 /**
2313 * Interleaves an AVPacket correctly so it can be muxed.
2314 * @param out the interleaved packet will be output here
2315 * @param in the input packet
2316 * @param flush 1 if no further packets are available as input and all
2317 * remaining packets should be output
2318 * @return 1 if a packet was output, 0 if no packet could be output,
2319 * < 0 if an error occurred
2320 */
2321 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2322 if(s->oformat->interleave_packet)
2323 return s->oformat->interleave_packet(s, out, in, flush);
2324 else
2325 return av_interleave_packet_per_dts(s, out, in, flush);
2326 }
2327
2328 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2329 AVStream *st= s->streams[ pkt->stream_index];
2330
2331 //FIXME/XXX/HACK drop zero sized packets
2332 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2333 return 0;
2334
2335 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2336 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2337 return -1;
2338
2339 if(pkt->dts == AV_NOPTS_VALUE)
2340 return -1;
2341
2342 for(;;){
2343 AVPacket opkt;
2344 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2345 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2346 return ret;
2347
2348 truncate_ts(s->streams[opkt.stream_index], &opkt);
2349 ret= s->oformat->write_packet(s, &opkt);
2350
2351 av_free_packet(&opkt);
2352 pkt= NULL;
2353
2354 if(ret<0)
2355 return ret;
2356 if(url_ferror(&s->pb))
2357 return url_ferror(&s->pb);
2358 }
2359 }
2360
2361 int av_write_trailer(AVFormatContext *s)
2362 {
2363 int ret, i;
2364
2365 for(;;){
2366 AVPacket pkt;
2367 ret= av_interleave_packet(s, &pkt, NULL, 1);
2368 if(ret<0) //FIXME cleanup needed for ret<0 ?
2369 goto fail;
2370 if(!ret)
2371 break;
2372
2373 truncate_ts(s->streams[pkt.stream_index], &pkt);
2374 ret= s->oformat->write_packet(s, &pkt);
2375
2376 av_free_packet(&pkt);
2377
2378 if(ret<0)
2379 goto fail;
2380 if(url_ferror(&s->pb))
2381 goto fail;
2382 }
2383
2384 if(s->oformat->write_trailer)
2385 ret = s->oformat->write_trailer(s);
2386 fail:
2387 if(ret == 0)
2388 ret=url_ferror(&s->pb);
2389 for(i=0;i<s->nb_streams;i++)
2390 av_freep(&s->streams[i]->priv_data);
2391 av_freep(&s->priv_data);
2392 return ret;
2393 }
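/* End-to-end muxing sketch tying the output path together, illustrative
 * only: "filename", have_more_packets() and next_encoded_packet() are
 * placeholders, and error handling is omitted.
 *
 *     AVOutputFormat *fmt = guess_format(NULL, filename, NULL);
 *     AVFormatContext *oc = av_alloc_format_context();
 *     oc->oformat = fmt;
 *     av_set_parameters(oc, NULL);
 *     // ... add streams with av_new_stream() and fill in their codec fields ...
 *     if (!(fmt->flags & AVFMT_NOFILE))
 *         url_fopen(&oc->pb, filename, URL_WRONLY);
 *     av_write_header(oc);
 *     while (have_more_packets()) {
 *         AVPacket pkt = next_encoded_packet();
 *         av_interleaved_write_frame(oc, &pkt);
 *     }
 *     av_write_trailer(oc);                // flushes the interleaving buffer
 *     if (!(fmt->flags & AVFMT_NOFILE))
 *         url_fclose(&oc->pb);
 */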
2394
2395 /* "user interface" functions */
2396
2397 void dump_format(AVFormatContext *ic,
2398 int index,
2399 const char *url,
2400 int is_output)
2401 {
2402 int i, flags;
2403 char buf[256];
2404
2405 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2406 is_output ? "Output" : "Input",
2407 index,
2408 is_output ? ic->oformat->name : ic->iformat->name,
2409 is_output ? "to" : "from", url);
2410 if (!is_output) {
2411 av_log(NULL, AV_LOG_INFO, " Duration: ");
2412 if (ic->duration != AV_NOPTS_VALUE) {
2413 int hours, mins, secs, us;
2414 secs = ic->duration / AV_TIME_BASE;
2415 us = ic->duration % AV_TIME_BASE;
2416 mins = secs / 60;
2417 secs %= 60;
2418 hours = mins / 60;
2419 mins %= 60;
2420 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2421 (10 * us) / AV_TIME_BASE);
2422 } else {
2423 av_log(NULL, AV_LOG_INFO, "N/A");
2424 }
2425 if (ic->start_time != AV_NOPTS_VALUE) {
2426 int secs, us;
2427 av_log(NULL, AV_LOG_INFO, ", start: ");
2428 secs = ic->start_time / AV_TIME_BASE;
2429 us = ic->start_time % AV_TIME_BASE;
2430 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2431 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2432 }
2433 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2434 if (ic->bit_rate) {
2435 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2436 } else {
2437 av_log(NULL, AV_LOG_INFO, "N/A");
2438 }
2439 av_log(NULL, AV_LOG_INFO, "\n");
2440 }
2441 for(i=0;i<ic->nb_streams;i++) {
2442 AVStream *st = ic->streams[i];
2443 int g= ff_gcd(st->time_base.num, st->time_base.den);
2444 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2445 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2446 /* the pid is important information, so we display it */
2447 /* XXX: add a generic system */
2448 if (is_output)
2449 flags = ic->oformat->flags;
2450 else
2451 flags = ic->iformat->flags;
2452 if (flags & AVFMT_SHOW_IDS) {
2453 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2454 }
2455 if (strlen(st->language) > 0) {
2456 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2457 }
2458 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2459 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2460 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2461 if(st->r_frame_rate.den && st->r_frame_rate.num)
2462 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
2463 /* else if(st->time_base.den && st->time_base.num)
2464 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
2465 else
2466 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
2467 }
2468 av_log(NULL, AV_LOG_INFO, "\n");
2469 }
2470 }
2471
2472 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2473 {
2474 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2475 }
2476
2477 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2478 {
2479 AVRational frame_rate;
2480 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2481 *frame_rate_num= frame_rate.num;
2482 *frame_rate_den= frame_rate.den;
2483 return ret;
2484 }
2485
2486 /**
2487 * Gets the current time in microseconds.
2488 */
2489 int64_t av_gettime(void)
2490 {
2491 struct timeval tv;
2492 gettimeofday(&tv,NULL);
2493 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2494 }
2495
2496 int64_t parse_date(const char *datestr, int duration)
2497 {
2498 const char *p;
2499 int64_t t;
2500 struct tm dt;
2501 int i;
2502 static const char *date_fmt[] = {
2503 "%Y-%m-%d",
2504 "%Y%m%d",
2505 };
2506 static const char *time_fmt[] = {
2507 "%H:%M:%S",
2508 "%H%M%S",
2509 };
2510 const char *q;
2511 int is_utc, len;
2512 char lastch;
2513 int negative = 0;
2514
2515 #undef time
2516 time_t now = time(0);
2517
2518 len = strlen(datestr);
2519 if (len > 0)
2520 lastch = datestr[len - 1];
2521 else
2522 lastch = '\0';
2523 is_utc = (lastch == 'z' || lastch == 'Z');
2524
2525 memset(&dt, 0, sizeof(dt));
2526
2527 p = datestr;
2528 q = NULL;
2529 if (!duration) {
2530 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2531 q = small_strptime(p, date_fmt[i], &dt);
2532 if (q) {
2533 break;
2534 }
2535 }
2536
2537 if (!q) {
2538 if (is_utc) {
2539 dt = *gmtime(&now);
2540 } else {
2541 dt = *localtime(&now);
2542 }
2543 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2544 } else {
2545 p = q;
2546 }
2547
2548 if (*p == 'T' || *p == 't' || *p == ' ')
2549 p++;
2550
2551 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2552 q = small_strptime(p, time_fmt[i], &dt);
2553 if (q) {
2554 break;
2555 }
2556 }
2557 } else {
2558 if (p[0] == '-') {
2559 negative = 1;
2560 ++p;
2561 }
2562 q = small_strptime(p, time_fmt[0], &dt);
2563 if (!q) {
2564 dt.tm_sec = strtol(p, (char **)&q, 10);
2565 dt.tm_min = 0;
2566 dt.tm_hour = 0;
2567 }
2568 }
2569
2570 /* Now we have all the fields that we can get */
2571 if (!q) {
2572 if (duration)
2573 return 0;
2574 else
2575 return now * INT64_C(1000000);
2576 }
2577
2578 if (duration) {
2579 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2580 } else {
2581 dt.tm_isdst = -1; /* unknown */
2582 if (is_utc) {
2583 t = mktimegm(&dt);
2584 } else {
2585 t = mktime(&dt);
2586 }
2587 }
2588
2589 t *= 1000000;
2590
2591 if (*q == '.') {
2592 int val, n;
2593 q++;
2594 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2595 if (!isdigit(*q))
2596 break;
2597 val += n * (*q - '0');
2598 }
2599 t += val;
2600 }
2601 return negative ? -t : t;
2602 }
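/* Usage sketch, illustrative only: with duration=0 the string is parsed as
 * an absolute date/time (local time unless it ends in 'Z'), with duration=1
 * as a time span; both results are in microseconds.
 *
 *     int64_t when = parse_date("2007-03-05 14:30:00", 0);  // microseconds since the epoch
 *     int64_t span = parse_date("00:01:30.500", 1);         // 90500000
 *     int64_t secs = parse_date("45", 1);                    // plain seconds: 45000000
 */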
2603
2604 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2605 {
2606 const char *p;
2607 char tag[128], *q;
2608
2609 p = info;
2610 if (*p == '?')
2611 p++;
2612 for(;;) {
2613 q = tag;
2614 while (*p != '\0' && *p != '=' && *p != '&') {
2615 if ((q - tag) < sizeof(tag) - 1)
2616 *q++ = *p;
2617 p++;
2618 }
2619 *q = '\0';
2620 q = arg;
2621 if (*p == '=') {
2622 p++;
2623 while (*p != '&' && *p != '\0') {
2624 if ((q - arg) < arg_size - 1) {
2625 if (*p == '+')
2626 *q++ = ' ';
2627 else
2628 *q++ = *p;
2629 }
2630 p++;
2631 }
2632 *q = '\0';
2633 }
2634 if (!strcmp(tag, tag1))
2635 return 1;
2636 if (*p != '&')
2637 break;
2638 p++;
2639 }
2640 return 0;
2641 }
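/* Usage sketch, illustrative only: extracting one parameter from an
 * RTSP/HTTP-style query string; '+' in the value is turned into a space.
 *
 *     char title[128];
 *     if (find_info_tag(title, sizeof(title), "title", "?title=My+Song&rate=44100")) {
 *         // title now contains "My Song"
 *     }
 */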
2642
2643 int av_get_frame_filename(char *buf, int buf_size,
2644 const char *path, int number)
2645 {
2646 const char *p;
2647 char *q, buf1[20], c;
2648 int nd, len, percentd_found;
2649
2650 q = buf;
2651 p = path;
2652 percentd_found = 0;
2653 for(;;) {
2654 c = *p++;
2655 if (c == '\0')
2656 break;
2657 if (c == '%') {
2658 do {
2659 nd = 0;
2660 while (isdigit(*p)) {
2661 nd = nd * 10 + *p++ - '0';
2662 }
2663 c = *p++;
2664 } while (isdigit(c));
2665
2666 switch(c) {
2667 case '%':
2668 goto addchar;
2669 case 'd':
2670 if (percentd_found)
2671 goto fail;
2672 percentd_found = 1;
2673 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2674 len = strlen(buf1);
2675 if ((q - buf + len) > buf_size - 1)
2676 goto fail;
2677 memcpy(q, buf1, len);
2678 q += len;
2679 break;
2680 default:
2681 goto fail;
2682 }
2683 } else {
2684 addchar:
2685 if ((q - buf) < buf_size - 1)
2686 *q++ = c;
2687 }
2688 }
2689 if (!percentd_found)
2690 goto fail;
2691 *q = '\0';
2692 return 0;
2693 fail:
2694 *q = '\0';
2695 return -1;
2696 }
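/* Usage sketch, illustrative only: exactly one %d (optionally with a
 * zero-padded width such as %03d) must be present, otherwise -1 is returned.
 *
 *     char name[1024];
 *     av_get_frame_filename(name, sizeof(name), "img%03d.png", 7);  // name = "img007.png"
 *     av_get_frame_filename(name, sizeof(name), "img.png", 7);      // fails: no %d in the pattern
 */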
2697
2698 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
2699 {
2700 int len, i, j, c;
2701 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2702
2703 for(i=0;i<size;i+=16) {
2704 len = size - i;
2705 if (len > 16)
2706 len = 16;
2707 PRINT("%08x ", i);
2708 for(j=0;j<16;j++) {
2709 if (j < len)
2710 PRINT(" %02x", buf[i+j]);
2711 else
2712 PRINT(" ");
2713 }
2714 PRINT(" ");
2715 for(j=0;j<len;j++) {
2716 c = buf[i+j];
2717 if (c < ' ' || c > '~')
2718 c = '.';
2719 PRINT("%c", c);
2720 }
2721 PRINT("\n");
2722 }
2723 #undef PRINT
2724 }
2725
2726 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2727 {
2728 hex_dump_internal(NULL, f, 0, buf, size);
2729 }
2730
2731 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
2732 {
2733 hex_dump_internal(avcl, NULL, level, buf, size);
2734 }
2735
2736 //FIXME needs to know the time_base
2737 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
2738 {
2739 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2740 PRINT("stream #%d:\n", pkt->stream_index);
2741 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2742 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2743 /* DTS is _always_ valid after av_read_frame() */
2744 PRINT(" dts=");
2745 if (pkt->dts == AV_NOPTS_VALUE)
2746 PRINT("N/A");
2747 else
2748 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
2749 /* PTS may not be known if B-frames are present */
2750 PRINT(" pts=");
2751 if (pkt->pts == AV_NOPTS_VALUE)
2752 PRINT("N/A");
2753 else
2754 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
2755 PRINT("\n");
2756 PRINT(" size=%d\n", pkt->size);
2757 #undef PRINT
2758 if (dump_payload)
2759 av_hex_dump(f, pkt->data, pkt->size);
2760 }
2761
2762 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2763 {
2764 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
2765 }
2766
2767 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
2768 {
2769 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
2770 }
2771
2772 void url_split(char *proto, int proto_size,
2773 char *authorization, int authorization_size,
2774 char *hostname, int hostname_size,
2775 int *port_ptr,
2776 char *path, int path_size,
2777 const char *url)
2778 {
2779 const char *p;
2780 char *q;
2781 int port;
2782
2783 port = -1;
2784
2785 p = url;
2786 q = proto;
2787 while (*p != ':' && *p != '\0') {
2788 if ((q - proto) < proto_size - 1)
2789 *q++ = *p;
2790 p++;
2791 }
2792 if (proto_size > 0)
2793 *q = '\0';
2794 if (authorization_size > 0)
2795 authorization[0] = '\0';
2796 if (*p == '\0') {
2797 if (proto_size > 0)
2798 proto[0] = '\0';
2799 if (hostname_size > 0)
2800 hostname[0] = '\0';
2801 p = url;
2802 } else {
2803 char *at,*slash; // PETR: position of '@' character and '/' character
2804
2805 p++;
2806 if (*p == '/')
2807 p++;
2808 if (*p == '/')
2809 p++;
2810 at = strchr(p,'@'); // PETR: get the position of '@'
2811 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
2812 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
2813
2814 q = at ? authorization : hostname; // PETR: if '@' exists, start with the authorization part
2815
2816 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
2817 if (*p == '@') { // PETR: passed '@'
2818 if (authorization_size > 0)
2819 *q = '\0';
2820 q = hostname;
2821 at = NULL;
2822 } else if (!at) { // PETR: hostname
2823 if ((q - hostname) < hostname_size - 1)
2824 *q++ = *p;
2825 } else {
2826 if ((q - authorization) < authorization_size - 1)
2827 *q++ = *p;
2828 }
2829 p++;
2830 }
2831 if (hostname_size > 0)
2832 *q = '\0';
2833 if (*p == ':') {
2834 p++;
2835 port = strtoul(p, (char **)&p, 10);
2836 }
2837 }
2838 if (port_ptr)
2839 *port_ptr = port;
2840 av_strlcpy(path, p, path_size);
2841 }
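/* Worked example, illustrative only:
 *
 *     char proto[16], auth[64], host[64], path[256];
 *     int port;
 *     url_split(proto, sizeof(proto), auth, sizeof(auth),
 *               host, sizeof(host), &port, path, sizeof(path),
 *               "rtsp://user:pass@example.com:554/stream/1");
 *     // proto="rtsp", auth="user:pass", host="example.com", port=554, path="/stream/1"
 */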
2842
2843 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2844 int pts_num, int pts_den)
2845 {
2846 s->pts_wrap_bits = pts_wrap_bits;
2847 s->time_base.num = pts_num;
2848 s->time_base.den = pts_den;
2849 }
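/* Usage sketch, illustrative only: a (de)muxer whose native timestamps are
 * audio samples would pick the sample rate as the timestamp unit, e.g.
 *
 *     av_set_pts_info(st, 64, 1, st->codec->sample_rate);  // 1 tick == 1 sample
 */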
2850
2851 /* fraction handling */
2852
2853 /**
2854 * f = val + (num / den) + 0.5.
2855 *
2856 * 'num' is normalized so that 0 <= num < den.
2857 *
2858 * @param f fractional number
2859 * @param val integer value
2860 * @param num must be >= 0
2861 * @param den must be >= 1
2862 */
2863 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2864 {
2865 num += (den >> 1);
2866 if (num >= den) {
2867 val += num / den;
2868 num = num % den;
2869 }
2870 f->val = val;
2871 f->num = num;
2872 f->den = den;
2873 }
2874
2875 /**
2876 * Fractional addition to f: f = f + (incr / f->den).
2877 *
2878 * @param f fractional number
2879 * @param incr increment, can be positive or negative
2880 */
2881 static void av_frac_add(AVFrac *f, int64_t incr)
2882 {
2883 int64_t num, den;
2884
2885 num = f->num + incr;
2886 den = f->den;
2887 if (num < 0) {
2888 f->val += num / den;
2889 num = num % den;
2890 if (num < 0) {
2891 num += den;
2892 f->val--;
2893 }
2894 } else if (num >= den) {
2895 f->val += num / den;
2896 num = num % den;
2897 }
2898 f->num = num;
2899 }
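/* Worked example, illustrative only: with the MPEG-like 90 kHz time base
 * and a 44100 Hz audio stream, av_write_header() calls
 * av_frac_init(&st->pts, 0, 0, 1 * 44100), and each 1152-sample frame then
 * calls av_frac_add(&st->pts, 90000 * 1152). pts.val therefore advances by
 * 90000*1152/44100, about 2351.02 ticks per frame on average; the sub-tick
 * remainder accumulates in pts.num instead of being rounded away.
 */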