1 /*
2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "allformats.h"
23 #include "opt.h"
24
25 #undef NDEBUG
26 #include <assert.h>
27
28 /**
29 * @file libavformat/utils.c
30 * Various utility functions for using the FFmpeg library.
31 */
32
33 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
34 static void av_frac_add(AVFrac *f, int64_t incr);
35
36 /** head of registered input format linked list. */
37 AVInputFormat *first_iformat = NULL;
38 /** head of registered output format linked list. */
39 AVOutputFormat *first_oformat = NULL;
40
41 void av_register_input_format(AVInputFormat *format)
42 {
43 AVInputFormat **p;
44 p = &first_iformat;
45 while (*p != NULL) p = &(*p)->next;
46 *p = format;
47 format->next = NULL;
48 }
49
50 void av_register_output_format(AVOutputFormat *format)
51 {
52 AVOutputFormat **p;
53 p = &first_oformat;
54 while (*p != NULL) p = &(*p)->next;
55 *p = format;
56 format->next = NULL;
57 }
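/*
 * Illustrative sketch (not part of the original file): a demuxer becomes
 * available by filling in an AVInputFormat and registering it, normally from
 * av_register_all(). The callback names below are hypothetical; only fields
 * actually referenced in this file are shown.
 *
 *     static AVInputFormat demo_demuxer = {
 *         .name        = "demo",
 *         .extensions  = "demo",
 *         .read_probe  = demo_probe,
 *         .read_header = demo_read_header,
 *         .read_packet = demo_read_packet,
 *     };
 *
 *     av_register_input_format(&demo_demuxer);
 */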
58
59 int match_ext(const char *filename, const char *extensions)
60 {
61 const char *ext, *p;
62 char ext1[32], *q;
63
64 if(!filename)
65 return 0;
66
67 ext = strrchr(filename, '.');
68 if (ext) {
69 ext++;
70 p = extensions;
71 for(;;) {
72 q = ext1;
73 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
74 *q++ = *p++;
75 *q = '\0';
76 if (!strcasecmp(ext1, ext))
77 return 1;
78 if (*p == '\0')
79 break;
80 p++;
81 }
82 }
83 return 0;
84 }
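/*
 * Worked example: the extension is taken after the last '.' and compared
 * case-insensitively against each comma-separated entry, so
 *
 *     match_ext("clip.MOV", "mp4,mov,m4a")  returns 1
 *     match_ext("clip.avi", "mp4,mov,m4a")  returns 0
 *     match_ext(NULL,       "mp4,mov,m4a")  returns 0
 */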
85
86 AVOutputFormat *guess_format(const char *short_name, const char *filename,
87 const char *mime_type)
88 {
89 AVOutputFormat *fmt, *fmt_found;
90 int score_max, score;
91
92 /* specific test for image sequences */
93 #ifdef CONFIG_IMAGE2_MUXER
94 if (!short_name && filename &&
95 av_filename_number_test(filename) &&
96 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
97 return guess_format("image2", NULL, NULL);
98 }
99 #endif
100 /* find the proper file type */
101 fmt_found = NULL;
102 score_max = 0;
103 fmt = first_oformat;
104 while (fmt != NULL) {
105 score = 0;
106 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
107 score += 100;
108 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
109 score += 10;
110 if (filename && fmt->extensions &&
111 match_ext(filename, fmt->extensions)) {
112 score += 5;
113 }
114 if (score > score_max) {
115 score_max = score;
116 fmt_found = fmt;
117 }
118 fmt = fmt->next;
119 }
120 return fmt_found;
121 }
122
123 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
124 const char *mime_type)
125 {
126 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
127
128 if (fmt) {
129 AVOutputFormat *stream_fmt;
130 char stream_format_name[64];
131
132 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
133 stream_fmt = guess_format(stream_format_name, NULL, NULL);
134
135 if (stream_fmt)
136 fmt = stream_fmt;
137 }
138
139 return fmt;
140 }
141
142 /**
143 * Guesses the codec id based upon muxer and filename.
144 */
145 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
146 const char *filename, const char *mime_type, enum CodecType type){
147 if(type == CODEC_TYPE_VIDEO){
148 enum CodecID codec_id= CODEC_ID_NONE;
149
150 #ifdef CONFIG_IMAGE2_MUXER
151 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
152 codec_id= av_guess_image2_codec(filename);
153 }
154 #endif
155 if(codec_id == CODEC_ID_NONE)
156 codec_id= fmt->video_codec;
157 return codec_id;
158 }else if(type == CODEC_TYPE_AUDIO)
159 return fmt->audio_codec;
160 else
161 return CODEC_ID_NONE;
162 }
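/*
 * Usage sketch (illustrative, not from the original source): selecting a
 * muxer and its default codecs from nothing but an output filename.
 *
 *     AVOutputFormat *ofmt = guess_format(NULL, "out.mpg", NULL);
 *     if (ofmt) {
 *         enum CodecID vcodec = av_guess_codec(ofmt, NULL, "out.mpg", NULL,
 *                                              CODEC_TYPE_VIDEO);
 *         enum CodecID acodec = av_guess_codec(ofmt, NULL, "out.mpg", NULL,
 *                                              CODEC_TYPE_AUDIO);
 *         // vcodec/acodec fall back to ofmt->video_codec / ofmt->audio_codec
 *     }
 */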
163
164 /**
165 * finds AVInputFormat based on input format's short name.
166 */
167 AVInputFormat *av_find_input_format(const char *short_name)
168 {
169 AVInputFormat *fmt;
170 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
171 if (!strcmp(fmt->name, short_name))
172 return fmt;
173 }
174 return NULL;
175 }
176
177 /* memory handling */
178
179 /**
180 * Default packet destructor.
181 */
182 void av_destruct_packet(AVPacket *pkt)
183 {
184 av_free(pkt->data);
185 pkt->data = NULL; pkt->size = 0;
186 }
187
188 /**
189 * Allocate the payload of a packet and initialize its fields to default values.
190 *
191 * @param pkt packet
192 * @param size wanted payload size
193 * @return 0 if OK. AVERROR_xxx otherwise.
194 */
195 int av_new_packet(AVPacket *pkt, int size)
196 {
197 uint8_t *data;
198 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
199 return AVERROR_NOMEM;
200 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
201 if (!data)
202 return AVERROR_NOMEM;
203 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
204
205 av_init_packet(pkt);
206 pkt->data = data;
207 pkt->size = size;
208 pkt->destruct = av_destruct_packet;
209 return 0;
210 }
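/*
 * Minimal usage sketch (illustrative): allocate a padded payload, fill it,
 * and release it through the packet's destructor. 'src' is a hypothetical
 * source buffer.
 *
 *     AVPacket pkt;
 *     if (av_new_packet(&pkt, 1024) < 0)
 *         return AVERROR_NOMEM;
 *     memcpy(pkt.data, src, 1024);
 *     ...
 *     av_free_packet(&pkt);   // invokes av_destruct_packet()
 */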
211
212 /**
213 * Allocate and read the payload of a packet and initialize its fields to default values.
214 *
215 * @param pkt packet
216 * @param size wanted payload size
217 * @return >0 (read size) if OK. AVERROR_xxx otherwise.
218 */
219 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
220 {
221 int ret= av_new_packet(pkt, size);
222
223 if(ret<0)
224 return ret;
225
226 pkt->pos= url_ftell(s);
227
228 ret= get_buffer(s, pkt->data, size);
229 if(ret<=0)
230 av_free_packet(pkt);
231 else
232 pkt->size= ret;
233
234 return ret;
235 }
236
237 /* This is a hack - the packet memory allocation stuff is broken. The
238 packet data is copied into its own padded buffer here if it was not really allocated for the packet */
239 int av_dup_packet(AVPacket *pkt)
240 {
241 if (pkt->destruct != av_destruct_packet) {
242 uint8_t *data;
243 /* we duplicate the packet and don't forget to put the padding
244 again */
245 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
246 return AVERROR_NOMEM;
247 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
248 if (!data) {
249 return AVERROR_NOMEM;
250 }
251 memcpy(data, pkt->data, pkt->size);
252 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
253 pkt->data = data;
254 pkt->destruct = av_destruct_packet;
255 }
256 return 0;
257 }
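/*
 * Illustrative sketch: packets whose destructor is av_destruct_packet_nofree
 * still point into a buffer owned by the demuxer/parser and are only valid
 * until the next read. To keep such a packet around, duplicate it first:
 *
 *     AVPacket stored = pkt;             // shallow copy of a freshly read packet
 *     if (av_dup_packet(&stored) < 0)    // now owns its own padded copy of the data
 *         return AVERROR_NOMEM;
 */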
258
259 /**
260 * Check whether filename actually is a numbered sequence generator.
261 *
262 * @param filename possible numbered sequence string
263 * @return 1 if a valid numbered sequence string, 0 otherwise.
264 */
265 int av_filename_number_test(const char *filename)
266 {
267 char buf[1024];
268 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
269 }
270
271 /**
272 * Guess file format.
273 *
274 * @param is_opened whether the file is already opened, determines whether
275 * demuxers with or without AVFMT_NOFILE are probed
276 */
277 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
278 {
279 AVInputFormat *fmt1, *fmt;
280 int score, score_max;
281
282 fmt = NULL;
283 score_max = 0;
284 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
285 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
286 continue;
287 score = 0;
288 if (fmt1->read_probe) {
289 score = fmt1->read_probe(pd);
290 } else if (fmt1->extensions) {
291 if (match_ext(pd->filename, fmt1->extensions)) {
292 score = 50;
293 }
294 }
295 if (score > score_max) {
296 score_max = score;
297 fmt = fmt1;
298 }
299 }
300 return fmt;
301 }
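/*
 * Illustrative sketch (assumed caller code): probing the format of data that
 * is already in memory. 'buf' and 'buf_size' are hypothetical.
 *
 *     AVProbeData pd;
 *     pd.filename = "";          // only used for extension matching
 *     pd.buf      = buf;
 *     pd.buf_size = buf_size;
 *     AVInputFormat *fmt = av_probe_input_format(&pd, 1);  // 1: a file/stream is open
 */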
302
303 /************************************************************/
304 /* input media file */
305
306 /**
307 * Open a media file from an IO stream. 'fmt' must be specified.
308 */
309 static const char* format_to_name(void* ptr)
310 {
311 AVFormatContext* fc = (AVFormatContext*) ptr;
312 if(fc->iformat) return fc->iformat->name;
313 else if(fc->oformat) return fc->oformat->name;
314 else return "NULL";
315 }
316
317 #define OFFSET(x) offsetof(AVFormatContext,x)
318 #define DEFAULT 0 //should be NAN but it doesn't work as it is not a constant in glibc as required by ANSI/ISO C
319 //these names are too long to be readable
320 #define E AV_OPT_FLAG_ENCODING_PARAM
321 #define D AV_OPT_FLAG_DECODING_PARAM
322
323 static const AVOption options[]={
324 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
325 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
326 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
327 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
328 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
329 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
330 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
331 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
332 {"analyzeduration", NULL, OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
333 {NULL},
334 };
335
336 #undef E
337 #undef D
338 #undef DEFAULT
339
340 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
341
342 static void avformat_get_context_defaults(AVFormatContext *s)
343 {
344 memset(s, 0, sizeof(AVFormatContext));
345
346 s->av_class = &av_format_context_class;
347
348 av_opt_set_defaults(s);
349 }
350
351 AVFormatContext *av_alloc_format_context(void)
352 {
353 AVFormatContext *ic;
354 ic = av_malloc(sizeof(AVFormatContext));
355 if (!ic) return ic;
356 avformat_get_context_defaults(ic);
357 ic->av_class = &av_format_context_class;
358 return ic;
359 }
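/*
 * Illustrative sketch: the context returned above already carries the option
 * defaults from the table, which the caller may override directly before use:
 *
 *     AVFormatContext *ic = av_alloc_format_context();
 *     if (ic) {
 *         ic->probesize = 64000;               // probe more data than the 32000 default
 *         ic->flags    |= AVFMT_FLAG_GENPTS;   // same effect as the "genpts" fflag
 *     }
 */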
360
361 /**
362 * Allocates all the structures needed to read an input stream.
363 * This does not open the needed codecs for decoding the stream[s].
364 */
365 int av_open_input_stream(AVFormatContext **ic_ptr,
366 ByteIOContext *pb, const char *filename,
367 AVInputFormat *fmt, AVFormatParameters *ap)
368 {
369 int err;
370 AVFormatContext *ic;
371 AVFormatParameters default_ap;
372
373 if(!ap){
374 ap=&default_ap;
375 memset(ap, 0, sizeof(default_ap));
376 }
377
378 if(!ap->prealloced_context)
379 ic = av_alloc_format_context();
380 else
381 ic = *ic_ptr;
382 if (!ic) {
383 err = AVERROR_NOMEM;
384 goto fail;
385 }
386 ic->iformat = fmt;
387 if (pb)
388 ic->pb = *pb;
389 ic->duration = AV_NOPTS_VALUE;
390 ic->start_time = AV_NOPTS_VALUE;
391 pstrcpy(ic->filename, sizeof(ic->filename), filename);
392
393 /* allocate private data */
394 if (fmt->priv_data_size > 0) {
395 ic->priv_data = av_mallocz(fmt->priv_data_size);
396 if (!ic->priv_data) {
397 err = AVERROR_NOMEM;
398 goto fail;
399 }
400 } else {
401 ic->priv_data = NULL;
402 }
403
404 err = ic->iformat->read_header(ic, ap);
405 if (err < 0)
406 goto fail;
407
408 if (pb && !ic->data_offset)
409 ic->data_offset = url_ftell(&ic->pb);
410
411 *ic_ptr = ic;
412 return 0;
413 fail:
414 if (ic) {
415 av_freep(&ic->priv_data);
416 }
417 av_free(ic);
418 *ic_ptr = NULL;
419 return err;
420 }
421
422 /** Size of probe buffer, for guessing file type from file contents. */
423 #define PROBE_BUF_MIN 2048
424 #define PROBE_BUF_MAX (1<<20)
425
426 /**
427 * Open a media file as input. The codecs are not opened. Only the file
428 * header (if present) is read.
429 *
430 * @param ic_ptr the opened media file handle is put here
431 * @param filename filename to open.
432 * @param fmt if non NULL, force the file format to use
433 * @param buf_size optional buffer size (zero if default is OK)
434 * @param ap additional parameters needed when opening the file (NULL if default)
435 * @return 0 if OK. AVERROR_xxx otherwise.
436 */
437 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
438 AVInputFormat *fmt,
439 int buf_size,
440 AVFormatParameters *ap)
441 {
442 int err, must_open_file, file_opened, probe_size;
443 AVProbeData probe_data, *pd = &probe_data;
444 ByteIOContext pb1, *pb = &pb1;
445
446 file_opened = 0;
447 pd->filename = "";
448 if (filename)
449 pd->filename = filename;
450 pd->buf = NULL;
451 pd->buf_size = 0;
452
453 if (!fmt) {
454 /* guess format if no file can be opened */
455 fmt = av_probe_input_format(pd, 0);
456 }
457
458 /* do not open file if the format does not need it. XXX: specific
459 hack needed to handle RTSP/TCP */
460 must_open_file = 1;
461 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
462 must_open_file = 0;
463 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it is uninitialized
464 }
465
466 if (!fmt || must_open_file) {
467 /* the format is unknown or requires a file: open it and probe */
468 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
469 err = AVERROR_IO;
470 goto fail;
471 }
472 file_opened = 1;
473 if (buf_size > 0) {
474 url_setbufsize(pb, buf_size);
475 }
476
477 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
478 /* read probe data */
479 pd->buf= av_realloc(pd->buf, probe_size);
480 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
481 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
482 url_fclose(pb);
483 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
484 file_opened = 0;
485 err = AVERROR_IO;
486 goto fail;
487 }
488 }
489 /* guess file format */
490 fmt = av_probe_input_format(pd, 1);
491 }
492 av_freep(&pd->buf);
493 }
494
495 /* if still no format found, error */
496 if (!fmt) {
497 err = AVERROR_NOFMT;
498 goto fail;
499 }
500
501 /* XXX: suppress this hack for redirectors */
502 #ifdef CONFIG_NETWORK
503 if (fmt == &redir_demuxer) {
504 err = redir_open(ic_ptr, pb);
505 url_fclose(pb);
506 return err;
507 }
508 #endif
509
510 /* check the filename in case an image number is expected */
511 if (fmt->flags & AVFMT_NEEDNUMBER) {
512 if (!av_filename_number_test(filename)) {
513 err = AVERROR_NUMEXPECTED;
514 goto fail;
515 }
516 }
517 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
518 if (err)
519 goto fail;
520 return 0;
521 fail:
522 av_freep(&pd->buf);
523 if (file_opened)
524 url_fclose(pb);
525 *ic_ptr = NULL;
526 return err;
527
528 }
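/*
 * Typical call sequence (illustrative sketch, minimal error handling;
 * "input.mpg" is a placeholder filename):
 *
 *     AVFormatContext *ic;
 *     av_register_all();
 *     if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
 *         return -1;
 *     if (av_find_stream_info(ic) < 0) {
 *         av_close_input_file(ic);
 *         return -1;
 *     }
 *     ...
 *     av_close_input_file(ic);
 */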
529
530 /*******************************************************/
531
532 /**
533 * Read a transport packet from a media file.
534 *
535 * This function is obsolete and should never be used.
536 * Use av_read_frame() instead.
537 *
538 * @param s media file handle
539 * @param pkt is filled
540 * @return 0 if OK. AVERROR_xxx if error.
541 */
542 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
543 {
544 return s->iformat->read_packet(s, pkt);
545 }
546
547 /**********************************************************/
548
549 /**
550 * Get the number of samples of an audio frame. Return -1 on error.
551 */
552 static int get_audio_frame_size(AVCodecContext *enc, int size)
553 {
554 int frame_size;
555
556 if (enc->frame_size <= 1) {
557 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
558
559 if (bits_per_sample) {
560 if (enc->channels == 0)
561 return -1;
562 frame_size = (size << 3) / (bits_per_sample * enc->channels);
563 } else {
564 /* used for example by ADPCM codecs */
565 if (enc->bit_rate == 0)
566 return -1;
567 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
568 }
569 } else {
570 frame_size = enc->frame_size;
571 }
572 return frame_size;
573 }
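/*
 * Worked example: for 16-bit stereo PCM (bits_per_sample = 16, channels = 2)
 * a 4096-byte packet yields (4096 << 3) / (16 * 2) = 1024 samples, which
 * compute_frame_duration() below turns into 1024 / sample_rate seconds.
 */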
574
575
576 /**
577 * Compute the frame duration, in seconds, as the fraction *pnum / *pden; both are set to 0 if it is not available.
578 */
579 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
580 AVCodecParserContext *pc, AVPacket *pkt)
581 {
582 int frame_size;
583
584 *pnum = 0;
585 *pden = 0;
586 switch(st->codec->codec_type) {
587 case CODEC_TYPE_VIDEO:
588 if(st->time_base.num*1000LL > st->time_base.den){
589 *pnum = st->time_base.num;
590 *pden = st->time_base.den;
591 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
592 *pnum = st->codec->time_base.num;
593 *pden = st->codec->time_base.den;
594 if (pc && pc->repeat_pict) {
595 *pden *= 2;
596 *pnum = (*pnum) * (2 + pc->repeat_pict);
597 }
598 }
599 break;
600 case CODEC_TYPE_AUDIO:
601 frame_size = get_audio_frame_size(st->codec, pkt->size);
602 if (frame_size < 0)
603 break;
604 *pnum = frame_size;
605 *pden = st->codec->sample_rate;
606 break;
607 default:
608 break;
609 }
610 }
611
612 static int is_intra_only(AVCodecContext *enc){
613 if(enc->codec_type == CODEC_TYPE_AUDIO){
614 return 1;
615 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
616 switch(enc->codec_id){
617 case CODEC_ID_MJPEG:
618 case CODEC_ID_MJPEGB:
619 case CODEC_ID_LJPEG:
620 case CODEC_ID_RAWVIDEO:
621 case CODEC_ID_DVVIDEO:
622 case CODEC_ID_HUFFYUV:
623 case CODEC_ID_FFVHUFF:
624 case CODEC_ID_ASV1:
625 case CODEC_ID_ASV2:
626 case CODEC_ID_VCR1:
627 return 1;
628 default: break;
629 }
630 }
631 return 0;
632 }
633
634 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
635 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
636 int64_t delta= last_ts - mask/2;
637 return ((lsb - delta)&mask) + delta;
638 }
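/*
 * Worked example of the wrap handling above: with lsb_bits = 4 the mask is 15
 * and timestamps wrap every 16 ticks. If the last timestamp was 14, an
 * incoming lsb of 1 is read as 17 (one wrap later) rather than 1:
 *     delta = last_ts - mask/2 = 14 - 7 = 7,  ((1 - 7) & 15) + 7 = 10 + 7 = 17
 */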
639
640 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
641 AVCodecParserContext *pc, AVPacket *pkt)
642 {
643 int num, den, presentation_delayed;
644 /* handle wrapping */
645 if(st->cur_dts != AV_NOPTS_VALUE){
646 if(pkt->pts != AV_NOPTS_VALUE)
647 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
648 if(pkt->dts != AV_NOPTS_VALUE)
649 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
650 }
651
652 if (pkt->duration == 0) {
653 compute_frame_duration(&num, &den, st, pc, pkt);
654 if (den && num) {
655 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
656 }
657 }
658
659 if(is_intra_only(st->codec))
660 pkt->flags |= PKT_FLAG_KEY;
661
662 /* do we have a video B frame ? */
663 presentation_delayed = 0;
664 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
665 /* XXX: need has_b_frame, but cannot get it if the codec is
666 not initialized */
667 if (( st->codec->codec_id == CODEC_ID_H264
668 || st->codec->has_b_frames) &&
669 pc && pc->pict_type != FF_B_TYPE)
670 presentation_delayed = 1;
671 /* this may be redundant, but it shouldn't hurt */
672 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
673 presentation_delayed = 1;
674 }
675
676 if(st->cur_dts == AV_NOPTS_VALUE){
677 if(presentation_delayed) st->cur_dts = -pkt->duration;
678 else st->cur_dts = 0;
679 }
680
681 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
682 /* interpolate PTS and DTS if they are not present */
683 if (presentation_delayed) {
684 /* DTS = decompression time stamp */
685 /* PTS = presentation time stamp */
686 if (pkt->dts == AV_NOPTS_VALUE) {
687 /* if we know the last pts, use it */
688 if(st->last_IP_pts != AV_NOPTS_VALUE)
689 st->cur_dts = pkt->dts = st->last_IP_pts;
690 else
691 pkt->dts = st->cur_dts;
692 } else {
693 st->cur_dts = pkt->dts;
694 }
695 /* this is tricky: the dts must be incremented by the duration
696 of the frame we are displaying, i.e. the last I or P frame */
697 if (st->last_IP_duration == 0)
698 st->cur_dts += pkt->duration;
699 else
700 st->cur_dts += st->last_IP_duration;
701 st->last_IP_duration = pkt->duration;
702 st->last_IP_pts= pkt->pts;
703 /* cannot compute the PTS if it is not present (we could compute it only
704 by knowing the future) */
705 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
706 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
707 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
708 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
709 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
710 pkt->pts += pkt->duration;
711 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
712 }
713 }
714
715 /* presentation is not delayed : PTS and DTS are the same */
716 if (pkt->pts == AV_NOPTS_VALUE) {
717 if (pkt->dts == AV_NOPTS_VALUE) {
718 pkt->pts = st->cur_dts;
719 pkt->dts = st->cur_dts;
720 }
721 else {
722 st->cur_dts = pkt->dts;
723 pkt->pts = pkt->dts;
724 }
725 } else {
726 st->cur_dts = pkt->pts;
727 pkt->dts = pkt->pts;
728 }
729 st->cur_dts += pkt->duration;
730 }
731 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
732
733 /* update flags */
734 if (pc) {
735 pkt->flags = 0;
736 /* key frame computation */
737 switch(st->codec->codec_type) {
738 case CODEC_TYPE_VIDEO:
739 if (pc->pict_type == FF_I_TYPE)
740 pkt->flags |= PKT_FLAG_KEY;
741 break;
742 case CODEC_TYPE_AUDIO:
743 pkt->flags |= PKT_FLAG_KEY;
744 break;
745 default:
746 break;
747 }
748 }
749 }
750
751 void av_destruct_packet_nofree(AVPacket *pkt)
752 {
753 pkt->data = NULL; pkt->size = 0;
754 }
755
756 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
757 {
758 AVStream *st;
759 int len, ret, i;
760
761 for(;;) {
762 /* select current input stream component */
763 st = s->cur_st;
764 if (st) {
765 if (!st->need_parsing || !st->parser) {
766 /* no parsing needed: we just output the packet as is */
767 /* raw data support */
768 *pkt = s->cur_pkt;
769 compute_pkt_fields(s, st, NULL, pkt);
770 s->cur_st = NULL;
771 break;
772 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
773 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
774 s->cur_ptr, s->cur_len,
775 s->cur_pkt.pts, s->cur_pkt.dts);
776 s->cur_pkt.pts = AV_NOPTS_VALUE;
777 s->cur_pkt.dts = AV_NOPTS_VALUE;
778 /* increment read pointer */
779 s->cur_ptr += len;
780 s->cur_len -= len;
781
782 /* return packet if any */
783 if (pkt->size) {
784 got_packet:
785 pkt->duration = 0;
786 pkt->stream_index = st->index;
787 pkt->pts = st->parser->pts;
788 pkt->dts = st->parser->dts;
789 pkt->destruct = av_destruct_packet_nofree;
790 compute_pkt_fields(s, st, st->parser, pkt);
791
792 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
793 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
794 0, 0, AVINDEX_KEYFRAME);
795 }
796
797 break;
798 }
799 } else {
800 /* free packet */
801 av_free_packet(&s->cur_pkt);
802 s->cur_st = NULL;
803 }
804 } else {
805 /* read next packet */
806 ret = av_read_packet(s, &s->cur_pkt);
807 if (ret < 0) {
808 if (ret == -EAGAIN)
809 return ret;
810 /* return the last frames, if any */
811 for(i = 0; i < s->nb_streams; i++) {
812 st = s->streams[i];
813 if (st->parser && st->need_parsing) {
814 av_parser_parse(st->parser, st->codec,
815 &pkt->data, &pkt->size,
816 NULL, 0,
817 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
818 if (pkt->size)
819 goto got_packet;
820 }
821 }
822 /* no more packets: really terminates parsing */
823 return ret;
824 }
825
826 st = s->streams[s->cur_pkt.stream_index];
827 if(st->codec->debug & FF_DEBUG_PTS)
828 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
829 s->cur_pkt.stream_index,
830 s->cur_pkt.pts,
831 s->cur_pkt.dts,
832 s->cur_pkt.size);
833
834 s->cur_st = st;
835 s->cur_ptr = s->cur_pkt.data;
836 s->cur_len = s->cur_pkt.size;
837 if (st->need_parsing && !st->parser) {
838 st->parser = av_parser_init(st->codec->codec_id);
839 if (!st->parser) {
840 /* no parser available : just output the raw packets */
841 st->need_parsing = 0;
842 }else if(st->need_parsing == 2){
843 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
844 }
845 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
846 st->parser->last_frame_offset=
847 st->parser->cur_offset= s->cur_pkt.pos;
848 }
849 }
850 }
851 }
852 if(st->codec->debug & FF_DEBUG_PTS)
853 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
854 pkt->stream_index,
855 pkt->pts,
856 pkt->dts,
857 pkt->size);
858
859 return 0;
860 }
861
862 /**
863 * Return the next frame of a stream.
864 *
865 * The returned packet is valid
866 * until the next av_read_frame() or until av_close_input_file() and
867 * must be freed with av_free_packet. For video, the packet contains
868 * exactly one frame. For audio, it contains an integer number of
869 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
870 * data). If the audio frames have a variable size (e.g. MPEG audio),
871 * then it contains one frame.
872 *
873 * pkt->pts, pkt->dts and pkt->duration are always set to correct
874 * values in AVStream.time_base units (and guessed if the format cannot
875 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
876 * has B frames, so it is better to rely on pkt->dts if you do not
877 * decompress the payload.
878 *
879 * @return 0 if OK, < 0 if error or end of file.
880 */
881 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
882 {
883 AVPacketList *pktl;
884 int eof=0;
885 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
886
887 for(;;){
888 pktl = s->packet_buffer;
889 if (pktl) {
890 AVPacket *next_pkt= &pktl->pkt;
891
892 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
893 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
894 if( pktl->pkt.stream_index == next_pkt->stream_index
895 && next_pkt->dts < pktl->pkt.dts
896 && pktl->pkt.pts != pktl->pkt.dts //not b frame
897 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
898 next_pkt->pts= pktl->pkt.dts;
899 }
900 pktl= pktl->next;
901 }
902 pktl = s->packet_buffer;
903 }
904
905 if( next_pkt->pts != AV_NOPTS_VALUE
906 || next_pkt->dts == AV_NOPTS_VALUE
907 || !genpts || eof){
908 /* read packet from packet buffer, if there is data */
909 *pkt = *next_pkt;
910 s->packet_buffer = pktl->next;
911 av_free(pktl);
912 return 0;
913 }
914 }
915 if(genpts){
916 AVPacketList **plast_pktl= &s->packet_buffer;
917 int ret= av_read_frame_internal(s, pkt);
918 if(ret<0){
919 if(pktl && ret != -EAGAIN){
920 eof=1;
921 continue;
922 }else
923 return ret;
924 }
925
926 /* duplicate the packet */
927 if (av_dup_packet(pkt) < 0)
928 return AVERROR_NOMEM;
929
930 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
931
932 pktl = av_mallocz(sizeof(AVPacketList));
933 if (!pktl)
934 return AVERROR_NOMEM;
935
936 /* add the packet in the buffered packet list */
937 *plast_pktl = pktl;
938 pktl->pkt= *pkt;
939 }else{
940 assert(!s->packet_buffer);
941 return av_read_frame_internal(s, pkt);
942 }
943 }
944 }
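/*
 * Typical demuxing loop (illustrative sketch; 'ic' is an opened
 * AVFormatContext and 'video_index' is chosen by the caller, e.g. via
 * av_find_default_stream_index()):
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         if (pkt.stream_index == video_index) {
 *             // decode or remux pkt.data / pkt.size here
 *         }
 *         av_free_packet(&pkt);   // always release the packet
 *     }
 */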
945
946 /* XXX: suppress the packet queue */
947 static void flush_packet_queue(AVFormatContext *s)
948 {
949 AVPacketList *pktl;
950
951 for(;;) {
952 pktl = s->packet_buffer;
953 if (!pktl)
954 break;
955 s->packet_buffer = pktl->next;
956 av_free_packet(&pktl->pkt);
957 av_free(pktl);
958 }
959 }
960
961 /*******************************************************/
962 /* seek support */
963
964 int av_find_default_stream_index(AVFormatContext *s)
965 {
966 int i;
967 AVStream *st;
968
969 if (s->nb_streams <= 0)
970 return -1;
971 for(i = 0; i < s->nb_streams; i++) {
972 st = s->streams[i];
973 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
974 return i;
975 }
976 }
977 return 0;
978 }
979
980 /**
981 * Flush the frame reader.
982 */
983 static void av_read_frame_flush(AVFormatContext *s)
984 {
985 AVStream *st;
986 int i;
987
988 flush_packet_queue(s);
989
990 /* free previous packet */
991 if (s->cur_st) {
992 if (s->cur_st->parser)
993 av_free_packet(&s->cur_pkt);
994 s->cur_st = NULL;
995 }
996 /* fail safe */
997 s->cur_ptr = NULL;
998 s->cur_len = 0;
999
1000 /* for each stream, reset read state */
1001 for(i = 0; i < s->nb_streams; i++) {
1002 st = s->streams[i];
1003
1004 if (st->parser) {
1005 av_parser_close(st->parser);
1006 st->parser = NULL;
1007 }
1008 st->last_IP_pts = AV_NOPTS_VALUE;
1009 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
1010 }
1011 }
1012
1013 /**
1014 * Updates cur_dts of all streams based on given timestamp and AVStream.
1015 *
1016 * Stream ref_st is left unchanged; the others get cur_dts set in their native time base.
1017 * Only needed for timestamp wrapping or if (dts not set and pts!=dts).
1018 * @param timestamp new dts expressed in time_base of param ref_st
1019 * @param ref_st reference stream giving time_base of param timestamp
1020 */
1021 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1022 int i;
1023
1024 for(i = 0; i < s->nb_streams; i++) {
1025 AVStream *st = s->streams[i];
1026
1027 st->cur_dts = av_rescale(timestamp,
1028 st->time_base.den * (int64_t)ref_st->time_base.num,
1029 st->time_base.num * (int64_t)ref_st->time_base.den);
1030 }
1031 }
1032
1033 /**
1034 * Add an index entry into a sorted list, updating it if it is already there.
1035 *
1036 * @param timestamp timestamp in the timebase of the given stream
1037 */
1038 int av_add_index_entry(AVStream *st,
1039 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1040 {
1041 AVIndexEntry *entries, *ie;
1042 int index;
1043
1044 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1045 return -1;
1046
1047 entries = av_fast_realloc(st->index_entries,
1048 &st->index_entries_allocated_size,
1049 (st->nb_index_entries + 1) *
1050 sizeof(AVIndexEntry));
1051 if(!entries)
1052 return -1;
1053
1054 st->index_entries= entries;
1055
1056 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1057
1058 if(index<0){
1059 index= st->nb_index_entries++;
1060 ie= &entries[index];
1061 assert(index==0 || ie[-1].timestamp < timestamp);
1062 }else{
1063 ie= &entries[index];
1064 if(ie->timestamp != timestamp){
1065 if(ie->timestamp <= timestamp)
1066 return -1;
1067 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1068 st->nb_index_entries++;
1069 }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1070 distance= ie->min_distance;
1071 }
1072
1073 ie->pos = pos;
1074 ie->timestamp = timestamp;
1075 ie->min_distance= distance;
1076 ie->size= size;
1077 ie->flags = flags;
1078
1079 return index;
1080 }
1081
1082 /**
1083 * build an index for raw streams using a parser.
1084 */
1085 static void av_build_index_raw(AVFormatContext *s)
1086 {
1087 AVPacket pkt1, *pkt = &pkt1;
1088 int ret;
1089 AVStream *st;
1090
1091 st = s->streams[0];
1092 av_read_frame_flush(s);
1093 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1094
1095 for(;;) {
1096 ret = av_read_frame(s, pkt);
1097 if (ret < 0)
1098 break;
1099 if (pkt->stream_index == 0 && st->parser &&
1100 (pkt->flags & PKT_FLAG_KEY)) {
1101 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1102 0, 0, AVINDEX_KEYFRAME);
1103 }
1104 av_free_packet(pkt);
1105 }
1106 }
1107
1108 /**
1109 * Returns TRUE if we deal with a raw stream.
1110 *
1111 * Raw codec data and parsing needed.
1112 */
1113 static int is_raw_stream(AVFormatContext *s)
1114 {
1115 AVStream *st;
1116
1117 if (s->nb_streams != 1)
1118 return 0;
1119 st = s->streams[0];
1120 if (!st->need_parsing)
1121 return 0;
1122 return 1;
1123 }
1124
1125 /**
1126 * Gets the index for a specific timestamp.
1127 * @param flags if AVSEEK_FLAG_BACKWARD is set then the returned index corresponds to
1128 * the timestamp which is <= the requested one; otherwise
1129 * it will be >= the requested one;
1130 * if AVSEEK_FLAG_ANY is set, any frame qualifies, otherwise only keyframes do
1131 * @return < 0 if no such timestamp could be found
1132 */
1133 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1134 int flags)
1135 {
1136 AVIndexEntry *entries= st->index_entries;
1137 int nb_entries= st->nb_index_entries;
1138 int a, b, m;
1139 int64_t timestamp;
1140
1141 a = - 1;
1142 b = nb_entries;
1143
1144 while (b - a > 1) {
1145 m = (a + b) >> 1;
1146 timestamp = entries[m].timestamp;
1147 if(timestamp >= wanted_timestamp)
1148 b = m;
1149 if(timestamp <= wanted_timestamp)
1150 a = m;
1151 }
1152 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1153
1154 if(!(flags & AVSEEK_FLAG_ANY)){
1155 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1156 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1157 }
1158 }
1159
1160 if(m == nb_entries)
1161 return -1;
1162 return m;
1163 }
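/*
 * Worked example: with keyframe index entries at timestamps 0, 100 and 200,
 * searching for 150 returns the entry at 100 when AVSEEK_FLAG_BACKWARD is set
 * and the entry at 200 otherwise; searching for 250 without the backward flag
 * finds nothing and returns -1.
 */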
1164
1165 #define DEBUG_SEEK
1166
1167 /**
1168 * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
1169 * This is not supposed to be called directly by a user application, but by demuxers.
1170 * @param target_ts target timestamp in the time base of the given stream
1171 * @param stream_index stream number
1172 */
1173 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1174 AVInputFormat *avif= s->iformat;
1175 int64_t pos_min, pos_max, pos, pos_limit;
1176 int64_t ts_min, ts_max, ts;
1177 int index;
1178 AVStream *st;
1179
1180 if (stream_index < 0)
1181 return -1;
1182
1183 #ifdef DEBUG_SEEK
1184 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1185 #endif
1186
1187 ts_max=
1188 ts_min= AV_NOPTS_VALUE;
1189 pos_limit= -1; //gcc falsely says it may be uninitialized
1190
1191 st= s->streams[stream_index];
1192 if(st->index_entries){
1193 AVIndexEntry *e;
1194
1195 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1196 index= FFMAX(index, 0);
1197 e= &st->index_entries[index];
1198
1199 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1200 pos_min= e->pos;
1201 ts_min= e->timestamp;
1202 #ifdef DEBUG_SEEK
1203 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1204 pos_min,ts_min);
1205 #endif
1206 }else{
1207 assert(index==0);
1208 }
1209
1210 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1211 assert(index < st->nb_index_entries);
1212 if(index >= 0){
1213 e= &st->index_entries[index];
1214 assert(e->timestamp >= target_ts);
1215 pos_max= e->pos;
1216 ts_max= e->timestamp;
1217 pos_limit= pos_max - e->min_distance;
1218 #ifdef DEBUG_SEEK
1219 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1220 pos_max,pos_limit, ts_max);
1221 #endif
1222 }
1223 }
1224
1225 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1226 if(pos<0)
1227 return -1;
1228
1229 /* do the seek */
1230 url_fseek(&s->pb, pos, SEEK_SET);
1231
1232 av_update_cur_dts(s, st, ts);
1233
1234 return 0;
1235 }
1236
1237 /**
1238 * Does a binary search using read_timestamp().
1239 * This is not supposed to be called directly by a user application, but by demuxers.
1240 * @param target_ts target timestamp in the time base of the given stream
1241 * @param stream_index stream number
1242 */
1243 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1244 int64_t pos, ts;
1245 int64_t start_pos, filesize;
1246 int no_change;
1247
1248 #ifdef DEBUG_SEEK
1249 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1250 #endif
1251
1252 if(ts_min == AV_NOPTS_VALUE){
1253 pos_min = s->data_offset;
1254 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1255 if (ts_min == AV_NOPTS_VALUE)
1256 return -1;
1257 }
1258
1259 if(ts_max == AV_NOPTS_VALUE){
1260 int step= 1024;
1261 filesize = url_fsize(&s->pb);
1262 pos_max = filesize - 1;
1263 do{
1264 pos_max -= step;
1265 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1266 step += step;
1267 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1268 if (ts_max == AV_NOPTS_VALUE)
1269 return -1;
1270
1271 for(;;){
1272 int64_t tmp_pos= pos_max + 1;
1273 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1274 if(tmp_ts == AV_NOPTS_VALUE)
1275 break;
1276 ts_max= tmp_ts;
1277 pos_max= tmp_pos;
1278 if(tmp_pos >= filesize)
1279 break;
1280 }
1281 pos_limit= pos_max;
1282 }
1283
1284 if(ts_min > ts_max){
1285 return -1;
1286 }else if(ts_min == ts_max){
1287 pos_limit= pos_min;
1288 }
1289
1290 no_change=0;
1291 while (pos_min < pos_limit) {
1292 #ifdef DEBUG_SEEK
1293 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1294 pos_min, pos_max,
1295 ts_min, ts_max);
1296 #endif
1297 assert(pos_limit <= pos_max);
1298
1299 if(no_change==0){
1300 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1301 // interpolate position (better than dichotomy)
1302 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1303 + pos_min - approximate_keyframe_distance;
1304 }else if(no_change==1){
1305 // bisection, if interpolation failed to change min or max pos last time
1306 pos = (pos_min + pos_limit)>>1;
1307 }else{
1308 // linear search if bisection failed; can only happen if there are very few or no keyframes between min/max
1309 pos=pos_min;
1310 }
1311 if(pos <= pos_min)
1312 pos= pos_min + 1;
1313 else if(pos > pos_limit)
1314 pos= pos_limit;
1315 start_pos= pos;
1316
1317 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1318 if(pos == pos_max)
1319 no_change++;
1320 else
1321 no_change=0;
1322 #ifdef DEBUG_SEEK
1323 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1324 #endif
1325 assert(ts != AV_NOPTS_VALUE);
1326 if (target_ts <= ts) {
1327 pos_limit = start_pos - 1;
1328 pos_max = pos;
1329 ts_max = ts;
1330 }
1331 if (target_ts >= ts) {
1332 pos_min = pos;
1333 ts_min = ts;
1334 }
1335 }
1336
1337 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1338 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1339 #ifdef DEBUG_SEEK
1340 pos_min = pos;
1341 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1342 pos_min++;
1343 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1344 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1345 pos, ts_min, target_ts, ts_max);
1346 #endif
1347 *ts_ret= ts;
1348 return pos;
1349 }
1350
1351 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1352 int64_t pos_min, pos_max;
1353 #if 0
1354 AVStream *st;
1355
1356 if (stream_index < 0)
1357 return -1;
1358
1359 st= s->streams[stream_index];
1360 #endif
1361
1362 pos_min = s->data_offset;
1363 pos_max = url_fsize(&s->pb) - 1;
1364
1365 if (pos < pos_min) pos= pos_min;
1366 else if(pos > pos_max) pos= pos_max;
1367
1368 url_fseek(&s->pb, pos, SEEK_SET);
1369
1370 #if 0
1371 av_update_cur_dts(s, st, ts);
1372 #endif
1373 return 0;
1374 }
1375
1376 static int av_seek_frame_generic(AVFormatContext *s,
1377 int stream_index, int64_t timestamp, int flags)
1378 {
1379 int index;
1380 AVStream *st;
1381 AVIndexEntry *ie;
1382
1383 st = s->streams[stream_index];
1384
1385 index = av_index_search_timestamp(st, timestamp, flags);
1386
1387 if(index < 0){
1388 int i;
1389 AVPacket pkt;
1390
1391 if(st->index_entries && st->nb_index_entries){
1392 ie= &st->index_entries[st->nb_index_entries-1];
1393 url_fseek(&s->pb, ie->pos, SEEK_SET);
1394 av_update_cur_dts(s, st, ie->timestamp);
1395 }else
1396 url_fseek(&s->pb, 0, SEEK_SET);
1397
1398 for(i=0;; i++) {
1399 int ret = av_read_frame(s, &pkt);
1400 if(ret<0)
1401 break;
1402 av_free_packet(&pkt);
1403 if(stream_index == pkt.stream_index){
1404 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1405 break;
1406 }
1407 }
1408 index = av_index_search_timestamp(st, timestamp, flags);
1409 }
1410 if (index < 0)
1411 return -1;
1412
1413 av_read_frame_flush(s);
1414 if (s->iformat->read_seek){
1415 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1416 return 0;
1417 }
1418 ie = &st->index_entries[index];
1419 url_fseek(&s->pb, ie->pos, SEEK_SET);
1420
1421 av_update_cur_dts(s, st, ie->timestamp);
1422
1423 return 0;
1424 }
1425
1426 /**
1427 * Seek to the keyframe at 'timestamp' in the stream 'stream_index'.
1428 *
1429 * @param stream_index If stream_index is (-1), a default
1430 * stream is selected, and timestamp is automatically converted
1431 * from AV_TIME_BASE units to the stream specific time_base.
1432 * @param timestamp timestamp in AVStream.time_base units
1433 * or if there is no stream specified then in AV_TIME_BASE units
1434 * @param flags flags which select direction and seeking mode
1435 * @return >= 0 on success
1436 */
1437 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1438 {
1439 int ret;
1440 AVStream *st;
1441
1442 av_read_frame_flush(s);
1443
1444 if(flags & AVSEEK_FLAG_BYTE)
1445 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1446
1447 if(stream_index < 0){
1448 stream_index= av_find_default_stream_index(s);
1449 if(stream_index < 0)
1450 return -1;
1451
1452 st= s->streams[stream_index];
1453 /* timestamp for default must be expressed in AV_TIME_BASE units */
1454 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1455 }
1456 st= s->streams[stream_index];
1457
1458 /* first, we try the format specific seek */
1459 if (s->iformat->read_seek)
1460 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1461 else
1462 ret = -1;
1463 if (ret >= 0) {
1464 return 0;
1465 }
1466
1467 if(s->iformat->read_timestamp)
1468 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1469 else
1470 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1471 }
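/*
 * Illustrative sketch: seeking to roughly 10 seconds. With stream_index = -1
 * the timestamp is in AV_TIME_BASE units and a default stream is picked; to
 * seek in a specific stream 'st' (index 'stream_index', both chosen by the
 * caller) the timestamp is rescaled into its time base first.
 *
 *     av_seek_frame(ic, -1, 10 * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
 *
 *     int64_t ts = av_rescale(10 * (int64_t)AV_TIME_BASE,
 *                             st->time_base.den,
 *                             AV_TIME_BASE * (int64_t)st->time_base.num);
 *     av_seek_frame(ic, stream_index, ts, AVSEEK_FLAG_BACKWARD);
 */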
1472
1473 /*******************************************************/
1474
1475 /**
1476 * Returns TRUE if the file has accurate timings in at least one stream.
1477 *
1478 * @return TRUE if the stream has accurate timings for at least one component.
1479 */
1480 static int av_has_timings(AVFormatContext *ic)
1481 {
1482 int i;
1483 AVStream *st;
1484
1485 for(i = 0;i < ic->nb_streams; i++) {
1486 st = ic->streams[i];
1487 if (st->start_time != AV_NOPTS_VALUE &&
1488 st->duration != AV_NOPTS_VALUE)
1489 return 1;
1490 }
1491 return 0;
1492 }
1493
1494 /**
1495 * Estimate the stream timings from those of each component.
1496 *
1497 * Also computes the global bitrate if possible.
1498 */
1499 static void av_update_stream_timings(AVFormatContext *ic)
1500 {
1501 int64_t start_time, start_time1, end_time, end_time1;
1502 int i;
1503 AVStream *st;
1504
1505 start_time = INT64_MAX;
1506 end_time = INT64_MIN;
1507 for(i = 0;i < ic->nb_streams; i++) {
1508 st = ic->streams[i];
1509 if (st->start_time != AV_NOPTS_VALUE) {
1510 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1511 if (start_time1 < start_time)
1512 start_time = start_time1;
1513 if (st->duration != AV_NOPTS_VALUE) {
1514 end_time1 = start_time1
1515 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1516 if (end_time1 > end_time)
1517 end_time = end_time1;
1518 }
1519 }
1520 }
1521 if (start_time != INT64_MAX) {
1522 ic->start_time = start_time;
1523 if (end_time != INT64_MIN) {
1524 ic->duration = end_time - start_time;
1525 if (ic->file_size > 0) {
1526 /* compute the bit rate */
1527 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1528 (double)ic->duration;
1529 }
1530 }
1531 }
1532
1533 }
1534
1535 static void fill_all_stream_timings(AVFormatContext *ic)
1536 {
1537 int i;
1538 AVStream *st;
1539
1540 av_update_stream_timings(ic);
1541 for(i = 0;i < ic->nb_streams; i++) {
1542 st = ic->streams[i];
1543 if (st->start_time == AV_NOPTS_VALUE) {
1544 if(ic->start_time != AV_NOPTS_VALUE)
1545 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1546 if(ic->duration != AV_NOPTS_VALUE)
1547 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1548 }
1549 }
1550 }
1551
1552 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1553 {
1554 int64_t filesize, duration;
1555 int bit_rate, i;
1556 AVStream *st;
1557
1558 /* if bit_rate is already set, we believe it */
1559 if (ic->bit_rate == 0) {
1560 bit_rate = 0;
1561 for(i=0;i<ic->nb_streams;i++) {
1562 st = ic->streams[i];
1563 bit_rate += st->codec->bit_rate;
1564 }
1565 ic->bit_rate = bit_rate;
1566 }
1567
1568 /* if duration is already set, we believe it */
1569 if (ic->duration == AV_NOPTS_VALUE &&
1570 ic->bit_rate != 0 &&
1571 ic->file_size != 0) {
1572 filesize = ic->file_size;
1573 if (filesize > 0) {
1574 for(i = 0; i < ic->nb_streams; i++) {
1575 st = ic->streams[i];
1576 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1577 if (st->start_time == AV_NOPTS_VALUE ||
1578 st->duration == AV_NOPTS_VALUE) {
1579 st->start_time = 0;
1580 st->duration = duration;
1581 }
1582 }
1583 }
1584 }
1585 }
1586
1587 #define DURATION_MAX_READ_SIZE 250000
1588
1589 /* only usable for MPEG-PS streams */
1590 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1591 {
1592 AVPacket pkt1, *pkt = &pkt1;
1593 AVStream *st;
1594 int read_size, i, ret;
1595 int64_t end_time;
1596 int64_t filesize, offset, duration;
1597
1598 /* free previous packet */
1599 if (ic->cur_st && ic->cur_st->parser)
1600 av_free_packet(&ic->cur_pkt);
1601 ic->cur_st = NULL;
1602
1603 /* flush packet queue */
1604 flush_packet_queue(ic);
1605
1606 for(i=0;i<ic->nb_streams;i++) {
1607 st = ic->streams[i];
1608 if (st->parser) {
1609 av_parser_close(st->parser);
1610 st->parser= NULL;
1611 }
1612 }
1613
1614 /* we read the first packets to get the first PTS (not fully
1615 accurate, but it is enough now) */
1616 url_fseek(&ic->pb, 0, SEEK_SET);
1617 read_size = 0;
1618 for(;;) {
1619 if (read_size >= DURATION_MAX_READ_SIZE)
1620 break;
1621 /* if all info is available, we can stop */
1622 for(i = 0;i < ic->nb_streams; i++) {
1623 st = ic->streams[i];
1624 if (st->start_time == AV_NOPTS_VALUE)
1625 break;
1626 }
1627 if (i == ic->nb_streams)
1628 break;
1629
1630 ret = av_read_packet(ic, pkt);
1631 if (ret != 0)
1632 break;
1633 read_size += pkt->size;
1634 st = ic->streams[pkt->stream_index];
1635 if (pkt->pts != AV_NOPTS_VALUE) {
1636 if (st->start_time == AV_NOPTS_VALUE)
1637 st->start_time = pkt->pts;
1638 }
1639 av_free_packet(pkt);
1640 }
1641
1642 /* estimate the end time (duration) */
1643 /* XXX: may need to support wrapping */
1644 filesize = ic->file_size;
1645 offset = filesize - DURATION_MAX_READ_SIZE;
1646 if (offset < 0)
1647 offset = 0;
1648
1649 url_fseek(&ic->pb, offset, SEEK_SET);
1650 read_size = 0;
1651 for(;;) {
1652 if (read_size >= DURATION_MAX_READ_SIZE)
1653 break;
1654 /* if all info is available, we can stop */
1655 for(i = 0;i < ic->nb_streams; i++) {
1656 st = ic->streams[i];
1657 if (st->duration == AV_NOPTS_VALUE)
1658 break;
1659 }
1660 if (i == ic->nb_streams)
1661 break;
1662
1663 ret = av_read_packet(ic, pkt);
1664 if (ret != 0)
1665 break;
1666 read_size += pkt->size;
1667 st = ic->streams[pkt->stream_index];
1668 if (pkt->pts != AV_NOPTS_VALUE) {
1669 end_time = pkt->pts;
1670 duration = end_time - st->start_time;
1671 if (duration > 0) {
1672 if (st->duration == AV_NOPTS_VALUE ||
1673 st->duration < duration)
1674 st->duration = duration;
1675 }
1676 }
1677 av_free_packet(pkt);
1678 }
1679
1680 fill_all_stream_timings(ic);
1681
1682 url_fseek(&ic->pb, old_offset, SEEK_SET);
1683 }
1684
1685 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1686 {
1687 int64_t file_size;
1688
1689 /* get the file size, if possible */
1690 if (ic->iformat->flags & AVFMT_NOFILE) {
1691 file_size = 0;
1692 } else {
1693 file_size = url_fsize(&ic->pb);
1694 if (file_size < 0)
1695 file_size = 0;
1696 }
1697 ic->file_size = file_size;
1698
1699 if ((!strcmp(ic->iformat->name, "mpeg") ||
1700 !strcmp(ic->iformat->name, "mpegts")) &&
1701 file_size && !ic->pb.is_streamed) {
1702 /* get accurate estimate from the PTSes */
1703 av_estimate_timings_from_pts(ic, old_offset);
1704 } else if (av_has_timings(ic)) {
1705 /* at least one component has timings - we use them for all
1706 the components */
1707 fill_all_stream_timings(ic);
1708 } else {
1709 /* less precise: use bit rate info */
1710 av_estimate_timings_from_bit_rate(ic);
1711 }
1712 av_update_stream_timings(ic);
1713
1714 #if 0
1715 {
1716 int i;
1717 AVStream *st;
1718 for(i = 0;i < ic->nb_streams; i++) {
1719 st = ic->streams[i];
1720 printf("%d: start_time: %0.3f duration: %0.3f\n",
1721 i, (double)st->start_time / AV_TIME_BASE,
1722 (double)st->duration / AV_TIME_BASE);
1723 }
1724 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1725 (double)ic->start_time / AV_TIME_BASE,
1726 (double)ic->duration / AV_TIME_BASE,
1727 ic->bit_rate / 1000);
1728 }
1729 #endif
1730 }
1731
1732 static int has_codec_parameters(AVCodecContext *enc)
1733 {
1734 int val;
1735 switch(enc->codec_type) {
1736 case CODEC_TYPE_AUDIO:
1737 val = enc->sample_rate;
1738 break;
1739 case CODEC_TYPE_VIDEO:
1740 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1741 break;
1742 default:
1743 val = 1;
1744 break;
1745 }
1746 return (val != 0);
1747 }
1748
1749 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1750 {
1751 int16_t *samples;
1752 AVCodec *codec;
1753 int got_picture, ret=0;
1754 AVFrame picture;
1755
1756 if(!st->codec->codec){
1757 codec = avcodec_find_decoder(st->codec->codec_id);
1758 if (!codec)
1759 return -1;
1760 ret = avcodec_open(st->codec, codec);
1761 if (ret < 0)
1762 return ret;
1763 }
1764
1765 if(!has_codec_parameters(st->codec)){
1766 switch(st->codec->codec_type) {
1767 case CODEC_TYPE_VIDEO:
1768 ret = avcodec_decode_video(st->codec, &picture,
1769 &got_picture, (uint8_t *)data, size);
1770 break;
1771 case CODEC_TYPE_AUDIO:
1772 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1773 if (!samples)
1774 goto fail;
1775 ret = avcodec_decode_audio(st->codec, samples,
1776 &got_picture, (uint8_t *)data, size);
1777 av_free(samples);
1778 break;
1779 default:
1780 break;
1781 }
1782 }
1783 fail:
1784 return ret;
1785 }
1786
1787 /* absolute maximum size we read until we abort */
1788 #define MAX_READ_SIZE 5000000
1789
1790 #define MAX_STD_TIMEBASES (60*12+5)
1791 static int get_std_framerate(int i){
1792 if(i<60*12) return i*1001;
1793 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1794 }
1795
1796 /**
1797 * Read packets of a media file to get stream information. This
1798 * is useful for file formats with no headers such as MPEG. This
1799 * function also computes the real frame rate in case of mpeg2 repeat
1800 * frame mode.
1801 * The logical file position is not changed by this function;
1802 * examined packets may be buffered for later processing.
1803 *
1804 * @param ic media file handle
1805 * @return >=0 if OK. AVERROR_xxx if error.
1806 * @todo let the user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
1807 */
1808 int av_find_stream_info(AVFormatContext *ic)
1809 {
1810 int i, count, ret, read_size, j;
1811 AVStream *st;
1812 AVPacket pkt1, *pkt;
1813 AVPacketList *pktl=NULL, **ppktl;
1814 int64_t last_dts[MAX_STREAMS];
1815 int duration_count[MAX_STREAMS]={0};
1816 double duration_error[MAX_STREAMS][MAX_STD_TIMEBASES]={{0}}; //FIXME malloc()?
1817 offset_t old_offset = url_ftell(&ic->pb);
1818
1819 for(i=0;i<ic->nb_streams;i++) {
1820 st = ic->streams[i];
1821 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1822 /* if(!st->time_base.num)
1823 st->time_base= */
1824 if(!st->codec->time_base.num)
1825 st->codec->time_base= st->time_base;
1826 }
1827 //only for the split stuff
1828 if (!st->parser) {
1829 st->parser = av_parser_init(st->codec->codec_id);
1830 if(st->need_parsing == 2 && st->parser){
1831 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1832 }
1833 }
1834 }
1835
1836 for(i=0;i<MAX_STREAMS;i++){
1837 last_dts[i]= AV_NOPTS_VALUE;
1838 }
1839
1840 count = 0;
1841 read_size = 0;
1842 ppktl = &ic->packet_buffer;
1843 for(;;) {
1844 /* check if one codec still needs to be handled */
1845 for(i=0;i<ic->nb_streams;i++) {
1846 st = ic->streams[i];
1847 if (!has_codec_parameters(st->codec))
1848 break;
1849 /* variable fps and no guess at the real fps */
1850 if( st->codec->time_base.den >= 101LL*st->codec->time_base.num
1851 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1852 break;
1853 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1854 break;
1855 }
1856 if (i == ic->nb_streams) {
1857 /* NOTE: if the format has no header, then we need to read
1858 some packets to get most of the streams, so we cannot
1859 stop here */
1860 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1861 /* if we found the info for all the codecs, we can stop */
1862 ret = count;
1863 break;
1864 }
1865 }
1866 /* we did not get all the codec info, but we read too much data */
1867 if (read_size >= MAX_READ_SIZE) {
1868 ret = count;
1869 break;
1870 }
1871
1872 /* NOTE: a new stream can be added here if there is no header in the file
1873 (AVFMTCTX_NOHEADER) */
1874 ret = av_read_frame_internal(ic, &pkt1);
1875 if (ret < 0) {
1876 /* EOF or error */
1877 ret = -1; /* we could not get all the codec parameters before EOF */
1878 for(i=0;i<ic->nb_streams;i++) {
1879 st = ic->streams[i];
1880 if (!has_codec_parameters(st->codec)){
1881 char buf[256];
1882 avcodec_string(buf, sizeof(buf), st->codec, 0);
1883 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1884 } else {
1885 ret = 0;
1886 }
1887 }
1888 break;
1889 }
1890
1891 pktl = av_mallocz(sizeof(AVPacketList));
1892 if (!pktl) {
1893 ret = AVERROR_NOMEM;
1894 break;
1895 }
1896
1897 /* add the packet in the buffered packet list */
1898 *ppktl = pktl;
1899 ppktl = &pktl->next;
1900
1901 pkt = &pktl->pkt;
1902 *pkt = pkt1;
1903
1904 /* duplicate the packet */
1905 if (av_dup_packet(pkt) < 0) {
1906 ret = AVERROR_NOMEM;
1907 break;
1908 }
1909
1910 read_size += pkt->size;
1911
1912 st = ic->streams[pkt->stream_index];
1913 if(st->codec_info_nb_frames>1) //FIXME move codec_info_nb_frames and codec_info_duration from AVStream into this func
1914 st->codec_info_duration += pkt->duration;
1915 if (pkt->duration != 0)
1916 st->codec_info_nb_frames++;
1917
1918 {
1919 int index= pkt->stream_index;
1920 int64_t last= last_dts[index];
1921 int64_t duration= pkt->dts - last;
1922
1923 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1924 double dur= duration * av_q2d(st->time_base);
1925
1926 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1927 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
1928 if(duration_count[index] < 2)
1929 memset(duration_error, 0, sizeof(duration_error));
1930 for(i=1; i<MAX_STD_TIMEBASES; i++){
1931 int framerate= get_std_framerate(i);
1932 int ticks= lrintf(dur*framerate/(1001*12));
1933 double error= dur - ticks*1001*12/(double)framerate;
1934 duration_error[index][i] += error*error;
1935 }
1936 duration_count[index]++;
1937
1938 if(st->codec_info_nb_frames == 0 && 0)
1939 st->codec_info_duration += duration;
1940 }
1941 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
1942 last_dts[pkt->stream_index]= pkt->dts;
1943 }
1944 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1945 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1946 if(i){
1947 st->codec->extradata_size= i;
1948 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1949 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1950 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1951 }
1952 }
1953
1954 /* if still no information, we try to open the codec and to
1955 decompress the frame. We try to avoid that in most cases as
1956 it takes longer and uses more memory. For MPEG-4, we need to
1957 decompress for QuickTime. */
1958 if (!has_codec_parameters(st->codec) /*&&
1959 (st->codec->codec_id == CODEC_ID_FLV1 ||
1960 st->codec->codec_id == CODEC_ID_H264 ||
1961 st->codec->codec_id == CODEC_ID_H263 ||
1962 st->codec->codec_id == CODEC_ID_H261 ||
1963 st->codec->codec_id == CODEC_ID_VORBIS ||
1964 st->codec->codec_id == CODEC_ID_MJPEG ||
1965 st->codec->codec_id == CODEC_ID_PNG ||
1966 st->codec->codec_id == CODEC_ID_PAM ||
1967 st->codec->codec_id == CODEC_ID_PGM ||
1968 st->codec->codec_id == CODEC_ID_PGMYUV ||
1969 st->codec->codec_id == CODEC_ID_PBM ||
1970 st->codec->codec_id == CODEC_ID_PPM ||
1971 st->codec->codec_id == CODEC_ID_SHORTEN ||
1972 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1973 try_decode_frame(st, pkt->data, pkt->size);
1974
1975 if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
1976 break;
1977 }
1978 count++;
1979 }
1980
1981 // close codecs which were opened in try_decode_frame()
1982 for(i=0;i<ic->nb_streams;i++) {
1983 st = ic->streams[i];
1984 if(st->codec->codec)
1985 avcodec_close(st->codec);
1986 }
1987 for(i=0;i<ic->nb_streams;i++) {
1988 st = ic->streams[i];
1989 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1990 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
1991 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
1992
1993 if(duration_count[i]
1994 && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) /*&&
1995 //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
1996 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
1997 double best_error= 2*av_q2d(st->time_base);
1998 best_error= best_error*best_error*duration_count[i]*1000*12*30;
1999
2000 for(j=1; j<MAX_STD_TIMEBASES; j++){
2001 double error= duration_error[i][j] * get_std_framerate(j);
2002 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2003 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2004 if(error < best_error){
2005 best_error= error;
2006 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2007 }
2008 }
2009 }
2010
2011 if (!st->r_frame_rate.num){
2012 if( st->codec->time_base.den * (int64_t)st->time_base.num
2013 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2014 st->r_frame_rate.num = st->codec->time_base.den;
2015 st->r_frame_rate.den = st->codec->time_base.num;
2016 }else{
2017 st->r_frame_rate.num = st->time_base.den;
2018 st->r_frame_rate.den = st->time_base.num;
2019 }
2020 }
2021 }
2022 }
2023
2024 av_estimate_timings(ic, old_offset);
2025 #if 0
2026 /* correct DTS for b frame streams with no timestamps */
2027 for(i=0;i<ic->nb_streams;i++) {
2028 st = ic->streams[i];
2029 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2030 if(b-frames){
2031 ppktl = &ic->packet_buffer;
2032 while(ppkt1){
2033 if(ppkt1->stream_index != i)
2034 continue;
2035 if(ppkt1->pkt->dts < 0)
2036 break;
2037 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2038 break;
2039 ppkt1->pkt->dts -= delta;
2040 ppkt1= ppkt1->next;
2041 }
2042 if(ppkt1)
2043 continue;
2044 st->cur_dts -= delta;
2045 }
2046 }
2047 }
2048 #endif
2049 return ret;
2050 }
2051
2052 /*******************************************************/
2053
2054 /**
2055 * Start playing a network-based stream (e.g. an RTSP stream) at the
2056 * current position.
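 *
 * A minimal usage sketch (hypothetical caller; "ic" is assumed to be an
 * AVFormatContext opened earlier, e.g. with av_open_input_file()):
 * @code
 * if (av_read_play(ic) == AVERROR_NOTSUPP)
 *     av_log(ic, AV_LOG_INFO, "play/resume not supported by this demuxer\n");
 * @endcode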
2057 */
2058 int av_read_play(AVFormatContext *s)
2059 {
2060 if (!s->iformat->read_play)
2061 return AVERROR_NOTSUPP;
2062 return s->iformat->read_play(s);
2063 }
2064
2065 /**
2066 * Pause a network-based stream (e.g. an RTSP stream).
2067 *
2068 * Use av_read_play() to resume it.
2069 */
2070 int av_read_pause(AVFormatContext *s)
2071 {
2072 if (!s->iformat->read_pause)
2073 return AVERROR_NOTSUPP;
2074 return s->iformat->read_pause(s);
2075 }
2076
2077 /**
2078 * Close a media file (but not its codecs).
2079 *
2080 * @param s media file handle
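 *
 * A minimal open/close pairing sketch (hypothetical; av_open_input_file() and
 * av_read_frame() are assumed to come from the same library version):
 * @code
 * AVFormatContext *ic;
 * AVPacket pkt;
 * if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) == 0) {
 *     while (av_read_frame(ic, &pkt) >= 0)
 *         av_free_packet(&pkt);
 *     av_close_input_file(ic);
 * }
 * @endcode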
2081 */
2082 void av_close_input_file(AVFormatContext *s)
2083 {
2084 int i, must_open_file;
2085 AVStream *st;
2086
2087 /* free previous packet */
2088 if (s->cur_st && s->cur_st->parser)
2089 av_free_packet(&s->cur_pkt);
2090
2091 if (s->iformat->read_close)
2092 s->iformat->read_close(s);
2093 for(i=0;i<s->nb_streams;i++) {
2094 /* free all data in a stream component */
2095 st = s->streams[i];
2096 if (st->parser) {
2097 av_parser_close(st->parser);
2098 }
2099 av_free(st->index_entries);
2100 av_free(st->codec->extradata);
2101 av_free(st->codec);
2102 av_free(st);
2103 }
2104 flush_packet_queue(s);
2105 must_open_file = 1;
2106 if (s->iformat->flags & AVFMT_NOFILE) {
2107 must_open_file = 0;
2108 }
2109 if (must_open_file) {
2110 url_fclose(&s->pb);
2111 }
2112 av_freep(&s->priv_data);
2113 av_free(s);
2114 }
2115
2116 /**
2117 * Add a new stream to a media file.
2118 *
2119 * Can only be called in the read_header() function. If the flag
2120 * AVFMTCTX_NOHEADER is set in the format context, then new streams
2121 * can be added in read_packet() too.
2122 *
2123 * @param s media file handle
2124 * @param id file format dependent stream id
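 *
 * A sketch of a demuxer read_header() adding one audio stream (the function
 * name, codec id and parameters are illustrative, not taken from a real demuxer):
 * @code
 * static int my_read_header(AVFormatContext *s, AVFormatParameters *ap)
 * {
 *     AVStream *st = av_new_stream(s, 0);
 *     if (!st)
 *         return AVERROR_NOMEM;
 *     st->codec->codec_type  = CODEC_TYPE_AUDIO;
 *     st->codec->codec_id    = CODEC_ID_PCM_S16LE;
 *     st->codec->sample_rate = 44100;
 *     st->codec->channels    = 2;
 *     av_set_pts_info(st, 64, 1, st->codec->sample_rate);
 *     return 0;
 * }
 * @endcode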
2125 */
2126 AVStream *av_new_stream(AVFormatContext *s, int id)
2127 {
2128 AVStream *st;
2129 int i;
2130
2131 if (s->nb_streams >= MAX_STREAMS)
2132 return NULL;
2133
2134 st = av_mallocz(sizeof(AVStream));
2135 if (!st)
2136 return NULL;
2137
2138 st->codec= avcodec_alloc_context();
2139 if (s->iformat) {
2140 /* no default bitrate if decoding */
2141 st->codec->bit_rate = 0;
2142 }
2143 st->index = s->nb_streams;
2144 st->id = id;
2145 st->start_time = AV_NOPTS_VALUE;
2146 st->duration = AV_NOPTS_VALUE;
2147 st->cur_dts = AV_NOPTS_VALUE;
2148
2149 /* default pts setting is MPEG-like */
2150 av_set_pts_info(st, 33, 1, 90000);
2151 st->last_IP_pts = AV_NOPTS_VALUE;
2152 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2153 st->pts_buffer[i]= AV_NOPTS_VALUE;
2154
2155 s->streams[s->nb_streams++] = st;
2156 return st;
2157 }
2158
2159 /************************************************************/
2160 /* output media file */
2161
2162 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2163 {
2164 int ret;
2165
2166 if (s->oformat->priv_data_size > 0) {
2167 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2168 if (!s->priv_data)
2169 return AVERROR_NOMEM;
2170 } else
2171 s->priv_data = NULL;
2172
2173 if (s->oformat->set_parameters) {
2174 ret = s->oformat->set_parameters(s, ap);
2175 if (ret < 0)
2176 return ret;
2177 }
2178 return 0;
2179 }
2180
2181 /**
2182 * Allocate the stream private data and write the stream header to an
2183 * output media file.
2184 *
2185 * @param s media file handle
2186 * @return 0 if OK. AVERROR_xxx if error.
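 *
 * A minimal muxing lifecycle sketch (hypothetical; "oc" is an output context
 * whose oformat, streams and codec parameters were set up beforehand and
 * whose output file was opened, e.g. with url_fopen()):
 * @code
 * if (av_set_parameters(oc, NULL) < 0 || av_write_header(oc) < 0)
 *     return -1;
 * // ... write packets with av_write_frame() or av_interleaved_write_frame() ...
 * av_write_trailer(oc);
 * @endcode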
2187 */
2188 int av_write_header(AVFormatContext *s)
2189 {
2190 int ret, i;
2191 AVStream *st;
2192
2193 // some sanity checks
2194 for(i=0;i<s->nb_streams;i++) {
2195 st = s->streams[i];
2196
2197 switch (st->codec->codec_type) {
2198 case CODEC_TYPE_AUDIO:
2199 if(st->codec->sample_rate<=0){
2200 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2201 return -1;
2202 }
2203 break;
2204 case CODEC_TYPE_VIDEO:
2205 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2206 av_log(s, AV_LOG_ERROR, "time base not set\n");
2207 return -1;
2208 }
2209 if(st->codec->width<=0 || st->codec->height<=0){
2210 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2211 return -1;
2212 }
2213 break;
2214 }
2215
2216 if(s->oformat->codec_tag){
2217 if(st->codec->codec_tag){
2218 //FIXME
2219 //check that tag + id is in the table
2220 //if neither is in the table -> ok
2221 //if tag is in the table with another id -> FAIL
2222 //if id is in the table with another tag -> FAIL unless strict < ?
2223 }else
2224 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2225 }
2226 }
2227
2228 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2229 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2230 if (!s->priv_data)
2231 return AVERROR_NOMEM;
2232 }
2233
2234 if(s->oformat->write_header){
2235 ret = s->oformat->write_header(s);
2236 if (ret < 0)
2237 return ret;
2238 }
2239
2240 /* init PTS generation */
2241 for(i=0;i<s->nb_streams;i++) {
2242 int64_t den = AV_NOPTS_VALUE;
2243 st = s->streams[i];
2244
2245 switch (st->codec->codec_type) {
2246 case CODEC_TYPE_AUDIO:
2247 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2248 break;
2249 case CODEC_TYPE_VIDEO:
2250 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2251 break;
2252 default:
2253 break;
2254 }
2255 if (den != AV_NOPTS_VALUE) {
2256 if (den <= 0)
2257 return AVERROR_INVALIDDATA;
2258 av_frac_init(&st->pts, 0, 0, den);
2259 }
2260 }
2261 return 0;
2262 }
2263
2264 //FIXME merge with compute_pkt_fields
2265 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2266 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2267 int num, den, frame_size, i;
2268
2269 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2270
2271 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2272 return -1;*/
2273
2274 /* duration field */
2275 if (pkt->duration == 0) {
2276 compute_frame_duration(&num, &den, st, NULL, pkt);
2277 if (den && num) {
2278 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2279 }
2280 }
2281
2282 //XXX/FIXME this is a temporary hack until all encoders output pts
2283 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2284 pkt->dts=
2285 // pkt->pts= st->cur_dts;
2286 pkt->pts= st->pts.val;
2287 }
2288
2289 //calculate dts from pts
2290 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2291 st->pts_buffer[0]= pkt->pts;
2292 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2293 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2294 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2295 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2296
2297 pkt->dts= st->pts_buffer[0];
2298 }
2299
2300 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2301 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2302 return -1;
2303 }
2304 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2305 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2306 return -1;
2307 }
2308
2309 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2310 st->cur_dts= pkt->dts;
2311 st->pts.val= pkt->dts;
2312
2313 /* update pts */
2314 switch (st->codec->codec_type) {
2315 case CODEC_TYPE_AUDIO:
2316 frame_size = get_audio_frame_size(st->codec, pkt->size);
2317
2318 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2319 but it would be better if we had the real timestamps from the encoder */
2320 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2321 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2322 }
2323 break;
2324 case CODEC_TYPE_VIDEO:
2325 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2326 break;
2327 default:
2328 break;
2329 }
2330 return 0;
2331 }
2332
2333 static void truncate_ts(AVStream *st, AVPacket *pkt){
2334 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2335
2336 // if(pkt->dts < 0)
2337 // pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2338
2339 if (pkt->pts != AV_NOPTS_VALUE)
2340 pkt->pts &= pts_mask;
2341 if (pkt->dts != AV_NOPTS_VALUE)
2342 pkt->dts &= pts_mask;
2343 }
2344
2345 /**
2346 * Write a packet to an output media file.
2347 *
2348 * The packet shall contain one audio or video frame.
2349 * The packet must be correctly interleaved according to the container
2350 * specification; if not, av_interleaved_write_frame() must be used.
2351 *
2352 * @param s media file handle
2353 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2354 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
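 *
 * A sketch of writing one encoded frame (hypothetical names: "oc", "outbuf",
 * "outbuf_size" and "frame_pts" come from the caller's encoder loop):
 * @code
 * AVPacket pkt;
 * av_init_packet(&pkt);
 * pkt.stream_index = 0;              // first (and only) stream in this sketch
 * pkt.data         = outbuf;
 * pkt.size         = outbuf_size;
 * pkt.pts          = frame_pts;      // in the stream's time_base units
 * pkt.flags       |= PKT_FLAG_KEY;   // assuming this is a key frame
 * if (av_write_frame(oc, &pkt) < 0)
 *     av_log(oc, AV_LOG_ERROR, "write failed\n");
 * @endcode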
2355 */
2356 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2357 {
2358 int ret;
2359
2360 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2361 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2362 return ret;
2363
2364 truncate_ts(s->streams[pkt->stream_index], pkt);
2365
2366 ret= s->oformat->write_packet(s, pkt);
2367 if(!ret)
2368 ret= url_ferror(&s->pb);
2369 return ret;
2370 }
2371
2372 /**
2373 * Interleave a packet per DTS in an output media file.
2374 *
2375 * Packets with pkt->destruct == av_destruct_packet will be freed inside this
2376 * function, so they cannot be used after it; note that calling av_free_packet() on them is still safe.
2377 *
2378 * @param s media file handle
2379 * @param out the interleaved packet will be output here
2380 * @param in the input packet
2381 * @param flush 1 if no further packets are available as input and all
2382 * remaining packets should be output
2383 * @return 1 if a packet was output, 0 if no packet could be output,
2384 * < 0 if an error occurred
2385 */
2386 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2387 AVPacketList *pktl, **next_point, *this_pktl;
2388 int stream_count=0;
2389 int streams[MAX_STREAMS];
2390
2391 if(pkt){
2392 AVStream *st= s->streams[ pkt->stream_index];
2393
2394 // assert(pkt->destruct != av_destruct_packet); //FIXME
2395
2396 this_pktl = av_mallocz(sizeof(AVPacketList));
2397 this_pktl->pkt= *pkt;
2398 if(pkt->destruct == av_destruct_packet)
2399 pkt->destruct= NULL; // non shared -> must keep original from being freed
2400 else
2401 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2402
2403 next_point = &s->packet_buffer;
2404 while(*next_point){
2405 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2406 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2407 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2408 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2409 break;
2410 next_point= &(*next_point)->next;
2411 }
2412 this_pktl->next= *next_point;
2413 *next_point= this_pktl;
2414 }
2415
2416 memset(streams, 0, sizeof(streams));
2417 pktl= s->packet_buffer;
2418 while(pktl){
2419 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2420 if(streams[ pktl->pkt.stream_index ] == 0)
2421 stream_count++;
2422 streams[ pktl->pkt.stream_index ]++;
2423 pktl= pktl->next;
2424 }
2425
2426 if(s->nb_streams == stream_count || (flush && stream_count)){
2427 pktl= s->packet_buffer;
2428 *out= pktl->pkt;
2429
2430 s->packet_buffer= pktl->next;
2431 av_freep(&pktl);
2432 return 1;
2433 }else{
2434 av_init_packet(out);
2435 return 0;
2436 }
2437 }
2438
2439 /**
2440 * Interleaves an AVPacket correctly so that it can be muxed.
2441 * @param out the interleaved packet will be output here
2442 * @param in the input packet
2443 * @param flush 1 if no further packets are available as input and all
2444 * remaining packets should be output
2445 * @return 1 if a packet was output, 0 if no packet could be output,
2446 * < 0 if an error occurred
2447 */
2448 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2449 if(s->oformat->interleave_packet)
2450 return s->oformat->interleave_packet(s, out, in, flush);
2451 else
2452 return av_interleave_packet_per_dts(s, out, in, flush);
2453 }
2454
2455 /**
2456 * Writes a packet to an output media file ensuring correct interleaving.
2457 *
2458 * The packet must contain one audio or video frame.
2459 * If the packets are already correctly interleaved, the application should
2460 * call av_write_frame() instead, as it is slightly faster. It is also important
2461 * to keep in mind that completely non-interleaved input will need huge amounts
2462 * of memory to interleave with this, so it is preferable to interleave at the
2463 * demuxer level.
2464 *
2465 * @param s media file handle
2466 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2467 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
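 *
 * A remuxing loop sketch (hypothetical; "ic" is an opened input context, "oc"
 * an output context after av_write_header(); timestamp rescaling between the
 * two time bases is omitted here):
 * @code
 * AVPacket pkt;
 * while (av_read_frame(ic, &pkt) >= 0) {
 *     if (av_interleaved_write_frame(oc, &pkt) < 0)
 *         break;
 * }
 * av_write_trailer(oc);
 * @endcode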
2468 */
2469 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2470 AVStream *st= s->streams[ pkt->stream_index];
2471
2472 //FIXME/XXX/HACK drop zero sized packets
2473 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2474 return 0;
2475
2476 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2477 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2478 return -1;
2479
2480 if(pkt->dts == AV_NOPTS_VALUE)
2481 return -1;
2482
2483 for(;;){
2484 AVPacket opkt;
2485 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2486 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2487 return ret;
2488
2489 truncate_ts(s->streams[opkt.stream_index], &opkt);
2490 ret= s->oformat->write_packet(s, &opkt);
2491
2492 av_free_packet(&opkt);
2493 pkt= NULL;
2494
2495 if(ret<0)
2496 return ret;
2497 if(url_ferror(&s->pb))
2498 return url_ferror(&s->pb);
2499 }
2500 }
2501
2502 /**
2503 * @brief Write the stream trailer to an output media file and
2504 * free the file private data.
2505 *
2506 * @param s media file handle
2507 * @return 0 if OK. AVERROR_xxx if error.
2508 */
2509 int av_write_trailer(AVFormatContext *s)
2510 {
2511 int ret, i;
2512
2513 for(;;){
2514 AVPacket pkt;
2515 ret= av_interleave_packet(s, &pkt, NULL, 1);
2516 if(ret<0) //FIXME cleanup needed for ret<0 ?
2517 goto fail;
2518 if(!ret)
2519 break;
2520
2521 truncate_ts(s->streams[pkt.stream_index], &pkt);
2522 ret= s->oformat->write_packet(s, &pkt);
2523
2524 av_free_packet(&pkt);
2525
2526 if(ret<0)
2527 goto fail;
2528 if(url_ferror(&s->pb))
2529 goto fail;
2530 }
2531
2532 if(s->oformat->write_trailer)
2533 ret = s->oformat->write_trailer(s);
2534 fail:
2535 if(ret == 0)
2536 ret=url_ferror(&s->pb);
2537 for(i=0;i<s->nb_streams;i++)
2538 av_freep(&s->streams[i]->priv_data);
2539 av_freep(&s->priv_data);
2540 return ret;
2541 }
2542
2543 /* "user interface" functions */
2544
2545 void dump_format(AVFormatContext *ic,
2546 int index,
2547 const char *url,
2548 int is_output)
2549 {
2550 int i, flags;
2551 char buf[256];
2552
2553 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2554 is_output ? "Output" : "Input",
2555 index,
2556 is_output ? ic->oformat->name : ic->iformat->name,
2557 is_output ? "to" : "from", url);
2558 if (!is_output) {
2559 av_log(NULL, AV_LOG_INFO, " Duration: ");
2560 if (ic->duration != AV_NOPTS_VALUE) {
2561 int hours, mins, secs, us;
2562 secs = ic->duration / AV_TIME_BASE;
2563 us = ic->duration % AV_TIME_BASE;
2564 mins = secs / 60;
2565 secs %= 60;
2566 hours = mins / 60;
2567 mins %= 60;
2568 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2569 (10 * us) / AV_TIME_BASE);
2570 } else {
2571 av_log(NULL, AV_LOG_INFO, "N/A");
2572 }
2573 if (ic->start_time != AV_NOPTS_VALUE) {
2574 int secs, us;
2575 av_log(NULL, AV_LOG_INFO, ", start: ");
2576 secs = ic->start_time / AV_TIME_BASE;
2577 us = ic->start_time % AV_TIME_BASE;
2578 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2579 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2580 }
2581 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2582 if (ic->bit_rate) {
2583 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2584 } else {
2585 av_log(NULL, AV_LOG_INFO, "N/A");
2586 }
2587 av_log(NULL, AV_LOG_INFO, "\n");
2588 }
2589 for(i=0;i<ic->nb_streams;i++) {
2590 AVStream *st = ic->streams[i];
2591 int g= ff_gcd(st->time_base.num, st->time_base.den);
2592 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2593 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2594 /* the PID is important information, so we display it */
2595 /* XXX: add a generic system */
2596 if (is_output)
2597 flags = ic->oformat->flags;
2598 else
2599 flags = ic->iformat->flags;
2600 if (flags & AVFMT_SHOW_IDS) {
2601 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2602 }
2603 if (strlen(st->language) > 0) {
2604 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2605 }
2606 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2607 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2608 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2609 if(st->r_frame_rate.den && st->r_frame_rate.num)
2610 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
2611 /* else if(st->time_base.den && st->time_base.num)
2612 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
2613 else
2614 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
2615 }
2616 av_log(NULL, AV_LOG_INFO, "\n");
2617 }
2618 }
2619
2620 typedef struct {
2621 const char *abv;
2622 int width, height;
2623 int frame_rate, frame_rate_base;
2624 } AbvEntry;
2625
2626 static AbvEntry frame_abvs[] = {
2627 { "ntsc", 720, 480, 30000, 1001 },
2628 { "pal", 720, 576, 25, 1 },
2629 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2630 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2631 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2632 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2633 { "film", 352, 240, 24, 1 },
2634 { "ntsc-film", 352, 240, 24000, 1001 },
2635 { "sqcif", 128, 96, 0, 0 },
2636 { "qcif", 176, 144, 0, 0 },
2637 { "cif", 352, 288, 0, 0 },
2638 { "4cif", 704, 576, 0, 0 },
2639 };
2640
2641 /**
2642 * Parses width and height out of the string str.
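 *
 * Illustrative calls (return value checks omitted):
 * @code
 * int w, h;
 * parse_image_size(&w, &h, "640x480");  // w = 640, h = 480
 * parse_image_size(&w, &h, "cif");      // w = 352, h = 288 (abbreviation table)
 * @endcode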
2643 */
2644 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2645 {
2646 int i;
2647 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2648 const char *p;
2649 int frame_width = 0, frame_height = 0;
2650
2651 for(i=0;i<n;i++) {
2652 if (!strcmp(frame_abvs[i].abv, str)) {
2653 frame_width = frame_abvs[i].width;
2654 frame_height = frame_abvs[i].height;
2655 break;
2656 }
2657 }
2658 if (i == n) {
2659 p = str;
2660 frame_width = strtol(p, (char **)&p, 10);
2661 if (*p)
2662 p++;
2663 frame_height = strtol(p, (char **)&p, 10);
2664 }
2665 if (frame_width <= 0 || frame_height <= 0)
2666 return -1;
2667 *width_ptr = frame_width;
2668 *height_ptr = frame_height;
2669 return 0;
2670 }
2671
2672 /**
2673 * Converts frame rate from string to a fraction.
2674 *
2675 * First we try to get an exact integer or fractional frame rate.
2676 * If this fails, we convert the frame rate to a double and return
2677 * an approximate fraction using DEFAULT_FRAME_RATE_BASE.
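 *
 * Illustrative calls (return value checks omitted):
 * @code
 * int rate, base;
 * parse_frame_rate(&rate, &base, "30000/1001");  // rate = 30000, base = 1001
 * parse_frame_rate(&rate, &base, "ntsc");        // same, via the abbreviation table
 * parse_frame_rate(&rate, &base, "25");          // rate = 25, base = 1
 * @endcode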
2678 */
2679 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2680 {
2681 int i;
2682 char* cp;
2683
2684 /* First, we check our abbreviation table */
2685 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2686 if (!strcmp(frame_abvs[i].abv, arg)) {
2687 *frame_rate = frame_abvs[i].frame_rate;
2688 *frame_rate_base = frame_abvs[i].frame_rate_base;
2689 return 0;
2690 }
2691
2692 /* Then, we try to parse it as fraction */
2693 cp = strchr(arg, '/');
2694 if (!cp)
2695 cp = strchr(arg, ':');
2696 if (cp) {
2697 char* cpp;
2698 *frame_rate = strtol(arg, &cpp, 10);
2699 if (cpp != arg || cpp == cp)
2700 *frame_rate_base = strtol(cp+1, &cpp, 10);
2701 else
2702 *frame_rate = 0;
2703 }
2704 else {
2705 /* Finally we give up and parse it as double */
2706 AVRational time_base = av_d2q(strtod(arg, 0), DEFAULT_FRAME_RATE_BASE);
2707 *frame_rate_base = time_base.den;
2708 *frame_rate = time_base.num;
2709 }
2710 if (!*frame_rate || !*frame_rate_base)
2711 return -1;
2712 else
2713 return 0;
2714 }
2715
2716 /**
2717 * Converts a date string to the number of microseconds since Jan 1st, 1970.
2718 *
2719 * @code
2720 * Syntax:
2721 * - If not a duration:
2722 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2723 * Time is local time unless Z is suffixed to the end, in which case it is GMT.
2724 * Returns the date in microseconds since 1970.
2725 *
2726 * - If a duration:
2727 * HH[:MM[:SS[.m...]]]
2728 * S+[.m...]
2729 * @endcode
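 *
 * Illustrative calls (the absolute date is interpreted in local time here):
 * @code
 * int64_t t = parse_date("2007-03-04 12:30:00", 0);  // absolute date
 * int64_t d = parse_date("00:00:05.5", 1);           // duration: 5500000 microseconds
 * @endcode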
2730 */
2731 #ifndef CONFIG_WINCE
2732 int64_t parse_date(const char *datestr, int duration)
2733 {
2734 const char *p;
2735 int64_t t;
2736 struct tm dt;
2737 int i;
2738 static const char *date_fmt[] = {
2739 "%Y-%m-%d",
2740 "%Y%m%d",
2741 };
2742 static const char *time_fmt[] = {
2743 "%H:%M:%S",
2744 "%H%M%S",
2745 };
2746 const char *q;
2747 int is_utc, len;
2748 char lastch;
2749 int negative = 0;
2750
2751 #undef time
2752 time_t now = time(0);
2753
2754 len = strlen(datestr);
2755 if (len > 0)
2756 lastch = datestr[len - 1];
2757 else
2758 lastch = '\0';
2759 is_utc = (lastch == 'z' || lastch == 'Z');
2760
2761 memset(&dt, 0, sizeof(dt));
2762
2763 p = datestr;
2764 q = NULL;
2765 if (!duration) {
2766 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2767 q = small_strptime(p, date_fmt[i], &dt);
2768 if (q) {
2769 break;
2770 }
2771 }
2772
2773 if (!q) {
2774 if (is_utc) {
2775 dt = *gmtime(&now);
2776 } else {
2777 dt = *localtime(&now);
2778 }
2779 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2780 } else {
2781 p = q;
2782 }
2783
2784 if (*p == 'T' || *p == 't' || *p == ' ')
2785 p++;
2786
2787 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2788 q = small_strptime(p, time_fmt[i], &dt);
2789 if (q) {
2790 break;
2791 }
2792 }
2793 } else {
2794 if (p[0] == '-') {
2795 negative = 1;
2796 ++p;
2797 }
2798 q = small_strptime(p, time_fmt[0], &dt);
2799 if (!q) {
2800 dt.tm_sec = strtol(p, (char **)&q, 10);
2801 dt.tm_min = 0;
2802 dt.tm_hour = 0;
2803 }
2804 }
2805
2806 /* Now we have all the fields that we can get */
2807 if (!q) {
2808 if (duration)
2809 return 0;
2810 else
2811 return now * INT64_C(1000000);
2812 }
2813
2814 if (duration) {
2815 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2816 } else {
2817 dt.tm_isdst = -1; /* unknown */
2818 if (is_utc) {
2819 t = mktimegm(&dt);
2820 } else {
2821 t = mktime(&dt);
2822 }
2823 }
2824
2825 t *= 1000000;
2826
2827 if (*q == '.') {
2828 int val, n;
2829 q++;
2830 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2831 if (!isdigit(*q))
2832 break;
2833 val += n * (*q - '0');
2834 }
2835 t += val;
2836 }
2837 return negative ? -t : t;
2838 }
2839 #endif /* CONFIG_WINCE */
2840
2841 /**
2842 * Attempts to find a specific tag in a URL.
2843 *
2844 * Syntax: '?tag1=val1&tag2=val2...'. Only minimal URL decoding is done.
2845 * Returns 1 if found.
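 *
 * Illustrative call (hypothetical tag names and values):
 * @code
 * char buf[64];
 * if (find_info_tag(buf, sizeof(buf), "bitrate", "?foo=1&bitrate=48000"))
 *     av_log(NULL, AV_LOG_INFO, "bitrate=%s\n", buf);  // buf now holds "48000"
 * @endcode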
2846 */
2847 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2848 {
2849 const char *p;
2850 char tag[128], *q;
2851
2852 p = info;
2853 if (*p == '?')
2854 p++;
2855 for(;;) {
2856 q = tag;
2857 while (*p != '\0' && *p != '=' && *p != '&') {
2858 if ((q - tag) < sizeof(tag) - 1)
2859 *q++ = *p;
2860 p++;
2861 }
2862 *q = '\0';
2863 q = arg;
2864 if (*p == '=') {
2865 p++;
2866 while (*p != '&' && *p != '\0') {
2867 if ((q - arg) < arg_size - 1) {
2868 if (*p == '+')
2869 *q++ = ' ';
2870 else
2871 *q++ = *p;
2872 }
2873 p++;
2874 }
2875 *q = '\0';
2876 }
2877 if (!strcmp(tag, tag1))
2878 return 1;
2879 if (*p != '&')
2880 break;
2881 p++;
2882 }
2883 return 0;
2884 }
2885
2886 /**
2887 * Returns in 'buf' the path with '%d' replaced by a number.
2888 *
2889 * Also handles the '%0nd' format where 'n' is the total number
2890 * of digits and '%%'.
2891 *
2892 * @param buf destination buffer
2893 * @param buf_size destination buffer size
2894 * @param path numbered sequence string
2895 * @param number frame number
2896 * @return 0 if OK, -1 if format error.
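 *
 * Illustrative calls:
 * @code
 * char name[1024];
 * av_get_frame_filename(name, sizeof(name), "img%03d.jpg", 7);   // -> "img007.jpg"
 * av_get_frame_filename(name, sizeof(name), "frame%d.png", 12);  // -> "frame12.png"
 * @endcode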
2897 */
2898 int av_get_frame_filename(char *buf, int buf_size,
2899 const char *path, int number)
2900 {
2901 const char *p;
2902 char *q, buf1[20], c;
2903 int nd, len, percentd_found;
2904
2905 q = buf;
2906 p = path;
2907 percentd_found = 0;
2908 for(;;) {
2909 c = *p++;
2910 if (c == '\0')
2911 break;
2912 if (c == '%') {
2913 do {
2914 nd = 0;
2915 while (isdigit(*p)) {
2916 nd = nd * 10 + *p++ - '0';
2917 }
2918 c = *p++;
2919 } while (isdigit(c));
2920
2921 switch(c) {
2922 case '%':
2923 goto addchar;
2924 case 'd':
2925 if (percentd_found)
2926 goto fail;
2927 percentd_found = 1;
2928 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2929 len = strlen(buf1);
2930 if ((q - buf + len) > buf_size - 1)
2931 goto fail;
2932 memcpy(q, buf1, len);
2933 q += len;
2934 break;
2935 default:
2936 goto fail;
2937 }
2938 } else {
2939 addchar:
2940 if ((q - buf) < buf_size - 1)
2941 *q++ = c;
2942 }
2943 }
2944 if (!percentd_found)
2945 goto fail;
2946 *q = '\0';
2947 return 0;
2948 fail:
2949 *q = '\0';
2950 return -1;
2951 }
2952
2953 /**
2954 * Print a nice hex dump of a buffer
2955 * @param f stream for output
2956 * @param buf buffer
2957 * @param size buffer size
2958 */
2959 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2960 {
2961 int len, i, j, c;
2962
2963 for(i=0;i<size;i+=16) {
2964 len = size - i;
2965 if (len > 16)
2966 len = 16;
2967 fprintf(f, "%08x ", i);
2968 for(j=0;j<16;j++) {
2969 if (j < len)
2970 fprintf(f, " %02x", buf[i+j]);
2971 else
2972 fprintf(f, " ");
2973 }
2974 fprintf(f, " ");
2975 for(j=0;j<len;j++) {
2976 c = buf[i+j];
2977 if (c < ' ' || c > '~')
2978 c = '.';
2979 fprintf(f, "%c", c);
2980 }
2981 fprintf(f, "\n");
2982 }
2983 }
2984
2985 /**
2986 * Print a nice dump of a packet on 'f'
2987 * @param f stream for output
2988 * @param pkt packet to dump
2989 * @param dump_payload true if the payload must be displayed too
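 *
 * Quick debugging sketch (hypothetical; "pkt" comes from av_read_frame()):
 * @code
 * av_pkt_dump(stderr, &pkt, 0);  // header fields only
 * av_pkt_dump(stderr, &pkt, 1);  // also hex-dumps the payload via av_hex_dump()
 * @endcode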
2990 */
2991 //FIXME needs to know the time_base
2992 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2993 {
2994 fprintf(f, "stream #%d:\n", pkt->stream_index);
2995 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2996 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2997 /* DTS is _always_ valid after av_read_frame() */
2998 fprintf(f, " dts=");
2999 if (pkt->dts == AV_NOPTS_VALUE)
3000 fprintf(f, "N/A");
3001 else
3002 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
3003 /* PTS may be not known if B frames are present */
3004 fprintf(f, " pts=");
3005 if (pkt->pts == AV_NOPTS_VALUE)
3006 fprintf(f, "N/A");
3007 else
3008 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
3009 fprintf(f, "\n");
3010 fprintf(f, " size=%d\n", pkt->size);
3011 if (dump_payload)
3012 av_hex_dump(f, pkt->data, pkt->size);
3013 }
3014
3015 void url_split(char *proto, int proto_size,
3016 char *authorization, int authorization_size,
3017 char *hostname, int hostname_size,
3018 int *port_ptr,
3019 char *path, int path_size,
3020 const char *url)
3021 {
3022 const char *p;
3023 char *q;
3024 int port;
3025
3026 port = -1;
3027
3028 p = url;
3029 q = proto;
3030 while (*p != ':' && *p != '\0') {
3031 if ((q - proto) < proto_size - 1)
3032 *q++ = *p;
3033 p++;
3034 }
3035 if (proto_size > 0)
3036 *q = '\0';
3037 if (authorization_size > 0)
3038 authorization[0] = '\0';
3039 if (*p == '\0') {
3040 if (proto_size > 0)
3041 proto[0] = '\0';
3042 if (hostname_size > 0)
3043 hostname[0] = '\0';
3044 p = url;
3045 } else {
3046 char *at,*slash; // PETR: position of '@' character and '/' character
3047
3048 p++;
3049 if (*p == '/')
3050 p++;
3051 if (*p == '/')
3052 p++;
3053 at = strchr(p,'@'); // PETR: get the position of '@'
3054 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
3055 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
3056
3057 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
3058
3059 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
3060 if (*p == '@') { // PETR: passed '@'
3061 if (authorization_size > 0)
3062 *q = '\0';
3063 q = hostname;
3064 at = NULL;
3065 } else if (!at) { // PETR: hostname
3066 if ((q - hostname) < hostname_size - 1)
3067 *q++ = *p;
3068 } else {
3069 if ((q - authorization) < authorization_size - 1)
3070 *q++ = *p;
3071 }
3072 p++;
3073 }
3074 if (hostname_size > 0)
3075 *q = '\0';
3076 if (*p == ':') {
3077 p++;
3078 port = strtoul(p, (char **)&p, 10);
3079 }
3080 }
3081 if (port_ptr)
3082 *port_ptr = port;
3083 pstrcpy(path, path_size, p);
3084 }
3085
3086 /**
3087 * Set the pts for a given stream.
3088 *
3089 * @param s stream
3090 * @param pts_wrap_bits number of bits effectively used by the pts
3091 * (used for wrap control, 33 is the value for MPEG)
3092 * @param pts_num numerator to convert to seconds (MPEG: 1)
3093 * @param pts_den denominator to convert to seconds (MPEG: 90000)
3094 */
3095 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3096 int pts_num, int pts_den)
3097 {
3098 s->pts_wrap_bits = pts_wrap_bits;
3099 s->time_base.num = pts_num;
3100 s->time_base.den = pts_den;
3101 }
3102
3103 /* fraction handling */
3104
3105 /**
3106 * f = val + (num / den) + 0.5.
3107 *
3108 * 'num' is normalized so that 0 <= num < den.
3109 *
3110 * @param f fractional number
3111 * @param val integer value
3112 * @param num must be >= 0
3113 * @param den must be >= 1
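 *
 * Worked example (illustrative numbers): av_frac_init(&f, 0, 0, 90000) stores
 * val=0, num=45000, den=90000 (the den/2 rounding offset); each subsequent
 * av_frac_add(&f, 90000) then advances val by exactly 1 while num stays at 45000.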
3114 */
3115 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3116 {
3117 num += (den >> 1);
3118 if (num >= den) {
3119 val += num / den;
3120 num = num % den;
3121 }
3122 f->val = val;
3123 f->num = num;
3124 f->den = den;
3125 }
3126
3127 /**
3128 * Fractional addition to f: f = f + (incr / f->den).
3129 *
3130 * @param f fractional number
3131 * @param incr increment, can be positive or negative
3132 */
3133 static void av_frac_add(AVFrac *f, int64_t incr)
3134 {
3135 int64_t num, den;
3136
3137 num = f->num + incr;
3138 den = f->den;
3139 if (num < 0) {
3140 f->val += num / den;
3141 num = num % den;
3142 if (num < 0) {
3143 num += den;
3144 f->val--;
3145 }
3146 } else if (num >= den) {
3147 f->val += num / den;
3148 num = num % den;
3149 }
3150 f->num = num;
3151 }