Use proper PRI?64 macros for %ll? format to fix printf format warnings:
[libav.git] / libavformat / utils.c
1 /*
2  * Various utilities for the FFmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "allformats.h"
23 #include "opt.h"
24
25 #undef NDEBUG
26 #include <assert.h>
27
28 /**
29 * @file libavformat/utils.c
30  * Various utility functions for using the FFmpeg library.
31 */
32
33 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
34 static void av_frac_add(AVFrac *f, int64_t incr);
35 static void av_frac_set(AVFrac *f, int64_t val);
36
37 /** head of registered input format linked list. */
38 AVInputFormat *first_iformat = NULL;
39 /** head of registered output format linked list. */
40 AVOutputFormat *first_oformat = NULL;
41 /** head of registered image format linked list. */
42 AVImageFormat *first_image_format = NULL;
43
44 void av_register_input_format(AVInputFormat *format)
45 {
46 AVInputFormat **p;
47 p = &first_iformat;
48 while (*p != NULL) p = &(*p)->next;
49 *p = format;
50 format->next = NULL;
51 }
52
53 void av_register_output_format(AVOutputFormat *format)
54 {
55 AVOutputFormat **p;
56 p = &first_oformat;
57 while (*p != NULL) p = &(*p)->next;
58 *p = format;
59 format->next = NULL;
60 }
61
62 int match_ext(const char *filename, const char *extensions)
63 {
64 const char *ext, *p;
65 char ext1[32], *q;
66
67 if(!filename)
68 return 0;
69
70 ext = strrchr(filename, '.');
71 if (ext) {
72 ext++;
73 p = extensions;
74 for(;;) {
75 q = ext1;
76 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
77 *q++ = *p++;
78 *q = '\0';
79 if (!strcasecmp(ext1, ext))
80 return 1;
81 if (*p == '\0')
82 break;
83 p++;
84 }
85 }
86 return 0;
87 }
88
89 AVOutputFormat *guess_format(const char *short_name, const char *filename,
90 const char *mime_type)
91 {
92 AVOutputFormat *fmt, *fmt_found;
93 int score_max, score;
94
95 /* specific test for image sequences */
96 #ifdef CONFIG_IMAGE2_MUXER
97 if (!short_name && filename &&
98 av_filename_number_test(filename) &&
99 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
100 return guess_format("image2", NULL, NULL);
101 }
102 #endif
103 if (!short_name && filename &&
104 av_filename_number_test(filename) &&
105 guess_image_format(filename)) {
106 return guess_format("image", NULL, NULL);
107 }
108
109 /* find the proper file type */
110 fmt_found = NULL;
111 score_max = 0;
112 fmt = first_oformat;
113 while (fmt != NULL) {
114 score = 0;
115 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
116 score += 100;
117 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
118 score += 10;
119 if (filename && fmt->extensions &&
120 match_ext(filename, fmt->extensions)) {
121 score += 5;
122 }
123 if (score > score_max) {
124 score_max = score;
125 fmt_found = fmt;
126 }
127 fmt = fmt->next;
128 }
129 return fmt_found;
130 }
131
132 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
133 const char *mime_type)
134 {
135 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
136
137 if (fmt) {
138 AVOutputFormat *stream_fmt;
139 char stream_format_name[64];
140
141 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
142 stream_fmt = guess_format(stream_format_name, NULL, NULL);
143
144 if (stream_fmt)
145 fmt = stream_fmt;
146 }
147
148 return fmt;
149 }
150
151 /**
152 * Guesses the codec id based upon muxer and filename.
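 *
 * A small muxing-side sketch (the output name "out.mpg" is hypothetical;
 * assumes av_register_all() has been called):
 * @code
 * AVOutputFormat *ofmt = guess_format(NULL, "out.mpg", NULL);
 * if (ofmt) {
 *     enum CodecID vcodec = av_guess_codec(ofmt, NULL, "out.mpg", NULL, CODEC_TYPE_VIDEO);
 *     enum CodecID acodec = av_guess_codec(ofmt, NULL, "out.mpg", NULL, CODEC_TYPE_AUDIO);
 * }
 * @endcode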
153 */
154 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
155 const char *filename, const char *mime_type, enum CodecType type){
156 if(type == CODEC_TYPE_VIDEO){
157 enum CodecID codec_id= CODEC_ID_NONE;
158
159 #ifdef CONFIG_IMAGE2_MUXER
160 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
161 codec_id= av_guess_image2_codec(filename);
162 }
163 #endif
164 if(codec_id == CODEC_ID_NONE)
165 codec_id= fmt->video_codec;
166 return codec_id;
167 }else if(type == CODEC_TYPE_AUDIO)
168 return fmt->audio_codec;
169 else
170 return CODEC_ID_NONE;
171 }
172
173 /**
174  * Finds the AVInputFormat based on the input format's short name.
175 */
176 AVInputFormat *av_find_input_format(const char *short_name)
177 {
178 AVInputFormat *fmt;
179 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
180 if (!strcmp(fmt->name, short_name))
181 return fmt;
182 }
183 return NULL;
184 }
185
186 /* memory handling */
187
188 /**
189 * Default packet destructor.
190 */
191 void av_destruct_packet(AVPacket *pkt)
192 {
193 av_free(pkt->data);
194 pkt->data = NULL; pkt->size = 0;
195 }
196
197 /**
198  * Allocate the payload of a packet and initialize its fields to default values.
199 *
200 * @param pkt packet
201 * @param size wanted payload size
202 * @return 0 if OK. AVERROR_xxx otherwise.
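 *
 * A minimal caller-side sketch (payload and payload_size are hypothetical;
 * error handling shortened):
 * @code
 * AVPacket pkt;
 * if (av_new_packet(&pkt, payload_size) < 0)
 *     return AVERROR_NOMEM;
 * memcpy(pkt.data, payload, payload_size);
 * // ... pass the packet on ...
 * av_free_packet(&pkt);
 * @endcode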
203 */
204 int av_new_packet(AVPacket *pkt, int size)
205 {
206 void *data;
207 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
208 return AVERROR_NOMEM;
209 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
210 if (!data)
211 return AVERROR_NOMEM;
212 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
213
214 av_init_packet(pkt);
215 pkt->data = data;
216 pkt->size = size;
217 pkt->destruct = av_destruct_packet;
218 return 0;
219 }
220
221 /**
222  * Allocate and read the payload of a packet and initialize its fields to default values.
223 *
224 * @param pkt packet
225 * @param size wanted payload size
226 * @return >0 (read size) if OK. AVERROR_xxx otherwise.
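 *
 * A sketch of how a demuxer's read_packet() callback might use it
 * (the fixed 1024 byte chunk size is just an illustration):
 * @code
 * static int raw_read_packet(AVFormatContext *s, AVPacket *pkt)
 * {
 *     int ret = av_get_packet(&s->pb, pkt, 1024);
 *     if (ret <= 0)
 *         return AVERROR_IO;
 *     pkt->stream_index = 0;
 *     return 0;
 * }
 * @endcode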
227 */
228 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
229 {
230 int ret= av_new_packet(pkt, size);
231
232 if(ret<0)
233 return ret;
234
235 pkt->pos= url_ftell(s);
236
237 ret= get_buffer(s, pkt->data, size);
238 if(ret<=0)
239 av_free_packet(pkt);
240 else
241 pkt->size= ret;
242
243 return ret;
244 }
245
246 /* This is a hack - the packet memory allocation stuff is broken. The
247    packet data is copied into freshly allocated memory if it does not use the default destructor */
248 int av_dup_packet(AVPacket *pkt)
249 {
250 if (pkt->destruct != av_destruct_packet) {
251 uint8_t *data;
252 /* we duplicate the packet and don't forget to put the padding
253 again */
254 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
255 return AVERROR_NOMEM;
256 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
257 if (!data) {
258 return AVERROR_NOMEM;
259 }
260 memcpy(data, pkt->data, pkt->size);
261 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
262 pkt->data = data;
263 pkt->destruct = av_destruct_packet;
264 }
265 return 0;
266 }
267
268 /**
269  * Check whether the given filename is a valid numbered sequence string (e.g. contains "%d").
270 *
271 * @param filename possible numbered sequence string
272 * @return 1 if a valid numbered sequence string, 0 otherwise.
273 */
274 int av_filename_number_test(const char *filename)
275 {
276 char buf[1024];
277 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
278 }
279
280 /**
281 * Guess file format.
282 */
283 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
284 {
285 AVInputFormat *fmt1, *fmt;
286 int score, score_max;
287
288 fmt = NULL;
289 score_max = 0;
290 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
291 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
292 continue;
293 score = 0;
294 if (fmt1->read_probe) {
295 score = fmt1->read_probe(pd);
296 } else if (fmt1->extensions) {
297 if (match_ext(pd->filename, fmt1->extensions)) {
298 score = 50;
299 }
300 }
301 if (score > score_max) {
302 score_max = score;
303 fmt = fmt1;
304 }
305 }
306 return fmt;
307 }
308
309 /************************************************************/
310 /* input media file */
311
312 /**
313 * Open a media file from an IO stream. 'fmt' must be specified.
314 */
315 static const char* format_to_name(void* ptr)
316 {
317 AVFormatContext* fc = (AVFormatContext*) ptr;
318 if(fc->iformat) return fc->iformat->name;
319 else if(fc->oformat) return fc->oformat->name;
320 else return "NULL";
321 }
322
323 #define OFFSET(x) offsetof(AVFormatContext,x)
324 #define DEFAULT 0 //should be NAN, but it does not work as it is not a constant in glibc as required by ANSI/ISO C
325 //these names are too long to be readable
326 #define E AV_OPT_FLAG_ENCODING_PARAM
327 #define D AV_OPT_FLAG_DECODING_PARAM
328
329 static const AVOption options[]={
330 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
331 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
332 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
333 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D, "fflags"},
334 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
335 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
336 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
337 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
338 {NULL},
339 };
340
341 #undef E
342 #undef D
343 #undef DEFAULT
344
345 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
346
347 #if LIBAVFORMAT_VERSION_INT >= ((51<<16)+(0<<8)+0)
348 static
349 #endif
350 void avformat_get_context_defaults(AVFormatContext *s){
351 memset(s, 0, sizeof(AVFormatContext));
352
353 s->av_class = &av_format_context_class;
354
355 av_opt_set_defaults(s);
356 }
357
358 AVFormatContext *av_alloc_format_context(void)
359 {
360 AVFormatContext *ic;
361 ic = av_malloc(sizeof(AVFormatContext));
362 if (!ic) return ic;
363 avformat_get_context_defaults(ic);
364 ic->av_class = &av_format_context_class;
365 return ic;
366 }
367
368 /**
369 * Allocates all the structures needed to read an input stream.
370 * This does not open the needed codecs for decoding the stream[s].
371 */
372 int av_open_input_stream(AVFormatContext **ic_ptr,
373 ByteIOContext *pb, const char *filename,
374 AVInputFormat *fmt, AVFormatParameters *ap)
375 {
376 int err;
377 AVFormatContext *ic;
378 AVFormatParameters default_ap;
379
380 if(!ap){
381 ap=&default_ap;
382 memset(ap, 0, sizeof(default_ap));
383 }
384
385 if(!ap->prealloced_context)
386 ic = av_alloc_format_context();
387 else
388 ic = *ic_ptr;
389 if (!ic) {
390 err = AVERROR_NOMEM;
391 goto fail;
392 }
393 ic->iformat = fmt;
394 if (pb)
395 ic->pb = *pb;
396 ic->duration = AV_NOPTS_VALUE;
397 ic->start_time = AV_NOPTS_VALUE;
398 pstrcpy(ic->filename, sizeof(ic->filename), filename);
399
400 /* allocate private data */
401 if (fmt->priv_data_size > 0) {
402 ic->priv_data = av_mallocz(fmt->priv_data_size);
403 if (!ic->priv_data) {
404 err = AVERROR_NOMEM;
405 goto fail;
406 }
407 } else {
408 ic->priv_data = NULL;
409 }
410
411 err = ic->iformat->read_header(ic, ap);
412 if (err < 0)
413 goto fail;
414
415 if (pb)
416 ic->data_offset = url_ftell(&ic->pb);
417
418 *ic_ptr = ic;
419 return 0;
420 fail:
421 if (ic) {
422 av_freep(&ic->priv_data);
423 }
424 av_free(ic);
425 *ic_ptr = NULL;
426 return err;
427 }
428
429 /** Size of probe buffer, for guessing file type from file contents. */
430 #define PROBE_BUF_MIN 2048
431 #define PROBE_BUF_MAX (1<<20)
432
433 /**
434  * Open a media file as input. The codecs are not opened. Only the file
435 * header (if present) is read.
436 *
437 * @param ic_ptr the opened media file handle is put here
438 * @param filename filename to open.
439 * @param fmt if non NULL, force the file format to use
440 * @param buf_size optional buffer size (zero if default is OK)
441  * @param ap additional parameters needed when opening the file (NULL if default)
442 * @return 0 if OK. AVERROR_xxx otherwise.
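 *
 * A minimal open/close sketch ("input.avi" is a hypothetical filename;
 * error handling shortened):
 * @code
 * AVFormatContext *ic;
 * if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) < 0)
 *     return -1;
 * // ... av_find_stream_info(), av_read_frame(), ...
 * av_close_input_file(ic);
 * @endcode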
443 */
444 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
445 AVInputFormat *fmt,
446 int buf_size,
447 AVFormatParameters *ap)
448 {
449 int err, must_open_file, file_opened, probe_size;
450 AVProbeData probe_data, *pd = &probe_data;
451 ByteIOContext pb1, *pb = &pb1;
452
453 file_opened = 0;
454 pd->filename = "";
455 if (filename)
456 pd->filename = filename;
457 pd->buf = NULL;
458 pd->buf_size = 0;
459
460 if (!fmt) {
461 /* guess format if no file can be opened */
462 fmt = av_probe_input_format(pd, 0);
463 }
464
465 /* do not open file if the format does not need it. XXX: specific
466 hack needed to handle RTSP/TCP */
467 must_open_file = 1;
468 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
469 must_open_file = 0;
470 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it is uninitialized
471 }
472
473 if (!fmt || must_open_file) {
474 /* if no file needed do not try to open one */
475 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
476 err = AVERROR_IO;
477 goto fail;
478 }
479 file_opened = 1;
480 if (buf_size > 0) {
481 url_setbufsize(pb, buf_size);
482 }
483
484 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
485 /* read probe data */
486 pd->buf= av_realloc(pd->buf, probe_size);
487 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
488 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
489 url_fclose(pb);
490 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
491 file_opened = 0;
492 err = AVERROR_IO;
493 goto fail;
494 }
495 }
496 /* guess file format */
497 fmt = av_probe_input_format(pd, 1);
498 }
499 av_freep(&pd->buf);
500 }
501
502 /* if still no format found, error */
503 if (!fmt) {
504 err = AVERROR_NOFMT;
505 goto fail;
506 }
507
508 /* XXX: suppress this hack for redirectors */
509 #ifdef CONFIG_NETWORK
510 if (fmt == &redir_demuxer) {
511 err = redir_open(ic_ptr, pb);
512 url_fclose(pb);
513 return err;
514 }
515 #endif
516
517 /* check filename in case an image number is expected */
518 if (fmt->flags & AVFMT_NEEDNUMBER) {
519 if (!av_filename_number_test(filename)) {
520 err = AVERROR_NUMEXPECTED;
521 goto fail;
522 }
523 }
524 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
525 if (err)
526 goto fail;
527 return 0;
528 fail:
529 av_freep(&pd->buf);
530 if (file_opened)
531 url_fclose(pb);
532 *ic_ptr = NULL;
533 return err;
534
535 }
536
537 /*******************************************************/
538
539 /**
540 * Read a transport packet from a media file.
541 *
542  * This function is obsolete and should never be used.
543 * Use av_read_frame() instead.
544 *
545 * @param s media file handle
546 * @param pkt is filled
547 * @return 0 if OK. AVERROR_xxx if error.
548 */
549 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
550 {
551 return s->iformat->read_packet(s, pkt);
552 }
553
554 /**********************************************************/
555
556 /**
557  * Get the number of samples of an audio frame. Return -1 on error.
558 */
559 static int get_audio_frame_size(AVCodecContext *enc, int size)
560 {
561 int frame_size;
562
563 if (enc->frame_size <= 1) {
564 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
565
566 if (bits_per_sample) {
567 if (enc->channels == 0)
568 return -1;
569 frame_size = (size << 3) / (bits_per_sample * enc->channels);
570 } else {
571 /* used for example by ADPCM codecs */
572 if (enc->bit_rate == 0)
573 return -1;
574 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
575 }
576 } else {
577 frame_size = enc->frame_size;
578 }
579 return frame_size;
580 }
581
582
583 /**
584  * Compute the frame duration as a num/den fraction of a second; both *pnum and *pden are set to 0 if it is not available.
585 */
586 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
587 AVCodecParserContext *pc, AVPacket *pkt)
588 {
589 int frame_size;
590
591 *pnum = 0;
592 *pden = 0;
593 switch(st->codec->codec_type) {
594 case CODEC_TYPE_VIDEO:
595 if(st->time_base.num*1000LL > st->time_base.den){
596 *pnum = st->time_base.num;
597 *pden = st->time_base.den;
598 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
599 *pnum = st->codec->time_base.num;
600 *pden = st->codec->time_base.den;
601 if (pc && pc->repeat_pict) {
602 *pden *= 2;
603 *pnum = (*pnum) * (2 + pc->repeat_pict);
604 }
605 }
606 break;
607 case CODEC_TYPE_AUDIO:
608 frame_size = get_audio_frame_size(st->codec, pkt->size);
609 if (frame_size < 0)
610 break;
611 *pnum = frame_size;
612 *pden = st->codec->sample_rate;
613 break;
614 default:
615 break;
616 }
617 }
618
619 static int is_intra_only(AVCodecContext *enc){
620 if(enc->codec_type == CODEC_TYPE_AUDIO){
621 return 1;
622 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
623 switch(enc->codec_id){
624 case CODEC_ID_MJPEG:
625 case CODEC_ID_MJPEGB:
626 case CODEC_ID_LJPEG:
627 case CODEC_ID_RAWVIDEO:
628 case CODEC_ID_DVVIDEO:
629 case CODEC_ID_HUFFYUV:
630 case CODEC_ID_FFVHUFF:
631 case CODEC_ID_ASV1:
632 case CODEC_ID_ASV2:
633 case CODEC_ID_VCR1:
634 return 1;
635 default: break;
636 }
637 }
638 return 0;
639 }
640
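/**
 * Expand a timestamp stored with only lsb_bits significant bits to a full
 * 64 bit value, assuming it lies close to last_ts (used to undo timestamp
 * wrapping). Illustrative example with lsb_bits == 33 (the MPEG PTS wrap):
 * for last_ts == 0x1FFFFFFF0, an lsb value of 0x10 expands to 0x200000010,
 * i.e. the value just past the wrap point instead of jumping back to 16.
 */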
641 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
642 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
643 int64_t delta= last_ts - mask/2;
644 return ((lsb - delta)&mask) + delta;
645 }
646
647 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
648 AVCodecParserContext *pc, AVPacket *pkt)
649 {
650 int num, den, presentation_delayed;
651 /* handle wrapping */
652 if(st->cur_dts != AV_NOPTS_VALUE){
653 if(pkt->pts != AV_NOPTS_VALUE)
654 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
655 if(pkt->dts != AV_NOPTS_VALUE)
656 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
657 }
658
659 if (pkt->duration == 0) {
660 compute_frame_duration(&num, &den, st, pc, pkt);
661 if (den && num) {
662 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
663 }
664 }
665
666 if(is_intra_only(st->codec))
667 pkt->flags |= PKT_FLAG_KEY;
668
669 /* do we have a video B frame ? */
670 presentation_delayed = 0;
671 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
672 /* XXX: need has_b_frame, but cannot get it if the codec is
673 not initialized */
674 if (( st->codec->codec_id == CODEC_ID_H264
675 || st->codec->has_b_frames) &&
676 pc && pc->pict_type != FF_B_TYPE)
677 presentation_delayed = 1;
678 /* this may be redundant, but it shouldn't hurt */
679 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
680 presentation_delayed = 1;
681 }
682
683 if(st->cur_dts == AV_NOPTS_VALUE){
684 if(presentation_delayed) st->cur_dts = -pkt->duration;
685 else st->cur_dts = 0;
686 }
687
688 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
689 /* interpolate PTS and DTS if they are not present */
690 if (presentation_delayed) {
691 /* DTS = decompression time stamp */
692 /* PTS = presentation time stamp */
693 if (pkt->dts == AV_NOPTS_VALUE) {
694 /* if we know the last pts, use it */
695 if(st->last_IP_pts != AV_NOPTS_VALUE)
696 st->cur_dts = pkt->dts = st->last_IP_pts;
697 else
698 pkt->dts = st->cur_dts;
699 } else {
700 st->cur_dts = pkt->dts;
701 }
702 /* this is tricky: the dts must be incremented by the duration
703 of the frame we are displaying, i.e. the last I or P frame */
704 if (st->last_IP_duration == 0)
705 st->cur_dts += pkt->duration;
706 else
707 st->cur_dts += st->last_IP_duration;
708 st->last_IP_duration = pkt->duration;
709 st->last_IP_pts= pkt->pts;
710 /* cannot compute PTS if not present (we can compute it only
711    by knowing the future) */
712 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
713 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
714 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
715 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
716 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
717 pkt->pts += pkt->duration;
718 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
719 }
720 }
721
722 /* presentation is not delayed : PTS and DTS are the same */
723 if (pkt->pts == AV_NOPTS_VALUE) {
724 if (pkt->dts == AV_NOPTS_VALUE) {
725 pkt->pts = st->cur_dts;
726 pkt->dts = st->cur_dts;
727 }
728 else {
729 st->cur_dts = pkt->dts;
730 pkt->pts = pkt->dts;
731 }
732 } else {
733 st->cur_dts = pkt->pts;
734 pkt->dts = pkt->pts;
735 }
736 st->cur_dts += pkt->duration;
737 }
738 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
739
740 /* update flags */
741 if (pc) {
742 pkt->flags = 0;
743 /* key frame computation */
744 switch(st->codec->codec_type) {
745 case CODEC_TYPE_VIDEO:
746 if (pc->pict_type == FF_I_TYPE)
747 pkt->flags |= PKT_FLAG_KEY;
748 break;
749 case CODEC_TYPE_AUDIO:
750 pkt->flags |= PKT_FLAG_KEY;
751 break;
752 default:
753 break;
754 }
755 }
756 }
757
758 void av_destruct_packet_nofree(AVPacket *pkt)
759 {
760 pkt->data = NULL; pkt->size = 0;
761 }
762
763 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
764 {
765 AVStream *st;
766 int len, ret, i;
767
768 for(;;) {
769 /* select current input stream component */
770 st = s->cur_st;
771 if (st) {
772 if (!st->need_parsing || !st->parser) {
773 /* no parsing needed: we just output the packet as is */
774 /* raw data support */
775 *pkt = s->cur_pkt;
776 compute_pkt_fields(s, st, NULL, pkt);
777 s->cur_st = NULL;
778 break;
779 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
780 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
781 s->cur_ptr, s->cur_len,
782 s->cur_pkt.pts, s->cur_pkt.dts);
783 s->cur_pkt.pts = AV_NOPTS_VALUE;
784 s->cur_pkt.dts = AV_NOPTS_VALUE;
785 /* increment read pointer */
786 s->cur_ptr += len;
787 s->cur_len -= len;
788
789 /* return packet if any */
790 if (pkt->size) {
791 got_packet:
792 pkt->duration = 0;
793 pkt->stream_index = st->index;
794 pkt->pts = st->parser->pts;
795 pkt->dts = st->parser->dts;
796 pkt->destruct = av_destruct_packet_nofree;
797 compute_pkt_fields(s, st, st->parser, pkt);
798 break;
799 }
800 } else {
801 /* free packet */
802 av_free_packet(&s->cur_pkt);
803 s->cur_st = NULL;
804 }
805 } else {
806 /* read next packet */
807 ret = av_read_packet(s, &s->cur_pkt);
808 if (ret < 0) {
809 if (ret == -EAGAIN)
810 return ret;
811 /* return the last frames, if any */
812 for(i = 0; i < s->nb_streams; i++) {
813 st = s->streams[i];
814 if (st->parser && st->need_parsing) {
815 av_parser_parse(st->parser, st->codec,
816 &pkt->data, &pkt->size,
817 NULL, 0,
818 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
819 if (pkt->size)
820 goto got_packet;
821 }
822 }
823 /* no more packets: really terminates parsing */
824 return ret;
825 }
826
827 st = s->streams[s->cur_pkt.stream_index];
828 if(st->codec->debug & FF_DEBUG_PTS)
829 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
830 s->cur_pkt.stream_index,
831 s->cur_pkt.pts,
832 s->cur_pkt.dts,
833 s->cur_pkt.size);
834
835 s->cur_st = st;
836 s->cur_ptr = s->cur_pkt.data;
837 s->cur_len = s->cur_pkt.size;
838 if (st->need_parsing && !st->parser) {
839 st->parser = av_parser_init(st->codec->codec_id);
840 if (!st->parser) {
841 /* no parser available : just output the raw packets */
842 st->need_parsing = 0;
843 }else if(st->need_parsing == 2){
844 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
845 }
846 }
847 }
848 }
849 if(st->codec->debug & FF_DEBUG_PTS)
850 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
851 pkt->stream_index,
852 pkt->pts,
853 pkt->dts,
854 pkt->size);
855
856 return 0;
857 }
858
859 /**
860 * Return the next frame of a stream.
861 *
862 * The returned packet is valid
863 * until the next av_read_frame() or until av_close_input_file() and
864 * must be freed with av_free_packet. For video, the packet contains
865 * exactly one frame. For audio, it contains an integer number of
866 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
867 * data). If the audio frames have a variable size (e.g. MPEG audio),
868 * then it contains one frame.
869 *
870 * pkt->pts, pkt->dts and pkt->duration are always set to correct
871  * values in AV_TIME_BASE units (and guessed if the format cannot
872  * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
873 * has B frames, so it is better to rely on pkt->dts if you do not
874 * decompress the payload.
875 *
876 * @return 0 if OK, < 0 if error or end of file.
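 *
 * A typical demuxing loop sketch (ic is an already opened AVFormatContext;
 * decoding itself is omitted):
 * @code
 * AVPacket pkt;
 * while (av_read_frame(ic, &pkt) >= 0) {
 *     // pkt.stream_index identifies the stream the packet belongs to
 *     // ... decode or remux pkt.data / pkt.size here ...
 *     av_free_packet(&pkt);
 * }
 * @endcode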
877 */
878 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
879 {
880 AVPacketList *pktl;
881 int eof=0;
882 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
883
884 for(;;){
885 pktl = s->packet_buffer;
886 if (pktl) {
887 AVPacket *next_pkt= &pktl->pkt;
888
889 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
890 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
891 if( pktl->pkt.stream_index == next_pkt->stream_index
892 && next_pkt->dts < pktl->pkt.dts
893 && pktl->pkt.pts != pktl->pkt.dts //not b frame
894 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
895 next_pkt->pts= pktl->pkt.dts;
896 }
897 pktl= pktl->next;
898 }
899 pktl = s->packet_buffer;
900 }
901
902 if( next_pkt->pts != AV_NOPTS_VALUE
903 || next_pkt->dts == AV_NOPTS_VALUE
904 || !genpts || eof){
905 /* read packet from packet buffer, if there is data */
906 *pkt = *next_pkt;
907 s->packet_buffer = pktl->next;
908 av_free(pktl);
909 return 0;
910 }
911 }
912 if(genpts){
913 AVPacketList **plast_pktl= &s->packet_buffer;
914 int ret= av_read_frame_internal(s, pkt);
915 if(ret<0){
916 if(pktl && ret != -EAGAIN){
917 eof=1;
918 continue;
919 }else
920 return ret;
921 }
922
923 /* duplicate the packet */
924 if (av_dup_packet(pkt) < 0)
925 return AVERROR_NOMEM;
926
927 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
928
929 pktl = av_mallocz(sizeof(AVPacketList));
930 if (!pktl)
931 return AVERROR_NOMEM;
932
933 /* add the packet in the buffered packet list */
934 *plast_pktl = pktl;
935 pktl->pkt= *pkt;
936 }else{
937 assert(!s->packet_buffer);
938 return av_read_frame_internal(s, pkt);
939 }
940 }
941 }
942
943 /* XXX: suppress the packet queue */
944 static void flush_packet_queue(AVFormatContext *s)
945 {
946 AVPacketList *pktl;
947
948 for(;;) {
949 pktl = s->packet_buffer;
950 if (!pktl)
951 break;
952 s->packet_buffer = pktl->next;
953 av_free_packet(&pktl->pkt);
954 av_free(pktl);
955 }
956 }
957
958 /*******************************************************/
959 /* seek support */
960
961 int av_find_default_stream_index(AVFormatContext *s)
962 {
963 int i;
964 AVStream *st;
965
966 if (s->nb_streams <= 0)
967 return -1;
968 for(i = 0; i < s->nb_streams; i++) {
969 st = s->streams[i];
970 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
971 return i;
972 }
973 }
974 return 0;
975 }
976
977 /**
978 * Flush the frame reader.
979 */
980 static void av_read_frame_flush(AVFormatContext *s)
981 {
982 AVStream *st;
983 int i;
984
985 flush_packet_queue(s);
986
987 /* free previous packet */
988 if (s->cur_st) {
989 if (s->cur_st->parser)
990 av_free_packet(&s->cur_pkt);
991 s->cur_st = NULL;
992 }
993 /* fail safe */
994 s->cur_ptr = NULL;
995 s->cur_len = 0;
996
997 /* for each stream, reset read state */
998 for(i = 0; i < s->nb_streams; i++) {
999 st = s->streams[i];
1000
1001 if (st->parser) {
1002 av_parser_close(st->parser);
1003 st->parser = NULL;
1004 }
1005 st->last_IP_pts = AV_NOPTS_VALUE;
1006 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
1007 }
1008 }
1009
1010 /**
1011 * Updates cur_dts of all streams based on given timestamp and AVStream.
1012 *
1013 * Stream ref_st unchanged, others set cur_dts in their native timebase
1014 * only needed for timestamp wrapping or if (dts not set and pts!=dts)
1015 * @param timestamp new dts expressed in time_base of param ref_st
1016 * @param ref_st reference stream giving time_base of param timestamp
1017 */
1018 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1019 int i;
1020
1021 for(i = 0; i < s->nb_streams; i++) {
1022 AVStream *st = s->streams[i];
1023
1024 st->cur_dts = av_rescale(timestamp,
1025 st->time_base.den * (int64_t)ref_st->time_base.num,
1026 st->time_base.num * (int64_t)ref_st->time_base.den);
1027 }
1028 }
1029
1030 /**
1031  * Add an index entry into a sorted list, updating it if it is already there.
1032 *
1033 * @param timestamp timestamp in the timebase of the given stream
1034 */
1035 int av_add_index_entry(AVStream *st,
1036 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1037 {
1038 AVIndexEntry *entries, *ie;
1039 int index;
1040
1041 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1042 return -1;
1043
1044 entries = av_fast_realloc(st->index_entries,
1045 &st->index_entries_allocated_size,
1046 (st->nb_index_entries + 1) *
1047 sizeof(AVIndexEntry));
1048 if(!entries)
1049 return -1;
1050
1051 st->index_entries= entries;
1052
1053 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1054
1055 if(index<0){
1056 index= st->nb_index_entries++;
1057 ie= &entries[index];
1058 assert(index==0 || ie[-1].timestamp < timestamp);
1059 }else{
1060 ie= &entries[index];
1061 if(ie->timestamp != timestamp){
1062 if(ie->timestamp <= timestamp)
1063 return -1;
1064 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1065 st->nb_index_entries++;
1066 }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1067 distance= ie->min_distance;
1068 }
1069
1070 ie->pos = pos;
1071 ie->timestamp = timestamp;
1072 ie->min_distance= distance;
1073 ie->size= size;
1074 ie->flags = flags;
1075
1076 return index;
1077 }
1078
1079 /**
1080 * build an index for raw streams using a parser.
1081 */
1082 static void av_build_index_raw(AVFormatContext *s)
1083 {
1084 AVPacket pkt1, *pkt = &pkt1;
1085 int ret;
1086 AVStream *st;
1087
1088 st = s->streams[0];
1089 av_read_frame_flush(s);
1090 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1091
1092 for(;;) {
1093 ret = av_read_frame(s, pkt);
1094 if (ret < 0)
1095 break;
1096 if (pkt->stream_index == 0 && st->parser &&
1097 (pkt->flags & PKT_FLAG_KEY)) {
1098 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1099 0, 0, AVINDEX_KEYFRAME);
1100 }
1101 av_free_packet(pkt);
1102 }
1103 }
1104
1105 /**
1106 * Returns TRUE if we deal with a raw stream.
1107 *
1108 * Raw codec data and parsing needed.
1109 */
1110 static int is_raw_stream(AVFormatContext *s)
1111 {
1112 AVStream *st;
1113
1114 if (s->nb_streams != 1)
1115 return 0;
1116 st = s->streams[0];
1117 if (!st->need_parsing)
1118 return 0;
1119 return 1;
1120 }
1121
1122 /**
1123 * Gets the index for a specific timestamp.
1124 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
1125  * the timestamp which is <= the requested one; if backward is not set
1126  * then it will be >= the requested one
1127 * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
1128 * @return < 0 if no such timestamp could be found
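 *
 * A small sketch (st has a populated index; wanted_ts is a timestamp in
 * st->time_base units):
 * @code
 * int idx = av_index_search_timestamp(st, wanted_ts, AVSEEK_FLAG_BACKWARD);
 * if (idx >= 0) {
 *     AVIndexEntry *ie = &st->index_entries[idx];
 *     // ie->pos is the byte position of the keyframe at or before wanted_ts
 * }
 * @endcode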
1129 */
1130 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1131 int flags)
1132 {
1133 AVIndexEntry *entries= st->index_entries;
1134 int nb_entries= st->nb_index_entries;
1135 int a, b, m;
1136 int64_t timestamp;
1137
1138 a = - 1;
1139 b = nb_entries;
1140
1141 while (b - a > 1) {
1142 m = (a + b) >> 1;
1143 timestamp = entries[m].timestamp;
1144 if(timestamp >= wanted_timestamp)
1145 b = m;
1146 if(timestamp <= wanted_timestamp)
1147 a = m;
1148 }
1149 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1150
1151 if(!(flags & AVSEEK_FLAG_ANY)){
1152 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1153 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1154 }
1155 }
1156
1157 if(m == nb_entries)
1158 return -1;
1159 return m;
1160 }
1161
1162 #define DEBUG_SEEK
1163
1164 /**
1165  * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
1166  * This is not supposed to be called directly by a user application, but by demuxers.
1167 * @param target_ts target timestamp in the time base of the given stream
1168 * @param stream_index stream number
1169 */
1170 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1171 AVInputFormat *avif= s->iformat;
1172 int64_t pos_min, pos_max, pos, pos_limit;
1173 int64_t ts_min, ts_max, ts;
1174 int64_t start_pos, filesize;
1175 int index, no_change;
1176 AVStream *st;
1177
1178 if (stream_index < 0)
1179 return -1;
1180
1181 #ifdef DEBUG_SEEK
1182 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1183 #endif
1184
1185 ts_max=
1186 ts_min= AV_NOPTS_VALUE;
1187 pos_limit= -1; //gcc falsely says it may be uninitialized
1188
1189 st= s->streams[stream_index];
1190 if(st->index_entries){
1191 AVIndexEntry *e;
1192
1193 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1194 index= FFMAX(index, 0);
1195 e= &st->index_entries[index];
1196
1197 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1198 pos_min= e->pos;
1199 ts_min= e->timestamp;
1200 #ifdef DEBUG_SEEK
1201 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1202 pos_min,ts_min);
1203 #endif
1204 }else{
1205 assert(index==0);
1206 }
1207
1208 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1209 assert(index < st->nb_index_entries);
1210 if(index >= 0){
1211 e= &st->index_entries[index];
1212 assert(e->timestamp >= target_ts);
1213 pos_max= e->pos;
1214 ts_max= e->timestamp;
1215 pos_limit= pos_max - e->min_distance;
1216 #ifdef DEBUG_SEEK
1217 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1218 pos_max,pos_limit, ts_max);
1219 #endif
1220 }
1221 }
1222
1223 if(ts_min == AV_NOPTS_VALUE){
1224 pos_min = s->data_offset;
1225 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1226 if (ts_min == AV_NOPTS_VALUE)
1227 return -1;
1228 }
1229
1230 if(ts_max == AV_NOPTS_VALUE){
1231 int step= 1024;
1232 filesize = url_fsize(&s->pb);
1233 pos_max = filesize - 1;
1234 do{
1235 pos_max -= step;
1236 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1237 step += step;
1238 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1239 if (ts_max == AV_NOPTS_VALUE)
1240 return -1;
1241
1242 for(;;){
1243 int64_t tmp_pos= pos_max + 1;
1244 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1245 if(tmp_ts == AV_NOPTS_VALUE)
1246 break;
1247 ts_max= tmp_ts;
1248 pos_max= tmp_pos;
1249 if(tmp_pos >= filesize)
1250 break;
1251 }
1252 pos_limit= pos_max;
1253 }
1254
1255 if(ts_min > ts_max){
1256 return -1;
1257 }else if(ts_min == ts_max){
1258 pos_limit= pos_min;
1259 }
1260
1261 no_change=0;
1262 while (pos_min < pos_limit) {
1263 #ifdef DEBUG_SEEK
1264 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1265 pos_min, pos_max,
1266 ts_min, ts_max);
1267 #endif
1268 assert(pos_limit <= pos_max);
1269
1270 if(no_change==0){
1271 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1272 // interpolate position (better than dichotomy)
1273 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1274 + pos_min - approximate_keyframe_distance;
1275 }else if(no_change==1){
1276 // bisection, if interpolation failed to change min or max pos last time
1277 pos = (pos_min + pos_limit)>>1;
1278 }else{
1279 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1280 pos=pos_min;
1281 }
1282 if(pos <= pos_min)
1283 pos= pos_min + 1;
1284 else if(pos > pos_limit)
1285 pos= pos_limit;
1286 start_pos= pos;
1287
1288 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1289 if(pos == pos_max)
1290 no_change++;
1291 else
1292 no_change=0;
1293 #ifdef DEBUG_SEEK
1294 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1295 #endif
1296 assert(ts != AV_NOPTS_VALUE);
1297 if (target_ts <= ts) {
1298 pos_limit = start_pos - 1;
1299 pos_max = pos;
1300 ts_max = ts;
1301 }
1302 if (target_ts >= ts) {
1303 pos_min = pos;
1304 ts_min = ts;
1305 }
1306 }
1307
1308 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1309 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1310 #ifdef DEBUG_SEEK
1311 pos_min = pos;
1312 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1313 pos_min++;
1314 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1315 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1316 pos, ts_min, target_ts, ts_max);
1317 #endif
1318 /* do the seek */
1319 url_fseek(&s->pb, pos, SEEK_SET);
1320
1321 av_update_cur_dts(s, st, ts);
1322
1323 return 0;
1324 }
1325
1326 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1327 int64_t pos_min, pos_max;
1328 #if 0
1329 AVStream *st;
1330
1331 if (stream_index < 0)
1332 return -1;
1333
1334 st= s->streams[stream_index];
1335 #endif
1336
1337 pos_min = s->data_offset;
1338 pos_max = url_fsize(&s->pb) - 1;
1339
1340 if (pos < pos_min) pos= pos_min;
1341 else if(pos > pos_max) pos= pos_max;
1342
1343 url_fseek(&s->pb, pos, SEEK_SET);
1344
1345 #if 0
1346 av_update_cur_dts(s, st, ts);
1347 #endif
1348 return 0;
1349 }
1350
1351 static int av_seek_frame_generic(AVFormatContext *s,
1352 int stream_index, int64_t timestamp, int flags)
1353 {
1354 int index;
1355 AVStream *st;
1356 AVIndexEntry *ie;
1357
1358 if (!s->index_built) {
1359 if (is_raw_stream(s)) {
1360 av_build_index_raw(s);
1361 } else {
1362 return -1;
1363 }
1364 s->index_built = 1;
1365 }
1366
1367 st = s->streams[stream_index];
1368 index = av_index_search_timestamp(st, timestamp, flags);
1369 if (index < 0)
1370 return -1;
1371
1372 /* now we have found the index, we can seek */
1373 ie = &st->index_entries[index];
1374 av_read_frame_flush(s);
1375 url_fseek(&s->pb, ie->pos, SEEK_SET);
1376
1377 av_update_cur_dts(s, st, ie->timestamp);
1378
1379 return 0;
1380 }
1381
1382 /**
1383  * Seek to the keyframe at 'timestamp' in the stream
1384  * identified by 'stream_index'.
1385 * @param stream_index If stream_index is (-1), a default
1386 * stream is selected, and timestamp is automatically converted
1387 * from AV_TIME_BASE units to the stream specific time_base.
1388 * @param timestamp timestamp in AVStream.time_base units
1389 * or if there is no stream specified then in AV_TIME_BASE units
1390 * @param flags flags which select direction and seeking mode
1391 * @return >= 0 on success
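 *
 * A caller-side sketch seeking to the 10 second mark on the default stream
 * (stream_index -1, so the timestamp is given in AV_TIME_BASE units):
 * @code
 * int64_t target = 10 * (int64_t)AV_TIME_BASE;
 * if (av_seek_frame(ic, -1, target, AVSEEK_FLAG_BACKWARD) < 0)
 *     av_log(ic, AV_LOG_ERROR, "seek failed\n");
 * @endcode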
1392 */
1393 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1394 {
1395 int ret;
1396 AVStream *st;
1397
1398 av_read_frame_flush(s);
1399
1400 if(flags & AVSEEK_FLAG_BYTE)
1401 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1402
1403 if(stream_index < 0){
1404 stream_index= av_find_default_stream_index(s);
1405 if(stream_index < 0)
1406 return -1;
1407
1408 st= s->streams[stream_index];
1409 /* timestamp for default must be expressed in AV_TIME_BASE units */
1410 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1411 }
1412 st= s->streams[stream_index];
1413
1414 /* first, we try the format specific seek */
1415 if (s->iformat->read_seek)
1416 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1417 else
1418 ret = -1;
1419 if (ret >= 0) {
1420 return 0;
1421 }
1422
1423 if(s->iformat->read_timestamp)
1424 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1425 else
1426 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1427 }
1428
1429 /*******************************************************/
1430
1431 /**
1432  * Returns TRUE if the file has accurate timings in at least one stream.
1433  *
1434  * @return TRUE if at least one stream component has accurate timings.
1435 */
1436 static int av_has_timings(AVFormatContext *ic)
1437 {
1438 int i;
1439 AVStream *st;
1440
1441 for(i = 0;i < ic->nb_streams; i++) {
1442 st = ic->streams[i];
1443 if (st->start_time != AV_NOPTS_VALUE &&
1444 st->duration != AV_NOPTS_VALUE)
1445 return 1;
1446 }
1447 return 0;
1448 }
1449
1450 /**
1451  * Estimate the stream timings from those of each component.
1452 *
1453 * Also computes the global bitrate if possible.
1454 */
1455 static void av_update_stream_timings(AVFormatContext *ic)
1456 {
1457 int64_t start_time, start_time1, end_time, end_time1;
1458 int i;
1459 AVStream *st;
1460
1461 start_time = MAXINT64;
1462 end_time = MININT64;
1463 for(i = 0;i < ic->nb_streams; i++) {
1464 st = ic->streams[i];
1465 if (st->start_time != AV_NOPTS_VALUE) {
1466 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1467 if (start_time1 < start_time)
1468 start_time = start_time1;
1469 if (st->duration != AV_NOPTS_VALUE) {
1470 end_time1 = start_time1
1471 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1472 if (end_time1 > end_time)
1473 end_time = end_time1;
1474 }
1475 }
1476 }
1477 if (start_time != MAXINT64) {
1478 ic->start_time = start_time;
1479 if (end_time != MININT64) {
1480 ic->duration = end_time - start_time;
1481 if (ic->file_size > 0) {
1482 /* compute the bit rate */
1483 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1484 (double)ic->duration;
1485 }
1486 }
1487 }
1488
1489 }
1490
1491 static void fill_all_stream_timings(AVFormatContext *ic)
1492 {
1493 int i;
1494 AVStream *st;
1495
1496 av_update_stream_timings(ic);
1497 for(i = 0;i < ic->nb_streams; i++) {
1498 st = ic->streams[i];
1499 if (st->start_time == AV_NOPTS_VALUE) {
1500 if(ic->start_time != AV_NOPTS_VALUE)
1501 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1502 if(ic->duration != AV_NOPTS_VALUE)
1503 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1504 }
1505 }
1506 }
1507
1508 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1509 {
1510 int64_t filesize, duration;
1511 int bit_rate, i;
1512 AVStream *st;
1513
1514 /* if bit_rate is already set, we believe it */
1515 if (ic->bit_rate == 0) {
1516 bit_rate = 0;
1517 for(i=0;i<ic->nb_streams;i++) {
1518 st = ic->streams[i];
1519 bit_rate += st->codec->bit_rate;
1520 }
1521 ic->bit_rate = bit_rate;
1522 }
1523
1524 /* if duration is already set, we believe it */
1525 if (ic->duration == AV_NOPTS_VALUE &&
1526 ic->bit_rate != 0 &&
1527 ic->file_size != 0) {
1528 filesize = ic->file_size;
1529 if (filesize > 0) {
1530 for(i = 0; i < ic->nb_streams; i++) {
1531 st = ic->streams[i];
1532 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1533 if (st->start_time == AV_NOPTS_VALUE ||
1534 st->duration == AV_NOPTS_VALUE) {
1535 st->start_time = 0;
1536 st->duration = duration;
1537 }
1538 }
1539 }
1540 }
1541 }
1542
1543 #define DURATION_MAX_READ_SIZE 250000
1544
1545 /* only usable for MPEG-PS streams */
1546 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1547 {
1548 AVPacket pkt1, *pkt = &pkt1;
1549 AVStream *st;
1550 int read_size, i, ret;
1551 int64_t end_time;
1552 int64_t filesize, offset, duration;
1553
1554 /* free previous packet */
1555 if (ic->cur_st && ic->cur_st->parser)
1556 av_free_packet(&ic->cur_pkt);
1557 ic->cur_st = NULL;
1558
1559 /* flush packet queue */
1560 flush_packet_queue(ic);
1561
1562 for(i=0;i<ic->nb_streams;i++) {
1563 st = ic->streams[i];
1564 if (st->parser) {
1565 av_parser_close(st->parser);
1566 st->parser= NULL;
1567 }
1568 }
1569
1570 /* we read the first packets to get the first PTS (not fully
1571 accurate, but it is enough now) */
1572 url_fseek(&ic->pb, 0, SEEK_SET);
1573 read_size = 0;
1574 for(;;) {
1575 if (read_size >= DURATION_MAX_READ_SIZE)
1576 break;
1577 /* if all info is available, we can stop */
1578 for(i = 0;i < ic->nb_streams; i++) {
1579 st = ic->streams[i];
1580 if (st->start_time == AV_NOPTS_VALUE)
1581 break;
1582 }
1583 if (i == ic->nb_streams)
1584 break;
1585
1586 ret = av_read_packet(ic, pkt);
1587 if (ret != 0)
1588 break;
1589 read_size += pkt->size;
1590 st = ic->streams[pkt->stream_index];
1591 if (pkt->pts != AV_NOPTS_VALUE) {
1592 if (st->start_time == AV_NOPTS_VALUE)
1593 st->start_time = pkt->pts;
1594 }
1595 av_free_packet(pkt);
1596 }
1597
1598 /* estimate the end time (duration) */
1599 /* XXX: may need to support wrapping */
1600 filesize = ic->file_size;
1601 offset = filesize - DURATION_MAX_READ_SIZE;
1602 if (offset < 0)
1603 offset = 0;
1604
1605 url_fseek(&ic->pb, offset, SEEK_SET);
1606 read_size = 0;
1607 for(;;) {
1608 if (read_size >= DURATION_MAX_READ_SIZE)
1609 break;
1610 /* if all info is available, we can stop */
1611 for(i = 0;i < ic->nb_streams; i++) {
1612 st = ic->streams[i];
1613 if (st->duration == AV_NOPTS_VALUE)
1614 break;
1615 }
1616 if (i == ic->nb_streams)
1617 break;
1618
1619 ret = av_read_packet(ic, pkt);
1620 if (ret != 0)
1621 break;
1622 read_size += pkt->size;
1623 st = ic->streams[pkt->stream_index];
1624 if (pkt->pts != AV_NOPTS_VALUE) {
1625 end_time = pkt->pts;
1626 duration = end_time - st->start_time;
1627 if (duration > 0) {
1628 if (st->duration == AV_NOPTS_VALUE ||
1629 st->duration < duration)
1630 st->duration = duration;
1631 }
1632 }
1633 av_free_packet(pkt);
1634 }
1635
1636 fill_all_stream_timings(ic);
1637
1638 url_fseek(&ic->pb, 0, SEEK_SET);
1639 }
1640
1641 static void av_estimate_timings(AVFormatContext *ic)
1642 {
1643 int64_t file_size;
1644
1645 /* get the file size, if possible */
1646 if (ic->iformat->flags & AVFMT_NOFILE) {
1647 file_size = 0;
1648 } else {
1649 file_size = url_fsize(&ic->pb);
1650 if (file_size < 0)
1651 file_size = 0;
1652 }
1653 ic->file_size = file_size;
1654
1655 if ((!strcmp(ic->iformat->name, "mpeg") ||
1656 !strcmp(ic->iformat->name, "mpegts")) &&
1657 file_size && !ic->pb.is_streamed) {
1658 /* get accurate estimate from the PTSes */
1659 av_estimate_timings_from_pts(ic);
1660 } else if (av_has_timings(ic)) {
1661 /* at least one component has timings - we use them for all
1662 the components */
1663 fill_all_stream_timings(ic);
1664 } else {
1665 /* less precise: use bit rate info */
1666 av_estimate_timings_from_bit_rate(ic);
1667 }
1668 av_update_stream_timings(ic);
1669
1670 #if 0
1671 {
1672 int i;
1673 AVStream *st;
1674 for(i = 0;i < ic->nb_streams; i++) {
1675 st = ic->streams[i];
1676 printf("%d: start_time: %0.3f duration: %0.3f\n",
1677 i, (double)st->start_time / AV_TIME_BASE,
1678 (double)st->duration / AV_TIME_BASE);
1679 }
1680 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1681 (double)ic->start_time / AV_TIME_BASE,
1682 (double)ic->duration / AV_TIME_BASE,
1683 ic->bit_rate / 1000);
1684 }
1685 #endif
1686 }
1687
1688 static int has_codec_parameters(AVCodecContext *enc)
1689 {
1690 int val;
1691 switch(enc->codec_type) {
1692 case CODEC_TYPE_AUDIO:
1693 val = enc->sample_rate;
1694 break;
1695 case CODEC_TYPE_VIDEO:
1696 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1697 break;
1698 default:
1699 val = 1;
1700 break;
1701 }
1702 return (val != 0);
1703 }
1704
1705 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1706 {
1707 int16_t *samples;
1708 AVCodec *codec;
1709 int got_picture, ret=0;
1710 AVFrame picture;
1711
1712 if(!st->codec->codec){
1713 codec = avcodec_find_decoder(st->codec->codec_id);
1714 if (!codec)
1715 return -1;
1716 ret = avcodec_open(st->codec, codec);
1717 if (ret < 0)
1718 return ret;
1719 }
1720
1721 if(!has_codec_parameters(st->codec)){
1722 switch(st->codec->codec_type) {
1723 case CODEC_TYPE_VIDEO:
1724 ret = avcodec_decode_video(st->codec, &picture,
1725 &got_picture, (uint8_t *)data, size);
1726 break;
1727 case CODEC_TYPE_AUDIO:
1728 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1729 if (!samples)
1730 goto fail;
1731 ret = avcodec_decode_audio(st->codec, samples,
1732 &got_picture, (uint8_t *)data, size);
1733 av_free(samples);
1734 break;
1735 default:
1736 break;
1737 }
1738 }
1739 fail:
1740 return ret;
1741 }
1742
1743 /* absolute maximum size we read until we abort */
1744 #define MAX_READ_SIZE 5000000
1745
1746 /* maximum duration until we stop analysing the stream */
1747 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 3.0))
1748
1749 /**
1750 * Read the beginning of a media file to get stream information. This
1751 * is useful for file formats with no headers such as MPEG. This
1752  * function also computes the real frame rate in case of MPEG-2 repeat
1753 * frame mode.
1754 *
1755 * @param ic media file handle
1756 * @return >=0 if OK. AVERROR_xxx if error.
1757  * @todo let the user decide somehow what information is needed so we do not waste time getting stuff the user does not need
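 *
 * Typical call order after av_open_input_file() (ic and url are from the
 * caller; dump_format() is only used here to print what was detected):
 * @code
 * if (av_find_stream_info(ic) < 0)
 *     av_log(ic, AV_LOG_ERROR, "could not find stream parameters\n");
 * else
 *     dump_format(ic, 0, url, 0);
 * @endcode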
1758 */
1759 int av_find_stream_info(AVFormatContext *ic)
1760 {
1761 int i, count, ret, read_size, j;
1762 AVStream *st;
1763 AVPacket pkt1, *pkt;
1764 AVPacketList *pktl=NULL, **ppktl;
1765 int64_t last_dts[MAX_STREAMS];
1766 int64_t duration_sum[MAX_STREAMS];
1767 int duration_count[MAX_STREAMS]={0};
1768
1769 for(i=0;i<ic->nb_streams;i++) {
1770 st = ic->streams[i];
1771 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1772 /* if(!st->time_base.num)
1773 st->time_base= */
1774 if(!st->codec->time_base.num)
1775 st->codec->time_base= st->time_base;
1776 }
1777 //only for the split stuff
1778 if (!st->parser) {
1779 st->parser = av_parser_init(st->codec->codec_id);
1780 if(st->need_parsing == 2 && st->parser){
1781 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1782 }
1783 }
1784 }
1785
1786 for(i=0;i<MAX_STREAMS;i++){
1787 last_dts[i]= AV_NOPTS_VALUE;
1788 duration_sum[i]= INT64_MAX;
1789 }
1790
1791 count = 0;
1792 read_size = 0;
1793 ppktl = &ic->packet_buffer;
1794 for(;;) {
1795 /* check if one codec still needs to be handled */
1796 for(i=0;i<ic->nb_streams;i++) {
1797 st = ic->streams[i];
1798 if (!has_codec_parameters(st->codec))
1799 break;
1800 /* variable fps and no guess at the real fps */
1801 if( st->codec->time_base.den >= 101LL*st->codec->time_base.num
1802 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1803 break;
1804 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1805 break;
1806 }
1807 if (i == ic->nb_streams) {
1808 /* NOTE: if the format has no header, then we need to read
1809 some packets to get most of the streams, so we cannot
1810 stop here */
1811 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1812 /* if we found the info for all the codecs, we can stop */
1813 ret = count;
1814 break;
1815 }
1816 }
1817 /* we did not get all the codec info, but we read too much data */
1818 if (read_size >= MAX_READ_SIZE) {
1819 ret = count;
1820 break;
1821 }
1822
1823 /* NOTE: a new stream can be added there if no header in file
1824 (AVFMTCTX_NOHEADER) */
1825 ret = av_read_frame_internal(ic, &pkt1);
1826 if (ret < 0) {
1827 /* EOF or error */
1828 ret = -1; /* we could not have all the codec parameters before EOF */
1829 for(i=0;i<ic->nb_streams;i++) {
1830 st = ic->streams[i];
1831 if (!has_codec_parameters(st->codec)){
1832 char buf[256];
1833 avcodec_string(buf, sizeof(buf), st->codec, 0);
1834 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1835 } else {
1836 ret = 0;
1837 }
1838 }
1839 break;
1840 }
1841
1842 pktl = av_mallocz(sizeof(AVPacketList));
1843 if (!pktl) {
1844 ret = AVERROR_NOMEM;
1845 break;
1846 }
1847
1848 /* add the packet in the buffered packet list */
1849 *ppktl = pktl;
1850 ppktl = &pktl->next;
1851
1852 pkt = &pktl->pkt;
1853 *pkt = pkt1;
1854
1855 /* duplicate the packet */
1856 if (av_dup_packet(pkt) < 0) {
1857 ret = AVERROR_NOMEM;
1858 break;
1859 }
1860
1861 read_size += pkt->size;
1862
1863 st = ic->streams[pkt->stream_index];
1864 st->codec_info_duration += pkt->duration;
1865 if (pkt->duration != 0)
1866 st->codec_info_nb_frames++;
1867
1868 {
1869 int index= pkt->stream_index;
1870 int64_t last= last_dts[index];
1871 int64_t duration= pkt->dts - last;
1872
1873 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1874 if(duration*duration_count[index]*10/9 < duration_sum[index]){
1875 duration_sum[index]= duration;
1876 duration_count[index]=1;
1877 }else{
1878 int factor= av_rescale(2*duration, duration_count[index], duration_sum[index]);
1879 if(factor==3)
1880 duration_count[index] *= 2;
1881 factor= av_rescale(duration, duration_count[index], duration_sum[index]);
1882 duration_sum[index] += duration;
1883 duration_count[index]+= factor;
1884 }
1885 if(st->codec_info_nb_frames == 0 && 0)
1886 st->codec_info_duration += duration;
1887 }
1888 last_dts[pkt->stream_index]= pkt->dts;
1889 }
1890 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1891 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1892 if(i){
1893 st->codec->extradata_size= i;
1894 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1895 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1896 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1897 }
1898 }
1899
1900 /* if still no information, we try to open the codec and to
1901 decompress the frame. We try to avoid that in most cases as
1902    it takes longer and uses more memory. For MPEG-4, we need to
1903    decompress for QuickTime. */
1904 if (!has_codec_parameters(st->codec) /*&&
1905 (st->codec->codec_id == CODEC_ID_FLV1 ||
1906 st->codec->codec_id == CODEC_ID_H264 ||
1907 st->codec->codec_id == CODEC_ID_H263 ||
1908 st->codec->codec_id == CODEC_ID_H261 ||
1909 st->codec->codec_id == CODEC_ID_VORBIS ||
1910 st->codec->codec_id == CODEC_ID_MJPEG ||
1911 st->codec->codec_id == CODEC_ID_PNG ||
1912 st->codec->codec_id == CODEC_ID_PAM ||
1913 st->codec->codec_id == CODEC_ID_PGM ||
1914 st->codec->codec_id == CODEC_ID_PGMYUV ||
1915 st->codec->codec_id == CODEC_ID_PBM ||
1916 st->codec->codec_id == CODEC_ID_PPM ||
1917 st->codec->codec_id == CODEC_ID_SHORTEN ||
1918 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1919 try_decode_frame(st, pkt->data, pkt->size);
1920
1921 if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
1922 break;
1923 }
1924 count++;
1925 }
1926
1927 // close codecs which were opened in try_decode_frame()
1928 for(i=0;i<ic->nb_streams;i++) {
1929 st = ic->streams[i];
1930 if(st->codec->codec)
1931 avcodec_close(st->codec);
1932 }
1933 for(i=0;i<ic->nb_streams;i++) {
1934 st = ic->streams[i];
1935 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1936 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
1937 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
1938
1939 if(duration_count[i]
1940 && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) &&
1941 //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
1942 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den){
1943 int64_t num, den, error, best_error;
1944
1945 num= st->time_base.den*duration_count[i];
1946 den= st->time_base.num*duration_sum[i];
1947
1948 best_error= INT64_MAX;
1949 for(j=1; j<60*12; j++){
1950 error= FFABS(1001*12*num - 1001*j*den);
1951 if(error < best_error){
1952 best_error= error;
1953 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, j, 12, INT_MAX);
1954 }
1955 }
1956 for(j=0; j<3; j++){
1957 static const int ticks[]= {24,30,60};
1958 error= FFABS(1001*12*num - 1000*12*den * ticks[j]);
1959 if(error < best_error){
1960 best_error= error;
1961 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, ticks[j]*1000, 1001, INT_MAX);
1962 }
1963 }
1964 }
1965
1966 if (!st->r_frame_rate.num){
1967 if( st->codec->time_base.den * (int64_t)st->time_base.num
1968 <= st->codec->time_base.num * (int64_t)st->time_base.den){
1969 st->r_frame_rate.num = st->codec->time_base.den;
1970 st->r_frame_rate.den = st->codec->time_base.num;
1971 }else{
1972 st->r_frame_rate.num = st->time_base.den;
1973 st->r_frame_rate.den = st->time_base.num;
1974 }
1975 }
1976 }
1977 }
1978
1979 av_estimate_timings(ic);
1980 #if 0
1981 /* correct DTS for b frame streams with no timestamps */
1982 for(i=0;i<ic->nb_streams;i++) {
1983 st = ic->streams[i];
1984 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1985 if(b-frames){
1986 ppktl = &ic->packet_buffer;
1987 while(ppkt1){
1988 if(ppkt1->stream_index != i)
1989 continue;
1990 if(ppkt1->pkt->dts < 0)
1991 break;
1992 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
1993 break;
1994 ppkt1->pkt->dts -= delta;
1995 ppkt1= ppkt1->next;
1996 }
1997 if(ppkt1)
1998 continue;
1999 st->cur_dts -= delta;
2000 }
2001 }
2002 }
2003 #endif
2004 return ret;
2005 }
2006
2007 /*******************************************************/
2008
2009 /**
2010 * Start playing a network-based stream (e.g. an RTSP stream) at the
2011 * current position.
2012 */
2013 int av_read_play(AVFormatContext *s)
2014 {
2015 if (!s->iformat->read_play)
2016 return AVERROR_NOTSUPP;
2017 return s->iformat->read_play(s);
2018 }
2019
2020 /**
2021 * Pause a network-based stream (e.g. an RTSP stream).
2022 *
2023 * Use av_read_play() to resume it.
2024 */
2025 int av_read_pause(AVFormatContext *s)
2026 {
2027 if (!s->iformat->read_pause)
2028 return AVERROR_NOTSUPP;
2029 return s->iformat->read_pause(s);
2030 }
2031
2032 /**
2033 * Close a media file (but not its codecs).
2034 *
2035 * @param s media file handle
2036 */
2037 void av_close_input_file(AVFormatContext *s)
2038 {
2039 int i, must_open_file;
2040 AVStream *st;
2041
2042 /* free previous packet */
2043 if (s->cur_st && s->cur_st->parser)
2044 av_free_packet(&s->cur_pkt);
2045
2046 if (s->iformat->read_close)
2047 s->iformat->read_close(s);
2048 for(i=0;i<s->nb_streams;i++) {
2049 /* free all data in a stream component */
2050 st = s->streams[i];
2051 if (st->parser) {
2052 av_parser_close(st->parser);
2053 }
2054 av_free(st->index_entries);
2055 av_free(st->codec->extradata);
2056 av_free(st->codec);
2057 av_free(st);
2058 }
2059 flush_packet_queue(s);
2060 must_open_file = 1;
2061 if (s->iformat->flags & AVFMT_NOFILE) {
2062 must_open_file = 0;
2063 }
2064 if (must_open_file) {
2065 url_fclose(&s->pb);
2066 }
2067 av_freep(&s->priv_data);
2068 av_free(s);
2069 }
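
/* Usage sketch (illustrative only): a minimal demuxing lifecycle ending in
 * av_close_input_file(); the file name and the packet handling are
 * placeholders, and most error handling is elided.
 * @code
 * AVFormatContext *ic;
 * AVPacket pkt;
 *
 * if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) < 0)
 *     return -1;
 * if (av_find_stream_info(ic) < 0)
 *     return -1;
 * dump_format(ic, 0, "input.avi", 0);
 * while (av_read_frame(ic, &pkt) >= 0) {
 *     // consume pkt.data / pkt.size here
 *     av_free_packet(&pkt);
 * }
 * av_close_input_file(ic);
 * @endcode
 */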
2070
2071 /**
2072 * Add a new stream to a media file.
2073 *
2074 * Can only be called in the read_header() function. If the flag
2075 * AVFMTCTX_NOHEADER is set in the format context, then new streams
2076 * can also be added in read_packet().
2077 *
2078 * @param s media file handle
2079 * @param id file format dependent stream id
2080 */
2081 AVStream *av_new_stream(AVFormatContext *s, int id)
2082 {
2083 AVStream *st;
2084 int i;
2085
2086 if (s->nb_streams >= MAX_STREAMS)
2087 return NULL;
2088
2089 st = av_mallocz(sizeof(AVStream));
2090 if (!st)
2091 return NULL;
2092
2093 st->codec= avcodec_alloc_context();
2094 if (s->iformat) {
2095 /* no default bitrate if decoding */
2096 st->codec->bit_rate = 0;
2097 }
2098 st->index = s->nb_streams;
2099 st->id = id;
2100 st->start_time = AV_NOPTS_VALUE;
2101 st->duration = AV_NOPTS_VALUE;
2102 st->cur_dts = AV_NOPTS_VALUE;
2103
2104 /* default pts settings is MPEG like */
2105 av_set_pts_info(st, 33, 1, 90000);
2106 st->last_IP_pts = AV_NOPTS_VALUE;
2107 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2108 st->pts_buffer[i]= AV_NOPTS_VALUE;
2109
2110 s->streams[s->nb_streams++] = st;
2111 return st;
2112 }
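
/* Usage sketch (illustrative only): creating a stream from a hypothetical
 * demuxer's read_header() callback; the codec parameters shown are
 * assumptions for a raw stereo PCM input.
 * @code
 * static int my_read_header(AVFormatContext *s, AVFormatParameters *ap)
 * {
 *     AVStream *st = av_new_stream(s, 0);
 *     if (!st)
 *         return AVERROR_NOMEM;
 *     st->codec->codec_type  = CODEC_TYPE_AUDIO;
 *     st->codec->codec_id    = CODEC_ID_PCM_S16LE;
 *     st->codec->sample_rate = 44100;
 *     st->codec->channels    = 2;
 *     av_set_pts_info(st, 64, 1, st->codec->sample_rate);
 *     return 0;
 * }
 * @endcode
 */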
2113
2114 /************************************************************/
2115 /* output media file */
2116
2117 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2118 {
2119 int ret;
2120
2121 if (s->oformat->priv_data_size > 0) {
2122 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2123 if (!s->priv_data)
2124 return AVERROR_NOMEM;
2125 } else
2126 s->priv_data = NULL;
2127
2128 if (s->oformat->set_parameters) {
2129 ret = s->oformat->set_parameters(s, ap);
2130 if (ret < 0)
2131 return ret;
2132 }
2133 return 0;
2134 }
2135
2136 /**
2137 * Allocate the stream private data and write the stream header to an
2138 * output media file.
2139 *
2140 * @param s media file handle
2141 * @return 0 if OK. AVERROR_xxx if error.
2142 */
2143 int av_write_header(AVFormatContext *s)
2144 {
2145 int ret, i;
2146 AVStream *st;
2147
2148 // some sanity checks
2149 for(i=0;i<s->nb_streams;i++) {
2150 st = s->streams[i];
2151
2152 switch (st->codec->codec_type) {
2153 case CODEC_TYPE_AUDIO:
2154 if(st->codec->sample_rate<=0){
2155 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2156 return -1;
2157 }
2158 break;
2159 case CODEC_TYPE_VIDEO:
2160 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2161 av_log(s, AV_LOG_ERROR, "time base not set\n");
2162 return -1;
2163 }
2164 if(st->codec->width<=0 || st->codec->height<=0){
2165 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2166 return -1;
2167 }
2168 break;
2169 }
2170 }
2171
2172 if(s->oformat->write_header){
2173 ret = s->oformat->write_header(s);
2174 if (ret < 0)
2175 return ret;
2176 }
2177
2178 /* init PTS generation */
2179 for(i=0;i<s->nb_streams;i++) {
2180 int64_t den = AV_NOPTS_VALUE;
2181 st = s->streams[i];
2182
2183 switch (st->codec->codec_type) {
2184 case CODEC_TYPE_AUDIO:
2185 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2186 break;
2187 case CODEC_TYPE_VIDEO:
2188 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2189 break;
2190 default:
2191 break;
2192 }
2193 if (den != AV_NOPTS_VALUE) {
2194 if (den <= 0)
2195 return AVERROR_INVALIDDATA;
2196 av_frac_init(&st->pts, 0, 0, den);
2197 }
2198 }
2199 return 0;
2200 }
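
/* Usage sketch (illustrative only): typical muxer setup leading up to
 * av_write_header(); "out.mpg" is a placeholder and the per-stream codec
 * setup is elided.
 * @code
 * AVFormatContext *oc = av_alloc_format_context();
 *
 * oc->oformat = guess_format(NULL, "out.mpg", NULL);
 * snprintf(oc->filename, sizeof(oc->filename), "%s", "out.mpg");
 * // ... add streams with av_new_stream() and fill their codec contexts ...
 * if (av_set_parameters(oc, NULL) < 0)
 *     return -1;
 * if (!(oc->oformat->flags & AVFMT_NOFILE) &&
 *     url_fopen(&oc->pb, "out.mpg", URL_WRONLY) < 0)
 *     return -1;
 * if (av_write_header(oc) < 0)
 *     return -1;
 * @endcode
 */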
2201
2202 //FIXME merge with compute_pkt_fields
2203 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2204 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2205 int num, den, frame_size, i;
2206
2207 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2208
2209 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2210 return -1;*/
2211
2212 /* duration field */
2213 if (pkt->duration == 0) {
2214 compute_frame_duration(&num, &den, st, NULL, pkt);
2215 if (den && num) {
2216 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2217 }
2218 }
2219
2220 //XXX/FIXME this is a temporary hack until all encoders output pts
2221 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2222 pkt->dts=
2223 // pkt->pts= st->cur_dts;
2224 pkt->pts= st->pts.val;
2225 }
2226
2227 //calculate dts from pts
2228 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2229 st->pts_buffer[0]= pkt->pts;
2230 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2231 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2232 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2233 SWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2234
2235 pkt->dts= st->pts_buffer[0];
2236 }
2237
2238 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2239 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2240 return -1;
2241 }
2242 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2243 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2244 return -1;
2245 }
2246
2247 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
2248 st->cur_dts= pkt->dts;
2249 st->pts.val= pkt->dts;
2250
2251 /* update pts */
2252 switch (st->codec->codec_type) {
2253 case CODEC_TYPE_AUDIO:
2254 frame_size = get_audio_frame_size(st->codec, pkt->size);
2255
2256 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2257 but it would be better if we had the real timestamps from the encoder */
2258 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2259 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2260 }
2261 break;
2262 case CODEC_TYPE_VIDEO:
2263 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2264 break;
2265 default:
2266 break;
2267 }
2268 return 0;
2269 }
2270
2271 static void truncate_ts(AVStream *st, AVPacket *pkt){
2272 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2273
2274 // if(pkt->dts < 0)
2275 // pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2276
2277 pkt->pts &= pts_mask;
2278 pkt->dts &= pts_mask;
2279 }
2280
2281 /**
2282 * Write a packet to an output media file.
2283 *
2284 * The packet shall contain one audio or video frame.
2285 *
2286 * @param s media file handle
2287 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2288 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2289 */
2290 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2291 {
2292 int ret;
2293
2294 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2295 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2296 return ret;
2297
2298 truncate_ts(s->streams[pkt->stream_index], pkt);
2299
2300 ret= s->oformat->write_packet(s, pkt);
2301 if(!ret)
2302 ret= url_ferror(&s->pb);
2303 return ret;
2304 }
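
/* Usage sketch (illustrative only): writing packets that are already
 * interleaved, e.g. straight from av_read_frame(); "ic" and "oc" are assumed
 * to be opened input/output contexts whose streams share the same time base.
 * @code
 * AVPacket pkt;
 * while (av_read_frame(ic, &pkt) >= 0) {
 *     int ret = av_write_frame(oc, &pkt);
 *     av_free_packet(&pkt);
 *     if (ret < 0)
 *         break;
 * }
 * @endcode
 */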
2305
2306 /**
2307 * Interleave a packet per DTS in an output media file.
2308 *
2309 * Packets with pkt->destruct == av_destruct_packet will be freed inside this function,
2310 * so they cannot be used afterwards. Note that calling av_free_packet() on them is still safe.
2311 *
2312 * @param s media file handle
2313 * @param out the interleaved packet will be output here
2314 * @param in the input packet
2315 * @param flush 1 if no further packets are available as input and all
2316 * remaining packets should be output
2317 * @return 1 if a packet was output, 0 if no packet could be output,
2318 * < 0 if an error occurred
2319 */
2320 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2321 AVPacketList *pktl, **next_point, *this_pktl;
2322 int stream_count=0;
2323 int streams[MAX_STREAMS];
2324
2325 if(pkt){
2326 AVStream *st= s->streams[ pkt->stream_index];
2327
2328 // assert(pkt->destruct != av_destruct_packet); //FIXME
2329
2330 this_pktl = av_mallocz(sizeof(AVPacketList));
2331 this_pktl->pkt= *pkt;
2332 if(pkt->destruct == av_destruct_packet)
2333 pkt->destruct= NULL; // non shared -> must keep original from being freed
2334 else
2335 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2336
2337 next_point = &s->packet_buffer;
2338 while(*next_point){
2339 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2340 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2341 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2342 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2343 break;
2344 next_point= &(*next_point)->next;
2345 }
2346 this_pktl->next= *next_point;
2347 *next_point= this_pktl;
2348 }
2349
2350 memset(streams, 0, sizeof(streams));
2351 pktl= s->packet_buffer;
2352 while(pktl){
2353 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2354 if(streams[ pktl->pkt.stream_index ] == 0)
2355 stream_count++;
2356 streams[ pktl->pkt.stream_index ]++;
2357 pktl= pktl->next;
2358 }
2359
2360 if(s->nb_streams == stream_count || (flush && stream_count)){
2361 pktl= s->packet_buffer;
2362 *out= pktl->pkt;
2363
2364 s->packet_buffer= pktl->next;
2365 av_freep(&pktl);
2366 return 1;
2367 }else{
2368 av_init_packet(out);
2369 return 0;
2370 }
2371 }
2372
2373 /**
2374 * Interleaves an AVPacket correctly so it can be muxed.
2375 * @param out the interleaved packet will be output here
2376 * @param in the input packet
2377 * @param flush 1 if no further packets are available as input and all
2378 * remaining packets should be output
2379 * @return 1 if a packet was output, 0 if no packet could be output,
2380 * < 0 if an error occurred
2381 */
2382 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2383 if(s->oformat->interleave_packet)
2384 return s->oformat->interleave_packet(s, out, in, flush);
2385 else
2386 return av_interleave_packet_per_dts(s, out, in, flush);
2387 }
2388
2389 /**
2390 * Writes a packet to an output media file ensuring correct interleaving.
2391 *
2392 * The packet must contain one audio or video frame.
2393 * If the packets are already correctly interleaved, the application should
2394 * call av_write_frame() instead, as it is slightly faster. It is also important
2395 * to keep in mind that completely non-interleaved input will need huge amounts
2396 * of memory to interleave with this, so it is preferable to interleave at the
2397 * demuxer level.
2398 *
2399 * @param s media file handle
2400 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2401 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2402 */
2403 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2404 AVStream *st= s->streams[ pkt->stream_index];
2405
2406 //FIXME/XXX/HACK drop zero sized packets
2407 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2408 return 0;
2409
2410 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts);
2411 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2412 return -1;
2413
2414 if(pkt->dts == AV_NOPTS_VALUE)
2415 return -1;
2416
2417 for(;;){
2418 AVPacket opkt;
2419 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2420 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2421 return ret;
2422
2423 truncate_ts(s->streams[opkt.stream_index], &opkt);
2424 ret= s->oformat->write_packet(s, &opkt);
2425
2426 av_free_packet(&opkt);
2427 pkt= NULL;
2428
2429 if(ret<0)
2430 return ret;
2431 if(url_ferror(&s->pb))
2432 return url_ferror(&s->pb);
2433 }
2434 }
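
/* Usage sketch (illustrative only): feeding an encoded frame to the muxer;
 * "c" (the encoder context), "st", "outbuf" and "out_size" are assumptions
 * taken from a typical encoding loop.
 * @code
 * AVPacket pkt;
 * av_init_packet(&pkt);
 * pkt.stream_index = st->index;
 * pkt.data = outbuf;
 * pkt.size = out_size;
 * if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
 *     pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
 * if (c->coded_frame && c->coded_frame->key_frame)
 *     pkt.flags |= PKT_FLAG_KEY;
 * if (av_interleaved_write_frame(oc, &pkt) != 0)
 *     return -1;
 * @endcode
 */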
2435
2436 /**
2437 * @brief Write the stream trailer to an output media file and
2438 * free the file private data.
2439 *
2440 * @param s media file handle
2441 * @return 0 if OK. AVERROR_xxx if error.
2442 */
2443 int av_write_trailer(AVFormatContext *s)
2444 {
2445 int ret, i;
2446
2447 for(;;){
2448 AVPacket pkt;
2449 ret= av_interleave_packet(s, &pkt, NULL, 1);
2450 if(ret<0) //FIXME cleanup needed for ret<0 ?
2451 goto fail;
2452 if(!ret)
2453 break;
2454
2455 truncate_ts(s->streams[pkt.stream_index], &pkt);
2456 ret= s->oformat->write_packet(s, &pkt);
2457
2458 av_free_packet(&pkt);
2459
2460 if(ret<0)
2461 goto fail;
2462 if(url_ferror(&s->pb))
2463 goto fail;
2464 }
2465
2466 if(s->oformat->write_trailer)
2467 ret = s->oformat->write_trailer(s);
2468 fail:
2469 if(ret == 0)
2470 ret=url_ferror(&s->pb);
2471 for(i=0;i<s->nb_streams;i++)
2472 av_freep(&s->streams[i]->priv_data);
2473 av_freep(&s->priv_data);
2474 return ret;
2475 }
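
/* Usage sketch (illustrative only): finishing the output file started in the
 * av_write_header() sketch above; "oc" is the output context.
 * @code
 * av_write_trailer(oc);
 * // close the encoders and free the streams here, then close the file
 * if (!(oc->oformat->flags & AVFMT_NOFILE))
 *     url_fclose(&oc->pb);
 * av_free(oc);
 * @endcode
 */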
2476
2477 /* "user interface" functions */
2478
2479 void dump_format(AVFormatContext *ic,
2480 int index,
2481 const char *url,
2482 int is_output)
2483 {
2484 int i, flags;
2485 char buf[256];
2486
2487 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2488 is_output ? "Output" : "Input",
2489 index,
2490 is_output ? ic->oformat->name : ic->iformat->name,
2491 is_output ? "to" : "from", url);
2492 if (!is_output) {
2493 av_log(NULL, AV_LOG_INFO, " Duration: ");
2494 if (ic->duration != AV_NOPTS_VALUE) {
2495 int hours, mins, secs, us;
2496 secs = ic->duration / AV_TIME_BASE;
2497 us = ic->duration % AV_TIME_BASE;
2498 mins = secs / 60;
2499 secs %= 60;
2500 hours = mins / 60;
2501 mins %= 60;
2502 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2503 (10 * us) / AV_TIME_BASE);
2504 } else {
2505 av_log(NULL, AV_LOG_INFO, "N/A");
2506 }
2507 if (ic->start_time != AV_NOPTS_VALUE) {
2508 int secs, us;
2509 av_log(NULL, AV_LOG_INFO, ", start: ");
2510 secs = ic->start_time / AV_TIME_BASE;
2511 us = ic->start_time % AV_TIME_BASE;
2512 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2513 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2514 }
2515 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2516 if (ic->bit_rate) {
2517 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2518 } else {
2519 av_log(NULL, AV_LOG_INFO, "N/A");
2520 }
2521 av_log(NULL, AV_LOG_INFO, "\n");
2522 }
2523 for(i=0;i<ic->nb_streams;i++) {
2524 AVStream *st = ic->streams[i];
2525 int g= ff_gcd(st->time_base.num, st->time_base.den);
2526 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2527 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2528 /* the pid is important information, so we display it */
2529 /* XXX: add a generic system */
2530 if (is_output)
2531 flags = ic->oformat->flags;
2532 else
2533 flags = ic->iformat->flags;
2534 if (flags & AVFMT_SHOW_IDS) {
2535 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2536 }
2537 if (strlen(st->language) > 0) {
2538 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2539 }
2540 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2541 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2542 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2543 if(st->r_frame_rate.den && st->r_frame_rate.num)
2544 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
2545 /* else if(st->time_base.den && st->time_base.num)
2546 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
2547 else
2548 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
2549 }
2550 av_log(NULL, AV_LOG_INFO, "\n");
2551 }
2552 }
2553
2554 typedef struct {
2555 const char *abv;
2556 int width, height;
2557 int frame_rate, frame_rate_base;
2558 } AbvEntry;
2559
2560 static AbvEntry frame_abvs[] = {
2561 { "ntsc", 720, 480, 30000, 1001 },
2562 { "pal", 720, 576, 25, 1 },
2563 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2564 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2565 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2566 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2567 { "film", 352, 240, 24, 1 },
2568 { "ntsc-film", 352, 240, 24000, 1001 },
2569 { "sqcif", 128, 96, 0, 0 },
2570 { "qcif", 176, 144, 0, 0 },
2571 { "cif", 352, 288, 0, 0 },
2572 { "4cif", 704, 576, 0, 0 },
2573 };
2574
2575 /**
2576 * Parses the frame width and height out of string str (either an abbreviation such as "pal" or an explicit "<width>x<height>").
2577 */
2578 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2579 {
2580 int i;
2581 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2582 const char *p;
2583 int frame_width = 0, frame_height = 0;
2584
2585 for(i=0;i<n;i++) {
2586 if (!strcmp(frame_abvs[i].abv, str)) {
2587 frame_width = frame_abvs[i].width;
2588 frame_height = frame_abvs[i].height;
2589 break;
2590 }
2591 }
2592 if (i == n) {
2593 p = str;
2594 frame_width = strtol(p, (char **)&p, 10);
2595 if (*p)
2596 p++;
2597 frame_height = strtol(p, (char **)&p, 10);
2598 }
2599 if (frame_width <= 0 || frame_height <= 0)
2600 return -1;
2601 *width_ptr = frame_width;
2602 *height_ptr = frame_height;
2603 return 0;
2604 }
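
/* Usage sketch (illustrative only): both an abbreviation from the table above
 * and an explicit "<width>x<height>" string are accepted.
 * @code
 * int w, h;
 * parse_image_size(&w, &h, "qcif");    // w = 176, h = 144
 * parse_image_size(&w, &h, "640x480"); // w = 640, h = 480
 * @endcode
 */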
2605
2606 /**
2607 * Converts frame rate from string to a fraction.
2608 *
2609 * First we try to get an exact integer or fractional frame rate.
2610 * If this fails we convert the frame rate to a double and return
2611 * an approximate fraction using the DEFAULT_FRAME_RATE_BASE.
2612 */
2613 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2614 {
2615 int i;
2616 char* cp;
2617
2618 /* First, we check our abbreviation table */
2619 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2620 if (!strcmp(frame_abvs[i].abv, arg)) {
2621 *frame_rate = frame_abvs[i].frame_rate;
2622 *frame_rate_base = frame_abvs[i].frame_rate_base;
2623 return 0;
2624 }
2625
2626 /* Then, we try to parse it as fraction */
2627 cp = strchr(arg, '/');
2628 if (!cp)
2629 cp = strchr(arg, ':');
2630 if (cp) {
2631 char* cpp;
2632 *frame_rate = strtol(arg, &cpp, 10);
2633 if (cpp != arg || cpp == cp)
2634 *frame_rate_base = strtol(cp+1, &cpp, 10);
2635 else
2636 *frame_rate = 0;
2637 }
2638 else {
2639 /* Finally we give up and parse it as double */
2640 AVRational time_base = av_d2q(strtod(arg, 0), DEFAULT_FRAME_RATE_BASE);
2641 *frame_rate_base = time_base.den;
2642 *frame_rate = time_base.num;
2643 }
2644 if (!*frame_rate || !*frame_rate_base)
2645 return -1;
2646 else
2647 return 0;
2648 }
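
/* Usage sketch (illustrative only): abbreviations, fractions and decimal
 * strings are all accepted.
 * @code
 * int rate, rate_base;
 * parse_frame_rate(&rate, &rate_base, "ntsc");       // 30000 / 1001
 * parse_frame_rate(&rate, &rate_base, "30000/1001"); // 30000 / 1001
 * parse_frame_rate(&rate, &rate_base, "25");         // 25 / 1, via av_d2q()
 * @endcode
 */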
2649
2650 /**
2651 * Converts a date string to the number of microseconds since Jan 1st, 1970.
2652 *
2653 * @code
2654 * Syntax:
2655 * - If not a duration:
2656 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2657 * Time is local time unless Z is appended, in which case it is GMT.
2658 * Returns the date in microseconds since 1970.
2659 *
2660 * - If a duration:
2661 * HH[:MM[:SS[.m...]]]
2662 * S+[.m...]
2663 * @endcode
2664 */
2665 #ifndef CONFIG_WINCE
2666 int64_t parse_date(const char *datestr, int duration)
2667 {
2668 const char *p;
2669 int64_t t;
2670 struct tm dt;
2671 int i;
2672 static const char *date_fmt[] = {
2673 "%Y-%m-%d",
2674 "%Y%m%d",
2675 };
2676 static const char *time_fmt[] = {
2677 "%H:%M:%S",
2678 "%H%M%S",
2679 };
2680 const char *q;
2681 int is_utc, len;
2682 char lastch;
2683 int negative = 0;
2684
2685 #undef time
2686 time_t now = time(0);
2687
2688 len = strlen(datestr);
2689 if (len > 0)
2690 lastch = datestr[len - 1];
2691 else
2692 lastch = '\0';
2693 is_utc = (lastch == 'z' || lastch == 'Z');
2694
2695 memset(&dt, 0, sizeof(dt));
2696
2697 p = datestr;
2698 q = NULL;
2699 if (!duration) {
2700 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2701 q = small_strptime(p, date_fmt[i], &dt);
2702 if (q) {
2703 break;
2704 }
2705 }
2706
2707 if (!q) {
2708 if (is_utc) {
2709 dt = *gmtime(&now);
2710 } else {
2711 dt = *localtime(&now);
2712 }
2713 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2714 } else {
2715 p = q;
2716 }
2717
2718 if (*p == 'T' || *p == 't' || *p == ' ')
2719 p++;
2720
2721 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2722 q = small_strptime(p, time_fmt[i], &dt);
2723 if (q) {
2724 break;
2725 }
2726 }
2727 } else {
2728 if (p[0] == '-') {
2729 negative = 1;
2730 ++p;
2731 }
2732 q = small_strptime(p, time_fmt[0], &dt);
2733 if (!q) {
2734 dt.tm_sec = strtol(p, (char **)&q, 10);
2735 dt.tm_min = 0;
2736 dt.tm_hour = 0;
2737 }
2738 }
2739
2740 /* Now we have all the fields that we can get */
2741 if (!q) {
2742 if (duration)
2743 return 0;
2744 else
2745 return now * int64_t_C(1000000);
2746 }
2747
2748 if (duration) {
2749 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2750 } else {
2751 dt.tm_isdst = -1; /* unknown */
2752 if (is_utc) {
2753 t = mktimegm(&dt);
2754 } else {
2755 t = mktime(&dt);
2756 }
2757 }
2758
2759 t *= 1000000;
2760
2761 if (*q == '.') {
2762 int val, n;
2763 q++;
2764 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2765 if (!isdigit(*q))
2766 break;
2767 val += n * (*q - '0');
2768 }
2769 t += val;
2770 }
2771 return negative ? -t : t;
2772 }
2773 #endif /* CONFIG_WINCE */
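
/* Usage sketch (illustrative only): both forms return microseconds.
 * @code
 * int64_t t;
 * t = parse_date("2003-04-05 12:30:00", 0); // absolute date, local time
 * t = parse_date("00:01:30.5", 1);          // duration: 90500000 microseconds
 * @endcode
 */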
2774
2775 /**
2776 * Attempts to find a specific tag in a URL.
2777 *
2778 * Syntax: '?tag1=val1&tag2=val2...'. Only minimal URL decoding is done
2779 * ('+' is converted to a space). Returns 1 if the tag was found.
2780 */
2781 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2782 {
2783 const char *p;
2784 char tag[128], *q;
2785
2786 p = info;
2787 if (*p == '?')
2788 p++;
2789 for(;;) {
2790 q = tag;
2791 while (*p != '\0' && *p != '=' && *p != '&') {
2792 if ((q - tag) < sizeof(tag) - 1)
2793 *q++ = *p;
2794 p++;
2795 }
2796 *q = '\0';
2797 q = arg;
2798 if (*p == '=') {
2799 p++;
2800 while (*p != '&' && *p != '\0') {
2801 if ((q - arg) < arg_size - 1) {
2802 if (*p == '+')
2803 *q++ = ' ';
2804 else
2805 *q++ = *p;
2806 }
2807 p++;
2808 }
2809 *q = '\0';
2810 }
2811 if (!strcmp(tag, tag1))
2812 return 1;
2813 if (*p != '&')
2814 break;
2815 p++;
2816 }
2817 return 0;
2818 }
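
/* Usage sketch (illustrative only): extracting one tag from an option string;
 * the tag name "codec" is an arbitrary example.
 * @code
 * char val[64];
 * if (find_info_tag(val, sizeof(val), "codec", "?codec=mp3&rate=44100")) {
 *     // val now contains "mp3"
 * }
 * @endcode
 */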
2819
2820 /**
2821 * Returns in 'buf' the path with '%d' replaced by number.
2822 *
2823 * Also handles the '%0nd' format where 'n' is the total number
2824 * of digits and '%%'.
2825 *
2826 * @param buf destination buffer
2827 * @param buf_size destination buffer size
2828 * @param path numbered sequence string
2829 * @param number frame number
2830 * @return 0 if OK, -1 if format error.
2831 */
2832 int av_get_frame_filename(char *buf, int buf_size,
2833 const char *path, int number)
2834 {
2835 const char *p;
2836 char *q, buf1[20], c;
2837 int nd, len, percentd_found;
2838
2839 q = buf;
2840 p = path;
2841 percentd_found = 0;
2842 for(;;) {
2843 c = *p++;
2844 if (c == '\0')
2845 break;
2846 if (c == '%') {
2847 do {
2848 nd = 0;
2849 while (isdigit(*p)) {
2850 nd = nd * 10 + *p++ - '0';
2851 }
2852 c = *p++;
2853 } while (isdigit(c));
2854
2855 switch(c) {
2856 case '%':
2857 goto addchar;
2858 case 'd':
2859 if (percentd_found)
2860 goto fail;
2861 percentd_found = 1;
2862 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2863 len = strlen(buf1);
2864 if ((q - buf + len) > buf_size - 1)
2865 goto fail;
2866 memcpy(q, buf1, len);
2867 q += len;
2868 break;
2869 default:
2870 goto fail;
2871 }
2872 } else {
2873 addchar:
2874 if ((q - buf) < buf_size - 1)
2875 *q++ = c;
2876 }
2877 }
2878 if (!percentd_found)
2879 goto fail;
2880 *q = '\0';
2881 return 0;
2882 fail:
2883 *q = '\0';
2884 return -1;
2885 }
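
/* Usage sketch (illustrative only): expanding a numbered image sequence name.
 * @code
 * char name[1024];
 * if (av_get_frame_filename(name, sizeof(name), "frame%04d.ppm", 7) == 0) {
 *     // name now contains "frame0007.ppm"
 * }
 * @endcode
 */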
2886
2887 /**
2888 * Print a nice hex dump of a buffer
2889 * @param f stream for output
2890 * @param buf buffer
2891 * @param size buffer size
2892 */
2893 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2894 {
2895 int len, i, j, c;
2896
2897 for(i=0;i<size;i+=16) {
2898 len = size - i;
2899 if (len > 16)
2900 len = 16;
2901 fprintf(f, "%08x ", i);
2902 for(j=0;j<16;j++) {
2903 if (j < len)
2904 fprintf(f, " %02x", buf[i+j]);
2905 else
2906 fprintf(f, " ");
2907 }
2908 fprintf(f, " ");
2909 for(j=0;j<len;j++) {
2910 c = buf[i+j];
2911 if (c < ' ' || c > '~')
2912 c = '.';
2913 fprintf(f, "%c", c);
2914 }
2915 fprintf(f, "\n");
2916 }
2917 }
2918
2919 /**
2920 * Print on 'f' a nice dump of a packet
2921 * @param f stream for output
2922 * @param pkt packet to dump
2923 * @param dump_payload true if the payload must be displayed too
2924 */
2925 //FIXME needs to know the time_base
2926 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2927 {
2928 fprintf(f, "stream #%d:\n", pkt->stream_index);
2929 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2930 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2931 /* DTS is _always_ valid after av_read_frame() */
2932 fprintf(f, " dts=");
2933 if (pkt->dts == AV_NOPTS_VALUE)
2934 fprintf(f, "N/A");
2935 else
2936 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
2937 /* PTS may be not known if B frames are present */
2938 fprintf(f, " pts=");
2939 if (pkt->pts == AV_NOPTS_VALUE)
2940 fprintf(f, "N/A");
2941 else
2942 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
2943 fprintf(f, "\n");
2944 fprintf(f, " size=%d\n", pkt->size);
2945 if (dump_payload)
2946 av_hex_dump(f, pkt->data, pkt->size);
2947 }
2948
2949 void url_split(char *proto, int proto_size,
2950 char *authorization, int authorization_size,
2951 char *hostname, int hostname_size,
2952 int *port_ptr,
2953 char *path, int path_size,
2954 const char *url)
2955 {
2956 const char *p;
2957 char *q;
2958 int port;
2959
2960 port = -1;
2961
2962 p = url;
2963 q = proto;
2964 while (*p != ':' && *p != '\0') {
2965 if ((q - proto) < proto_size - 1)
2966 *q++ = *p;
2967 p++;
2968 }
2969 if (proto_size > 0)
2970 *q = '\0';
2971 if (authorization_size > 0)
2972 authorization[0] = '\0';
2973 if (*p == '\0') {
2974 if (proto_size > 0)
2975 proto[0] = '\0';
2976 if (hostname_size > 0)
2977 hostname[0] = '\0';
2978 p = url;
2979 } else {
2980 char *at,*slash; // PETR: position of '@' character and '/' character
2981
2982 p++;
2983 if (*p == '/')
2984 p++;
2985 if (*p == '/')
2986 p++;
2987 at = strchr(p,'@'); // PETR: get the position of '@'
2988 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
2989 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
2990
2991 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
2992
2993 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
2994 if (*p == '@') { // PETR: passed '@'
2995 if (authorization_size > 0)
2996 *q = '\0';
2997 q = hostname;
2998 at = NULL;
2999 } else if (!at) { // PETR: hostname
3000 if ((q - hostname) < hostname_size - 1)
3001 *q++ = *p;
3002 } else {
3003 if ((q - authorization) < authorization_size - 1)
3004 *q++ = *p;
3005 }
3006 p++;
3007 }
3008 if (hostname_size > 0)
3009 *q = '\0';
3010 if (*p == ':') {
3011 p++;
3012 port = strtoul(p, (char **)&p, 10);
3013 }
3014 }
3015 if (port_ptr)
3016 *port_ptr = port;
3017 pstrcpy(path, path_size, p);
3018 }
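
/* Usage sketch (illustrative only): splitting a full URL into its components;
 * the URL itself is a made-up example.
 * @code
 * char proto[16], auth[64], host[128], path[256];
 * int port;
 *
 * url_split(proto, sizeof(proto), auth, sizeof(auth), host, sizeof(host),
 *           &port, path, sizeof(path),
 *           "http://user:pass@server.example.com:8080/dir/file.mp3");
 * // proto = "http", auth = "user:pass", host = "server.example.com",
 * // port = 8080, path = "/dir/file.mp3"
 * @endcode
 */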
3019
3020 /**
3021 * Set the pts for a given stream.
3022 *
3023 * @param s stream
3024 * @param pts_wrap_bits number of bits effectively used by the pts
3025 * (used for wrap control, 33 is the value for MPEG)
3026 * @param pts_num numerator to convert to seconds (MPEG: 1)
3027 * @param pts_den denominator to convert to seconds (MPEG: 90000)
3028 */
3029 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3030 int pts_num, int pts_den)
3031 {
3032 s->pts_wrap_bits = pts_wrap_bits;
3033 s->time_base.num = pts_num;
3034 s->time_base.den = pts_den;
3035 }
3036
3037 /* fraction handling */
3038
3039 /**
3040 * f = val + (num / den) + 0.5.
3041 *
3042 * 'num' is normalized so that 0 <= num < den.
3043 *
3044 * @param f fractional number
3045 * @param val integer value
3046 * @param num must be >= 0
3047 * @param den must be >= 1
3048 */
3049 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3050 {
3051 num += (den >> 1);
3052 if (num >= den) {
3053 val += num / den;
3054 num = num % den;
3055 }
3056 f->val = val;
3057 f->num = num;
3058 f->den = den;
3059 }
3060
3061 /**
3062 * Set f to (val + 0.5).
3063 */
3064 static void av_frac_set(AVFrac *f, int64_t val)
3065 {
3066 f->val = val;
3067 f->num = f->den >> 1;
3068 }
3069
3070 /**
3071 * Fractional addition to f: f = f + (incr / f->den).
3072 *
3073 * @param f fractional number
3074 * @param incr increment, can be positive or negative
3075 */
3076 static void av_frac_add(AVFrac *f, int64_t incr)
3077 {
3078 int64_t num, den;
3079
3080 num = f->num + incr;
3081 den = f->den;
3082 if (num < 0) {
3083 f->val += num / den;
3084 num = num % den;
3085 if (num < 0) {
3086 num += den;
3087 f->val--;
3088 }
3089 } else if (num >= den) {
3090 f->val += num / den;
3091 num = num % den;
3092 }
3093 f->num = num;
3094 }
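
/* Worked example (illustrative only, since these helpers are static): the
 * numbers follow the audio case in av_write_header() and
 * compute_pkt_fields2(); the 1152-sample frame size is an assumption typical
 * of MP2 audio.
 * @code
 * AVFrac f;
 * av_frac_init(&f, 0, 0, 44100);   // den = time_base.num * sample_rate = 1 * 44100
 * av_frac_add(&f, 44100LL * 1152); // one packet of 1152 samples
 * // f.val is now 1152: the pts advanced by exactly one frame in units of
 * // the 1/44100 time base, and any remainder stays in f.num so rounding
 * // errors never accumulate across packets.
 * @endcode
 */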
3095
3096 /**
3097 * Register a new image format.
3098 * @param img_fmt Image format descriptor
3099 */
3100 void av_register_image_format(AVImageFormat *img_fmt)
3101 {
3102 AVImageFormat **p;
3103
3104 p = &first_image_format;
3105 while (*p != NULL) p = &(*p)->next;
3106 *p = img_fmt;
3107 img_fmt->next = NULL;
3108 }
3109
3110 /**
3111 * Guesses image format based on data in the image.
3112 */
3113 AVImageFormat *av_probe_image_format(AVProbeData *pd)
3114 {
3115 AVImageFormat *fmt1, *fmt;
3116 int score, score_max;
3117
3118 fmt = NULL;
3119 score_max = 0;
3120 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
3121 if (fmt1->img_probe) {
3122 score = fmt1->img_probe(pd);
3123 if (score > score_max) {
3124 score_max = score;
3125 fmt = fmt1;
3126 }
3127 }
3128 }
3129 return fmt;
3130 }
3131
3132 /**
3133 * Guesses image format based on file name extensions.
3134 */
3135 AVImageFormat *guess_image_format(const char *filename)
3136 {
3137 AVImageFormat *fmt1;
3138
3139 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
3140 if (fmt1->extensions && match_ext(filename, fmt1->extensions))
3141 return fmt1;
3142 }
3143 return NULL;
3144 }
3145
3146 /**
3147 * Read an image from a stream.
3148 * @param pb byte stream containing the image
3149 * @param fmt image format, NULL if probing is required
3150 */
3151 int av_read_image(ByteIOContext *pb, const char *filename,
3152 AVImageFormat *fmt,
3153 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
3154 {
3155 uint8_t buf[PROBE_BUF_MIN];
3156 AVProbeData probe_data, *pd = &probe_data;
3157 offset_t pos;
3158 int ret;
3159
3160 if (!fmt) {
3161 pd->filename = filename;
3162 pd->buf = buf;
3163 pos = url_ftell(pb);
3164 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_MIN);
3165 url_fseek(pb, pos, SEEK_SET);
3166 fmt = av_probe_image_format(pd);
3167 }
3168 if (!fmt)
3169 return AVERROR_NOFMT;
3170 ret = fmt->img_read(pb, alloc_cb, opaque);
3171 return ret;
3172 }
3173
3174 /**
3175 * Write an image to a stream.
3176 * @param pb byte stream for the image output
3177 * @param fmt image format
3178 * @param img image data and information
3179 */
3180 int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
3181 {
3182 return fmt->img_write(pb, img);
3183 }
3184