1 /*
2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19 #include "avformat.h"
20 #include "allformats.h"
21
22 #undef NDEBUG
23 #include <assert.h>
24
25 /**
26 * @file libavformat/utils.c
27 * Various utility functions for using the ffmpeg library.
28 */
29
30 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
31 static void av_frac_add(AVFrac *f, int64_t incr);
32 static void av_frac_set(AVFrac *f, int64_t val);
33
34 /** head of registered input format linked list. */
35 AVInputFormat *first_iformat = NULL;
36 /** head of registered output format linked list. */
37 AVOutputFormat *first_oformat = NULL;
38 /** head of registered image format linked list. */
39 AVImageFormat *first_image_format = NULL;
40
41 void av_register_input_format(AVInputFormat *format)
42 {
43 AVInputFormat **p;
44 p = &first_iformat;
45 while (*p != NULL) p = &(*p)->next;
46 *p = format;
47 format->next = NULL;
48 }
49
50 void av_register_output_format(AVOutputFormat *format)
51 {
52 AVOutputFormat **p;
53 p = &first_oformat;
54 while (*p != NULL) p = &(*p)->next;
55 *p = format;
56 format->next = NULL;
57 }
58
59 int match_ext(const char *filename, const char *extensions)
60 {
61 const char *ext, *p;
62 char ext1[32], *q;
63
64 if(!filename)
65 return 0;
66
67 ext = strrchr(filename, '.');
68 if (ext) {
69 ext++;
70 p = extensions;
71 for(;;) {
72 q = ext1;
73 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
74 *q++ = *p++;
75 *q = '\0';
76 if (!strcasecmp(ext1, ext))
77 return 1;
78 if (*p == '\0')
79 break;
80 p++;
81 }
82 }
83 return 0;
84 }
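/* Illustrative example of the matching above (hypothetical arguments):
 * match_ext("clip.MOV", "mov,mp4,m4a") returns 1 because the comparison is
 * case insensitive, while match_ext("clip.avi", "mov,mp4,m4a") returns 0. */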
85
86 AVOutputFormat *guess_format(const char *short_name, const char *filename,
87 const char *mime_type)
88 {
89 AVOutputFormat *fmt, *fmt_found;
90 int score_max, score;
91
92 /* specific test for image sequences */
93 #ifdef CONFIG_IMAGE2_MUXER
94 if (!short_name && filename &&
95 filename_number_test(filename) >= 0 &&
96 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
97 return guess_format("image2", NULL, NULL);
98 }
99 #endif
100 if (!short_name && filename &&
101 filename_number_test(filename) >= 0 &&
102 guess_image_format(filename)) {
103 return guess_format("image", NULL, NULL);
104 }
105
106 /* find the proper file type */
107 fmt_found = NULL;
108 score_max = 0;
109 fmt = first_oformat;
110 while (fmt != NULL) {
111 score = 0;
112 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
113 score += 100;
114 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
115 score += 10;
116 if (filename && fmt->extensions &&
117 match_ext(filename, fmt->extensions)) {
118 score += 5;
119 }
120 if (score > score_max) {
121 score_max = score;
122 fmt_found = fmt;
123 }
124 fmt = fmt->next;
125 }
126 return fmt_found;
127 }
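/* Illustrative example of the scoring above (hypothetical calls):
 * guess_format(NULL, "out.avi", NULL) selects a muxer purely by the "avi"
 * extension (+5), whereas guess_format("avi", NULL, NULL) matches the short
 * name (+100) regardless of any filename. */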
128
129 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
130 const char *mime_type)
131 {
132 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
133
134 if (fmt) {
135 AVOutputFormat *stream_fmt;
136 char stream_format_name[64];
137
138 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
139 stream_fmt = guess_format(stream_format_name, NULL, NULL);
140
141 if (stream_fmt)
142 fmt = stream_fmt;
143 }
144
145 return fmt;
146 }
147
148 /**
149 * Guesses the codec id based upon muxer and filename.
150 */
151 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
152 const char *filename, const char *mime_type, enum CodecType type){
153 if(type == CODEC_TYPE_VIDEO){
154 enum CodecID codec_id= CODEC_ID_NONE;
155
156 #ifdef CONFIG_IMAGE2_MUXER
157 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
158 codec_id= av_guess_image2_codec(filename);
159 }
160 #endif
161 if(codec_id == CODEC_ID_NONE)
162 codec_id= fmt->video_codec;
163 return codec_id;
164 }else if(type == CODEC_TYPE_AUDIO)
165 return fmt->audio_codec;
166 else
167 return CODEC_ID_NONE;
168 }
169
170 /**
171 * finds AVInputFormat based on input format's short name.
172 */
173 AVInputFormat *av_find_input_format(const char *short_name)
174 {
175 AVInputFormat *fmt;
176 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
177 if (!strcmp(fmt->name, short_name))
178 return fmt;
179 }
180 return NULL;
181 }
182
183 /* memory handling */
184
185 /**
186 * Default packet destructor.
187 */
188 void av_destruct_packet(AVPacket *pkt)
189 {
190 av_free(pkt->data);
191 pkt->data = NULL; pkt->size = 0;
192 }
193
194 /**
196 * Allocate the payload of a packet and initialize its fields to default values.
196 *
197 * @param pkt packet
198 * @param size wanted payload size
199 * @return 0 if OK. AVERROR_xxx otherwise.
200 */
201 int av_new_packet(AVPacket *pkt, int size)
202 {
203 void *data;
204 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
205 return AVERROR_NOMEM;
206 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
207 if (!data)
208 return AVERROR_NOMEM;
209 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
210
211 av_init_packet(pkt);
212 pkt->data = data;
213 pkt->size = size;
214 pkt->destruct = av_destruct_packet;
215 return 0;
216 }
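/* Minimal usage sketch for av_new_packet()/av_free_packet(); the payload size
 * and fill value are arbitrary illustration values. */
#if 0
static int example_packet_roundtrip(void)
{
    AVPacket pkt;
    int ret = av_new_packet(&pkt, 1024);   /* payload plus zeroed padding */
    if (ret < 0)
        return ret;
    memset(pkt.data, 0xff, pkt.size);      /* fill the payload */
    av_free_packet(&pkt);                  /* invokes av_destruct_packet() */
    return 0;
}
#endif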
217
218 /**
220 * Allocate and read the payload of a packet and initialize its fields to default values.
220 *
221 * @param pkt packet
222 * @param size wanted payload size
223 * @return >0 (read size) if OK. AVERROR_xxx otherwise.
224 */
225 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
226 {
227 int ret= av_new_packet(pkt, size);
228
229 if(ret<0)
230 return ret;
231
232 pkt->pos= url_ftell(s);
233
234 ret= get_buffer(s, pkt->data, size);
235 if(ret<=0)
236 av_free_packet(pkt);
237 else
238 pkt->size= ret;
239
240 return ret;
241 }
242
243 /* This is a hack - the packet memory allocation stuff is broken. The
244 packet data is duplicated into newly allocated memory if it was not really allocated */
245 int av_dup_packet(AVPacket *pkt)
246 {
247 if (pkt->destruct != av_destruct_packet) {
248 uint8_t *data;
249 /* we duplicate the packet and don't forget to put the padding
250 again */
251 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
252 return AVERROR_NOMEM;
253 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
254 if (!data) {
255 return AVERROR_NOMEM;
256 }
257 memcpy(data, pkt->data, pkt->size);
258 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
259 pkt->data = data;
260 pkt->destruct = av_destruct_packet;
261 }
262 return 0;
263 }
264
265 /* fifo handling */
266
267 int fifo_init(FifoBuffer *f, int size)
268 {
269 f->buffer = av_malloc(size);
270 if (!f->buffer)
271 return -1;
272 f->end = f->buffer + size;
273 f->wptr = f->rptr = f->buffer;
274 return 0;
275 }
276
277 void fifo_free(FifoBuffer *f)
278 {
279 av_free(f->buffer);
280 }
281
282 int fifo_size(FifoBuffer *f, uint8_t *rptr)
283 {
284 int size;
285
286 if(!rptr)
287 rptr= f->rptr;
288
289 if (f->wptr >= rptr) {
290 size = f->wptr - rptr;
291 } else {
292 size = (f->end - rptr) + (f->wptr - f->buffer);
293 }
294 return size;
295 }
296
297 /**
298 * Get data from the fifo (returns -1 if not enough data).
299 */
300 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
301 {
302 uint8_t *rptr;
303 int size, len;
304
305 if(!rptr_ptr)
306 rptr_ptr= &f->rptr;
307 rptr = *rptr_ptr;
308
309 if (f->wptr >= rptr) {
310 size = f->wptr - rptr;
311 } else {
312 size = (f->end - rptr) + (f->wptr - f->buffer);
313 }
314
315 if (size < buf_size)
316 return -1;
317 while (buf_size > 0) {
318 len = f->end - rptr;
319 if (len > buf_size)
320 len = buf_size;
321 memcpy(buf, rptr, len);
322 buf += len;
323 rptr += len;
324 if (rptr >= f->end)
325 rptr = f->buffer;
326 buf_size -= len;
327 }
328 *rptr_ptr = rptr;
329 return 0;
330 }
331
332 /**
333 * Resizes a FIFO.
334 */
335 void fifo_realloc(FifoBuffer *f, unsigned int new_size){
336 unsigned int old_size= f->end - f->buffer;
337
338 if(old_size < new_size){
339 uint8_t *old= f->buffer;
340
341 f->buffer= av_realloc(f->buffer, new_size);
342
343 f->rptr += f->buffer - old;
344 f->wptr += f->buffer - old;
345
346 if(f->wptr < f->rptr){
347 memmove(f->rptr + new_size - old_size, f->rptr, f->buffer + old_size - f->rptr);
348 f->rptr += new_size - old_size;
349 }
350 f->end= f->buffer + new_size;
351 }
352 }
353
354 void fifo_write(FifoBuffer *f, const uint8_t *buf, int size, uint8_t **wptr_ptr)
355 {
356 int len;
357 uint8_t *wptr;
358
359 if(!wptr_ptr)
360 wptr_ptr= &f->wptr;
361 wptr = *wptr_ptr;
362
363 while (size > 0) {
364 len = f->end - wptr;
365 if (len > size)
366 len = size;
367 memcpy(wptr, buf, len);
368 wptr += len;
369 if (wptr >= f->end)
370 wptr = f->buffer;
371 buf += len;
372 size -= len;
373 }
374 *wptr_ptr = wptr;
375 }
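/* Minimal FIFO round-trip sketch using the helpers above; the buffer sizes
 * are illustrative. Passing NULL for the rptr/wptr arguments makes the
 * functions use the FIFO's own read/write pointers. */
#if 0
static int example_fifo_roundtrip(void)
{
    FifoBuffer f;
    uint8_t in[16], out[16];

    if (fifo_init(&f, 64) < 0)
        return -1;
    memset(in, 0xab, sizeof(in));
    fifo_write(&f, in, sizeof(in), NULL);          /* queue 16 bytes */
    if (fifo_read(&f, out, sizeof(out), NULL) < 0) /* dequeue them again */
        return -1;
    fifo_free(&f);
    return 0;
}
#endif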
376
377 /* write data from the fifo to a ByteIOContext (return -1 if not enough data) */
378 int put_fifo(ByteIOContext *pb, FifoBuffer *f, int buf_size, uint8_t **rptr_ptr)
379 {
380 uint8_t *rptr = *rptr_ptr;
381 int size, len;
382
383 if (f->wptr >= rptr) {
384 size = f->wptr - rptr;
385 } else {
386 size = (f->end - rptr) + (f->wptr - f->buffer);
387 }
388
389 if (size < buf_size)
390 return -1;
391 while (buf_size > 0) {
392 len = f->end - rptr;
393 if (len > buf_size)
394 len = buf_size;
395 put_buffer(pb, rptr, len);
396 rptr += len;
397 if (rptr >= f->end)
398 rptr = f->buffer;
399 buf_size -= len;
400 }
401 *rptr_ptr = rptr;
402 return 0;
403 }
404
405 int filename_number_test(const char *filename)
406 {
407 char buf[1024];
408 if(!filename)
409 return -1;
410 return get_frame_filename(buf, sizeof(buf), filename, 1);
411 }
412
413 /**
414 * Guess file format.
415 */
416 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
417 {
418 AVInputFormat *fmt1, *fmt;
419 int score, score_max;
420
421 fmt = NULL;
422 score_max = 0;
423 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
424 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
425 continue;
426 score = 0;
427 if (fmt1->read_probe) {
428 score = fmt1->read_probe(pd);
429 } else if (fmt1->extensions) {
430 if (match_ext(pd->filename, fmt1->extensions)) {
431 score = 50;
432 }
433 }
434 if (score > score_max) {
435 score_max = score;
436 fmt = fmt1;
437 }
438 }
439 return fmt;
440 }
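/* Probing sketch: score the registered demuxers against a buffer that was
 * already read from the (hypothetical) file "input.dat". */
#if 0
static AVInputFormat *example_probe(uint8_t *buf, int buf_size)
{
    AVProbeData pd;
    pd.filename = "input.dat";
    pd.buf      = buf;
    pd.buf_size = buf_size;
    return av_probe_input_format(&pd, 1);  /* 1: the file is already opened */
}
#endif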
441
442 /************************************************************/
443 /* input media file */
444
445 /**
446 * Open a media file from an IO stream. 'fmt' must be specified.
447 */
448 static const char* format_to_name(void* ptr)
449 {
450 AVFormatContext* fc = (AVFormatContext*) ptr;
451 if(fc->iformat) return fc->iformat->name;
452 else if(fc->oformat) return fc->oformat->name;
453 else return "NULL";
454 }
455
456 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
457
458 AVFormatContext *av_alloc_format_context(void)
459 {
460 AVFormatContext *ic;
461 ic = av_mallocz(sizeof(AVFormatContext));
462 if (!ic) return ic;
463 ic->av_class = &av_format_context_class;
464 return ic;
465 }
466
467 /**
468 * Allocates all the structures needed to read an input stream.
469 * This does not open the needed codecs for decoding the stream[s].
470 */
471 int av_open_input_stream(AVFormatContext **ic_ptr,
472 ByteIOContext *pb, const char *filename,
473 AVInputFormat *fmt, AVFormatParameters *ap)
474 {
475 int err;
476 AVFormatContext *ic;
477 AVFormatParameters default_ap;
478
479 if(!ap){
480 ap=&default_ap;
481 memset(ap, 0, sizeof(default_ap));
482 }
483
484 ic = av_alloc_format_context();
485 if (!ic) {
486 err = AVERROR_NOMEM;
487 goto fail;
488 }
489 ic->iformat = fmt;
490 if (pb)
491 ic->pb = *pb;
492 ic->duration = AV_NOPTS_VALUE;
493 ic->start_time = AV_NOPTS_VALUE;
494 pstrcpy(ic->filename, sizeof(ic->filename), filename);
495
496 /* allocate private data */
497 if (fmt->priv_data_size > 0) {
498 ic->priv_data = av_mallocz(fmt->priv_data_size);
499 if (!ic->priv_data) {
500 err = AVERROR_NOMEM;
501 goto fail;
502 }
503 } else {
504 ic->priv_data = NULL;
505 }
506
507 err = ic->iformat->read_header(ic, ap);
508 if (err < 0)
509 goto fail;
510
511 if (pb)
512 ic->data_offset = url_ftell(&ic->pb);
513
514 *ic_ptr = ic;
515 return 0;
516 fail:
517 if (ic) {
518 av_freep(&ic->priv_data);
519 }
520 av_free(ic);
521 *ic_ptr = NULL;
522 return err;
523 }
524
525 /** Size of probe buffer, for guessing file type from file contents. */
526 #define PROBE_BUF_MIN 2048
527 #define PROBE_BUF_MAX (1<<20)
528
529 /**
530 * Open a media file as input. The codecs are not opened. Only the file
531 * header (if present) is read.
532 *
533 * @param ic_ptr the opened media file handle is put here
534 * @param filename filename to open.
535 * @param fmt if non NULL, force the file format to use
536 * @param buf_size optional buffer size (zero if default is OK)
537 * @param ap additional parameters needed when opening the file (NULL if default)
538 * @return 0 if OK. AVERROR_xxx otherwise.
539 */
540 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
541 AVInputFormat *fmt,
542 int buf_size,
543 AVFormatParameters *ap)
544 {
545 int err, must_open_file, file_opened, probe_size;
546 AVProbeData probe_data, *pd = &probe_data;
547 ByteIOContext pb1, *pb = &pb1;
548
549 file_opened = 0;
550 pd->filename = "";
551 if (filename)
552 pd->filename = filename;
553 pd->buf = NULL;
554 pd->buf_size = 0;
555
556 if (!fmt) {
557 /* guess format if no file can be opened */
558 fmt = av_probe_input_format(pd, 0);
559 }
560
561 /* do not open file if the format does not need it. XXX: specific
562 hack needed to handle RTSP/TCP */
563 must_open_file = 1;
564 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
565 must_open_file = 0;
566 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it's uninitialized
567 }
568
569 if (!fmt || must_open_file) {
570 /* open the file if it is needed or if the format is still unknown */
571 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
572 err = AVERROR_IO;
573 goto fail;
574 }
575 file_opened = 1;
576 if (buf_size > 0) {
577 url_setbufsize(pb, buf_size);
578 }
579
580 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
581 /* read probe data */
582 pd->buf= av_realloc(pd->buf, probe_size);
583 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
584 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
585 url_fclose(pb);
586 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
587 file_opened = 0;
588 err = AVERROR_IO;
589 goto fail;
590 }
591 }
592 /* guess file format */
593 fmt = av_probe_input_format(pd, 1);
594 }
595 av_freep(&pd->buf);
596 }
597
598 /* if still no format found, error */
599 if (!fmt) {
600 err = AVERROR_NOFMT;
601 goto fail;
602 }
603
604 /* XXX: suppress this hack for redirectors */
605 #ifdef CONFIG_NETWORK
606 if (fmt == &redir_demuxer) {
607 err = redir_open(ic_ptr, pb);
608 url_fclose(pb);
609 return err;
610 }
611 #endif
612
613 /* check filename in case an image number is expected */
614 if (fmt->flags & AVFMT_NEEDNUMBER) {
615 if (filename_number_test(filename) < 0) {
616 err = AVERROR_NUMEXPECTED;
617 goto fail;
618 }
619 }
620 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
621 if (err)
622 goto fail;
623 return 0;
624 fail:
625 av_freep(&pd->buf);
626 if (file_opened)
627 url_fclose(pb);
628 *ic_ptr = NULL;
629 return err;
630
631 }
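/* Demuxing sketch tying together av_open_input_file(), av_find_stream_info(),
 * av_read_frame() and av_close_input_file(); the filename is illustrative and
 * errors are only propagated, not reported. */
#if 0
static int example_demux(const char *filename)
{
    AVFormatContext *ic;
    AVPacket pkt;
    int err;

    err = av_open_input_file(&ic, filename, NULL, 0, NULL);
    if (err < 0)
        return err;
    err = av_find_stream_info(ic);          /* fill in codec parameters */
    if (err < 0) {
        av_close_input_file(ic);
        return err;
    }
    while (av_read_frame(ic, &pkt) >= 0) {  /* one demuxed packet per iteration */
        /* ... hand pkt.data/pkt.size to a decoder here ... */
        av_free_packet(&pkt);
    }
    av_close_input_file(ic);
    return 0;
}
#endif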
632
633 /*******************************************************/
634
635 /**
636 * Read a transport packet from a media file.
637 *
638 * This function is obsolete and should never be used.
639 * Use av_read_frame() instead.
640 *
641 * @param s media file handle
642 * @param pkt is filled
643 * @return 0 if OK. AVERROR_xxx if error.
644 */
645 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
646 {
647 return s->iformat->read_packet(s, pkt);
648 }
649
650 /**********************************************************/
651
652 /**
653 * Get the number of samples of an audio frame. Return -1 on error.
654 */
655 static int get_audio_frame_size(AVCodecContext *enc, int size)
656 {
657 int frame_size;
658
659 if (enc->frame_size <= 1) {
660 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
661
662 if (bits_per_sample) {
663 if (enc->channels == 0)
664 return -1;
665 frame_size = (size << 3) / (bits_per_sample * enc->channels);
666 } else {
667 /* used for example by ADPCM codecs */
668 if (enc->bit_rate == 0)
669 return -1;
670 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
671 }
672 } else {
673 frame_size = enc->frame_size;
674 }
675 return frame_size;
676 }
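/* Worked example for the fixed bits-per-sample branch above (illustrative
 * numbers): 16-bit stereo PCM with size = 4096 bytes gives
 * frame_size = (4096 << 3) / (16 * 2) = 1024 samples per channel. */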
677
678
679 /**
680 * Compute the frame duration in seconds as the fraction *pnum / *pden; both are set to 0 if it is not available.
681 */
682 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
683 AVCodecParserContext *pc, AVPacket *pkt)
684 {
685 int frame_size;
686
687 *pnum = 0;
688 *pden = 0;
689 switch(st->codec->codec_type) {
690 case CODEC_TYPE_VIDEO:
691 if(st->time_base.num*1000LL > st->time_base.den){
692 *pnum = st->time_base.num;
693 *pden = st->time_base.den;
694 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
695 *pnum = st->codec->time_base.num;
696 *pden = st->codec->time_base.den;
697 if (pc && pc->repeat_pict) {
698 *pden *= 2;
699 *pnum = (*pnum) * (2 + pc->repeat_pict);
700 }
701 }
702 break;
703 case CODEC_TYPE_AUDIO:
704 frame_size = get_audio_frame_size(st->codec, pkt->size);
705 if (frame_size < 0)
706 break;
707 *pnum = frame_size;
708 *pden = st->codec->sample_rate;
709 break;
710 default:
711 break;
712 }
713 }
714
715 static int is_intra_only(AVCodecContext *enc){
716 if(enc->codec_type == CODEC_TYPE_AUDIO){
717 return 1;
718 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
719 switch(enc->codec_id){
720 case CODEC_ID_MJPEG:
721 case CODEC_ID_MJPEGB:
722 case CODEC_ID_LJPEG:
723 case CODEC_ID_RAWVIDEO:
724 case CODEC_ID_DVVIDEO:
725 case CODEC_ID_HUFFYUV:
726 case CODEC_ID_FFVHUFF:
727 case CODEC_ID_ASV1:
728 case CODEC_ID_ASV2:
729 case CODEC_ID_VCR1:
730 return 1;
731 default: break;
732 }
733 }
734 return 0;
735 }
736
737 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
738 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
739 int64_t delta= last_ts - mask/2;
740 return ((lsb - delta)&mask) + delta;
741 }
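/* Worked example of the wrap correction above (illustrative values): with
 * lsb_bits = 33 (the MPEG PTS width), last_ts = (1LL<<33) - 100 and a wrapped
 * lsb of 50, mask is (1LL<<33)-1 and the result is (1LL<<33) + 50, i.e. the
 * timestamp is placed just after last_ts instead of back near zero. */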
742
743 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
744 AVCodecParserContext *pc, AVPacket *pkt)
745 {
746 int num, den, presentation_delayed;
747 /* handle wrapping */
748 if(st->cur_dts != AV_NOPTS_VALUE){
749 if(pkt->pts != AV_NOPTS_VALUE)
750 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
751 if(pkt->dts != AV_NOPTS_VALUE)
752 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
753 }
754
755 if (pkt->duration == 0) {
756 compute_frame_duration(&num, &den, st, pc, pkt);
757 if (den && num) {
758 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
759 }
760 }
761
762 if(is_intra_only(st->codec))
763 pkt->flags |= PKT_FLAG_KEY;
764
765 /* do we have a video B frame ? */
766 presentation_delayed = 0;
767 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
768 /* XXX: need has_b_frame, but cannot get it if the codec is
769 not initialized */
770 if (( st->codec->codec_id == CODEC_ID_H264
771 || st->codec->has_b_frames) &&
772 pc && pc->pict_type != FF_B_TYPE)
773 presentation_delayed = 1;
774 /* this may be redundant, but it shouldn't hurt */
775 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
776 presentation_delayed = 1;
777 }
778
779 if(st->cur_dts == AV_NOPTS_VALUE){
780 if(presentation_delayed) st->cur_dts = -pkt->duration;
781 else st->cur_dts = 0;
782 }
783
784 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
785 /* interpolate PTS and DTS if they are not present */
786 if (presentation_delayed) {
787 /* DTS = decompression time stamp */
788 /* PTS = presentation time stamp */
789 if (pkt->dts == AV_NOPTS_VALUE) {
790 /* if we know the last pts, use it */
791 if(st->last_IP_pts != AV_NOPTS_VALUE)
792 st->cur_dts = pkt->dts = st->last_IP_pts;
793 else
794 pkt->dts = st->cur_dts;
795 } else {
796 st->cur_dts = pkt->dts;
797 }
798 /* this is tricky: the dts must be incremented by the duration
799 of the frame we are displaying, i.e. the last I or P frame */
800 if (st->last_IP_duration == 0)
801 st->cur_dts += pkt->duration;
802 else
803 st->cur_dts += st->last_IP_duration;
804 st->last_IP_duration = pkt->duration;
805 st->last_IP_pts= pkt->pts;
806 /* cannot compute PTS if not present (we can compute it only
807 by knowing the future) */
808 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
809 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
810 int64_t old_diff= ABS(st->cur_dts - pkt->duration - pkt->pts);
811 int64_t new_diff= ABS(st->cur_dts - pkt->pts);
812 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
813 pkt->pts += pkt->duration;
814 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
815 }
816 }
817
818 /* presentation is not delayed : PTS and DTS are the same */
819 if (pkt->pts == AV_NOPTS_VALUE) {
820 if (pkt->dts == AV_NOPTS_VALUE) {
821 pkt->pts = st->cur_dts;
822 pkt->dts = st->cur_dts;
823 }
824 else {
825 st->cur_dts = pkt->dts;
826 pkt->pts = pkt->dts;
827 }
828 } else {
829 st->cur_dts = pkt->pts;
830 pkt->dts = pkt->pts;
831 }
832 st->cur_dts += pkt->duration;
833 }
834 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
835
836 /* update flags */
837 if (pc) {
838 pkt->flags = 0;
839 /* key frame computation */
840 switch(st->codec->codec_type) {
841 case CODEC_TYPE_VIDEO:
842 if (pc->pict_type == FF_I_TYPE)
843 pkt->flags |= PKT_FLAG_KEY;
844 break;
845 case CODEC_TYPE_AUDIO:
846 pkt->flags |= PKT_FLAG_KEY;
847 break;
848 default:
849 break;
850 }
851 }
852 }
853
854 void av_destruct_packet_nofree(AVPacket *pkt)
855 {
856 pkt->data = NULL; pkt->size = 0;
857 }
858
859 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
860 {
861 AVStream *st;
862 int len, ret, i;
863
864 for(;;) {
865 /* select current input stream component */
866 st = s->cur_st;
867 if (st) {
868 if (!st->need_parsing || !st->parser) {
869 /* no parsing needed: we just output the packet as is */
870 /* raw data support */
871 *pkt = s->cur_pkt;
872 compute_pkt_fields(s, st, NULL, pkt);
873 s->cur_st = NULL;
874 break;
875 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
876 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
877 s->cur_ptr, s->cur_len,
878 s->cur_pkt.pts, s->cur_pkt.dts);
879 s->cur_pkt.pts = AV_NOPTS_VALUE;
880 s->cur_pkt.dts = AV_NOPTS_VALUE;
881 /* increment read pointer */
882 s->cur_ptr += len;
883 s->cur_len -= len;
884
885 /* return packet if any */
886 if (pkt->size) {
887 got_packet:
888 pkt->duration = 0;
889 pkt->stream_index = st->index;
890 pkt->pts = st->parser->pts;
891 pkt->dts = st->parser->dts;
892 pkt->destruct = av_destruct_packet_nofree;
893 compute_pkt_fields(s, st, st->parser, pkt);
894 break;
895 }
896 } else {
897 /* free packet */
898 av_free_packet(&s->cur_pkt);
899 s->cur_st = NULL;
900 }
901 } else {
902 /* read next packet */
903 ret = av_read_packet(s, &s->cur_pkt);
904 if (ret < 0) {
905 if (ret == -EAGAIN)
906 return ret;
907 /* return the last frames, if any */
908 for(i = 0; i < s->nb_streams; i++) {
909 st = s->streams[i];
910 if (st->parser && st->need_parsing) {
911 av_parser_parse(st->parser, st->codec,
912 &pkt->data, &pkt->size,
913 NULL, 0,
914 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
915 if (pkt->size)
916 goto got_packet;
917 }
918 }
919 /* no more packets: really terminates parsing */
920 return ret;
921 }
922
923 st = s->streams[s->cur_pkt.stream_index];
924 if(st->codec->debug & FF_DEBUG_PTS)
925 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%lld, dts=%lld, size=%d\n",
926 s->cur_pkt.stream_index,
927 s->cur_pkt.pts,
928 s->cur_pkt.dts,
929 s->cur_pkt.size);
930
931 s->cur_st = st;
932 s->cur_ptr = s->cur_pkt.data;
933 s->cur_len = s->cur_pkt.size;
934 if (st->need_parsing && !st->parser) {
935 st->parser = av_parser_init(st->codec->codec_id);
936 if (!st->parser) {
937 /* no parser available : just output the raw packets */
938 st->need_parsing = 0;
939 }else if(st->need_parsing == 2){
940 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
941 }
942 }
943 }
944 }
945 if(st->codec->debug & FF_DEBUG_PTS)
946 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%lld, dts=%lld, size=%d\n",
947 pkt->stream_index,
948 pkt->pts,
949 pkt->dts,
950 pkt->size);
951
952 return 0;
953 }
954
955 /**
956 * Return the next frame of a stream.
957 *
958 * The returned packet is valid
959 * until the next av_read_frame() or until av_close_input_file() and
960 * must be freed with av_free_packet. For video, the packet contains
961 * exactly one frame. For audio, it contains an integer number of
962 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
963 * data). If the audio frames have a variable size (e.g. MPEG audio),
964 * then it contains one frame.
965 *
966 * pkt->pts, pkt->dts and pkt->duration are always set to correct
967 * values in AV_TIME_BASE units (and guessed if the format cannot
968 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
969 * has B frames, so it is better to rely on pkt->dts if you do not
970 * decompress the payload.
971 *
972 * @return 0 if OK, < 0 if error or end of file.
973 */
974 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
975 {
976 AVPacketList *pktl;
977 int eof=0;
978 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
979
980 for(;;){
981 pktl = s->packet_buffer;
982 if (pktl) {
983 AVPacket *next_pkt= &pktl->pkt;
984
985 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
986 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
987 if( pktl->pkt.stream_index == next_pkt->stream_index
988 && next_pkt->dts < pktl->pkt.dts
989 && pktl->pkt.pts != pktl->pkt.dts //not b frame
990 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
991 next_pkt->pts= pktl->pkt.dts;
992 }
993 pktl= pktl->next;
994 }
995 pktl = s->packet_buffer;
996 }
997
998 if( next_pkt->pts != AV_NOPTS_VALUE
999 || next_pkt->dts == AV_NOPTS_VALUE
1000 || !genpts || eof){
1001 /* read packet from packet buffer, if there is data */
1002 *pkt = *next_pkt;
1003 s->packet_buffer = pktl->next;
1004 av_free(pktl);
1005 return 0;
1006 }
1007 }
1008 if(genpts){
1009 AVPacketList **plast_pktl= &s->packet_buffer;
1010 int ret= av_read_frame_internal(s, pkt);
1011 if(ret<0){
1012 if(pktl && ret != -EAGAIN){
1013 eof=1;
1014 continue;
1015 }else
1016 return ret;
1017 }
1018
1019 /* duplicate the packet */
1020 if (av_dup_packet(pkt) < 0)
1021 return AVERROR_NOMEM;
1022
1023 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
1024
1025 pktl = av_mallocz(sizeof(AVPacketList));
1026 if (!pktl)
1027 return AVERROR_NOMEM;
1028
1029 /* add the packet in the buffered packet list */
1030 *plast_pktl = pktl;
1031 pktl->pkt= *pkt;
1032 }else{
1033 assert(!s->packet_buffer);
1034 return av_read_frame_internal(s, pkt);
1035 }
1036 }
1037 }
1038
1039 /* XXX: suppress the packet queue */
1040 static void flush_packet_queue(AVFormatContext *s)
1041 {
1042 AVPacketList *pktl;
1043
1044 for(;;) {
1045 pktl = s->packet_buffer;
1046 if (!pktl)
1047 break;
1048 s->packet_buffer = pktl->next;
1049 av_free_packet(&pktl->pkt);
1050 av_free(pktl);
1051 }
1052 }
1053
1054 /*******************************************************/
1055 /* seek support */
1056
1057 int av_find_default_stream_index(AVFormatContext *s)
1058 {
1059 int i;
1060 AVStream *st;
1061
1062 if (s->nb_streams <= 0)
1063 return -1;
1064 for(i = 0; i < s->nb_streams; i++) {
1065 st = s->streams[i];
1066 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1067 return i;
1068 }
1069 }
1070 return 0;
1071 }
1072
1073 /**
1074 * Flush the frame reader.
1075 */
1076 static void av_read_frame_flush(AVFormatContext *s)
1077 {
1078 AVStream *st;
1079 int i;
1080
1081 flush_packet_queue(s);
1082
1083 /* free previous packet */
1084 if (s->cur_st) {
1085 if (s->cur_st->parser)
1086 av_free_packet(&s->cur_pkt);
1087 s->cur_st = NULL;
1088 }
1089 /* fail safe */
1090 s->cur_ptr = NULL;
1091 s->cur_len = 0;
1092
1093 /* for each stream, reset read state */
1094 for(i = 0; i < s->nb_streams; i++) {
1095 st = s->streams[i];
1096
1097 if (st->parser) {
1098 av_parser_close(st->parser);
1099 st->parser = NULL;
1100 }
1101 st->last_IP_pts = AV_NOPTS_VALUE;
1102 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
1103 }
1104 }
1105
1106 /**
1107 * Updates cur_dts of all streams based on given timestamp and AVStream.
1108 *
1109 * Stream ref_st is left unchanged; all other streams get cur_dts set in their native time base.
1110 * Only needed for timestamp wrapping or if (dts not set and pts!=dts).
1111 * @param timestamp new dts expressed in time_base of param ref_st
1112 * @param ref_st reference stream giving time_base of param timestamp
1113 */
1114 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1115 int i;
1116
1117 for(i = 0; i < s->nb_streams; i++) {
1118 AVStream *st = s->streams[i];
1119
1120 st->cur_dts = av_rescale(timestamp,
1121 st->time_base.den * (int64_t)ref_st->time_base.num,
1122 st->time_base.num * (int64_t)ref_st->time_base.den);
1123 }
1124 }
1125
1126 /**
1127 * Add an index entry into a sorted list, updating it if it is already there.
1128 *
1129 * @param timestamp timestamp in the timebase of the given stream
1130 */
1131 int av_add_index_entry(AVStream *st,
1132 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1133 {
1134 AVIndexEntry *entries, *ie;
1135 int index;
1136
1137 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1138 return -1;
1139
1140 entries = av_fast_realloc(st->index_entries,
1141 &st->index_entries_allocated_size,
1142 (st->nb_index_entries + 1) *
1143 sizeof(AVIndexEntry));
1144 if(!entries)
1145 return -1;
1146
1147 st->index_entries= entries;
1148
1149 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1150
1151 if(index<0){
1152 index= st->nb_index_entries++;
1153 ie= &entries[index];
1154 assert(index==0 || ie[-1].timestamp < timestamp);
1155 }else{
1156 ie= &entries[index];
1157 if(ie->timestamp != timestamp){
1158 if(ie->timestamp <= timestamp)
1159 return -1;
1160 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1161 st->nb_index_entries++;
1162 }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1163 distance= ie->min_distance;
1164 }
1165
1166 ie->pos = pos;
1167 ie->timestamp = timestamp;
1168 ie->min_distance= distance;
1169 ie->size= size;
1170 ie->flags = flags;
1171
1172 return index;
1173 }
1174
1175 /**
1176 * build an index for raw streams using a parser.
1177 */
1178 static void av_build_index_raw(AVFormatContext *s)
1179 {
1180 AVPacket pkt1, *pkt = &pkt1;
1181 int ret;
1182 AVStream *st;
1183
1184 st = s->streams[0];
1185 av_read_frame_flush(s);
1186 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1187
1188 for(;;) {
1189 ret = av_read_frame(s, pkt);
1190 if (ret < 0)
1191 break;
1192 if (pkt->stream_index == 0 && st->parser &&
1193 (pkt->flags & PKT_FLAG_KEY)) {
1194 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1195 0, 0, AVINDEX_KEYFRAME);
1196 }
1197 av_free_packet(pkt);
1198 }
1199 }
1200
1201 /**
1202 * Returns TRUE if we deal with a raw stream.
1203 *
1204 * Raw codec data and parsing needed.
1205 */
1206 static int is_raw_stream(AVFormatContext *s)
1207 {
1208 AVStream *st;
1209
1210 if (s->nb_streams != 1)
1211 return 0;
1212 st = s->streams[0];
1213 if (!st->need_parsing)
1214 return 0;
1215 return 1;
1216 }
1217
1218 /**
1219 * Gets the index for a specific timestamp.
1220 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
1221 * the timestamp which is <= the requested one, if backward is 0
1222 * then it will be >=
1223 * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
1224 * @return < 0 if no such timestamp could be found
1225 */
1226 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1227 int flags)
1228 {
1229 AVIndexEntry *entries= st->index_entries;
1230 int nb_entries= st->nb_index_entries;
1231 int a, b, m;
1232 int64_t timestamp;
1233
1234 a = - 1;
1235 b = nb_entries;
1236
1237 while (b - a > 1) {
1238 m = (a + b) >> 1;
1239 timestamp = entries[m].timestamp;
1240 if(timestamp >= wanted_timestamp)
1241 b = m;
1242 if(timestamp <= wanted_timestamp)
1243 a = m;
1244 }
1245 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1246
1247 if(!(flags & AVSEEK_FLAG_ANY)){
1248 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1249 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1250 }
1251 }
1252
1253 if(m == nb_entries)
1254 return -1;
1255 return m;
1256 }
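/* Illustrative example of the flag handling above: with keyframe index
 * timestamps {0, 400, 800}, searching for 500 returns the entry at 400 when
 * AVSEEK_FLAG_BACKWARD is set and the entry at 800 otherwise; searching for
 * 900 without AVSEEK_FLAG_BACKWARD finds nothing and returns -1. */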
1257
1258 #define DEBUG_SEEK
1259
1260 /**
1261 * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
1262 * This isn't supposed to be called directly by a user application, but by demuxers.
1263 * @param target_ts target timestamp in the time base of the given stream
1264 * @param stream_index stream number
1265 */
1266 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1267 AVInputFormat *avif= s->iformat;
1268 int64_t pos_min, pos_max, pos, pos_limit;
1269 int64_t ts_min, ts_max, ts;
1270 int64_t start_pos, filesize;
1271 int index, no_change;
1272 AVStream *st;
1273
1274 if (stream_index < 0)
1275 return -1;
1276
1277 #ifdef DEBUG_SEEK
1278 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1279 #endif
1280
1281 ts_max=
1282 ts_min= AV_NOPTS_VALUE;
1283 pos_limit= -1; //gcc falsely says it may be uninitialized
1284
1285 st= s->streams[stream_index];
1286 if(st->index_entries){
1287 AVIndexEntry *e;
1288
1289 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1290 index= FFMAX(index, 0);
1291 e= &st->index_entries[index];
1292
1293 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1294 pos_min= e->pos;
1295 ts_min= e->timestamp;
1296 #ifdef DEBUG_SEEK
1297 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1298 pos_min,ts_min);
1299 #endif
1300 }else{
1301 assert(index==0);
1302 }
1303
1304 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1305 assert(index < st->nb_index_entries);
1306 if(index >= 0){
1307 e= &st->index_entries[index];
1308 assert(e->timestamp >= target_ts);
1309 pos_max= e->pos;
1310 ts_max= e->timestamp;
1311 pos_limit= pos_max - e->min_distance;
1312 #ifdef DEBUG_SEEK
1313 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1314 pos_max,pos_limit, ts_max);
1315 #endif
1316 }
1317 }
1318
1319 if(ts_min == AV_NOPTS_VALUE){
1320 pos_min = s->data_offset;
1321 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1322 if (ts_min == AV_NOPTS_VALUE)
1323 return -1;
1324 }
1325
1326 if(ts_max == AV_NOPTS_VALUE){
1327 int step= 1024;
1328 filesize = url_fsize(&s->pb);
1329 pos_max = filesize - 1;
1330 do{
1331 pos_max -= step;
1332 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1333 step += step;
1334 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1335 if (ts_max == AV_NOPTS_VALUE)
1336 return -1;
1337
1338 for(;;){
1339 int64_t tmp_pos= pos_max + 1;
1340 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1341 if(tmp_ts == AV_NOPTS_VALUE)
1342 break;
1343 ts_max= tmp_ts;
1344 pos_max= tmp_pos;
1345 if(tmp_pos >= filesize)
1346 break;
1347 }
1348 pos_limit= pos_max;
1349 }
1350
1351 if(ts_min > ts_max){
1352 return -1;
1353 }else if(ts_min == ts_max){
1354 pos_limit= pos_min;
1355 }
1356
1357 no_change=0;
1358 while (pos_min < pos_limit) {
1359 #ifdef DEBUG_SEEK
1360 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1361 pos_min, pos_max,
1362 ts_min, ts_max);
1363 #endif
1364 assert(pos_limit <= pos_max);
1365
1366 if(no_change==0){
1367 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1368 // interpolate position (better than dichotomy)
1369 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1370 + pos_min - approximate_keyframe_distance;
1371 }else if(no_change==1){
1372 // bisection, if interpolation failed to change min or max pos last time
1373 pos = (pos_min + pos_limit)>>1;
1374 }else{
1375 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1376 pos=pos_min;
1377 }
1378 if(pos <= pos_min)
1379 pos= pos_min + 1;
1380 else if(pos > pos_limit)
1381 pos= pos_limit;
1382 start_pos= pos;
1383
1384 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1385 if(pos == pos_max)
1386 no_change++;
1387 else
1388 no_change=0;
1389 #ifdef DEBUG_SEEK
1390 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1391 #endif
1392 assert(ts != AV_NOPTS_VALUE);
1393 if (target_ts <= ts) {
1394 pos_limit = start_pos - 1;
1395 pos_max = pos;
1396 ts_max = ts;
1397 }
1398 if (target_ts >= ts) {
1399 pos_min = pos;
1400 ts_min = ts;
1401 }
1402 }
1403
1404 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1405 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1406 #ifdef DEBUG_SEEK
1407 pos_min = pos;
1408 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1409 pos_min++;
1410 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1411 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1412 pos, ts_min, target_ts, ts_max);
1413 #endif
1414 /* do the seek */
1415 url_fseek(&s->pb, pos, SEEK_SET);
1416
1417 av_update_cur_dts(s, st, ts);
1418
1419 return 0;
1420 }
1421
1422 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1423 int64_t pos_min, pos_max;
1424 #if 0
1425 AVStream *st;
1426
1427 if (stream_index < 0)
1428 return -1;
1429
1430 st= s->streams[stream_index];
1431 #endif
1432
1433 pos_min = s->data_offset;
1434 pos_max = url_fsize(&s->pb) - 1;
1435
1436 if (pos < pos_min) pos= pos_min;
1437 else if(pos > pos_max) pos= pos_max;
1438
1439 url_fseek(&s->pb, pos, SEEK_SET);
1440
1441 #if 0
1442 av_update_cur_dts(s, st, ts);
1443 #endif
1444 return 0;
1445 }
1446
1447 static int av_seek_frame_generic(AVFormatContext *s,
1448 int stream_index, int64_t timestamp, int flags)
1449 {
1450 int index;
1451 AVStream *st;
1452 AVIndexEntry *ie;
1453
1454 if (!s->index_built) {
1455 if (is_raw_stream(s)) {
1456 av_build_index_raw(s);
1457 } else {
1458 return -1;
1459 }
1460 s->index_built = 1;
1461 }
1462
1463 st = s->streams[stream_index];
1464 index = av_index_search_timestamp(st, timestamp, flags);
1465 if (index < 0)
1466 return -1;
1467
1468 /* now we have found the index, we can seek */
1469 ie = &st->index_entries[index];
1470 av_read_frame_flush(s);
1471 url_fseek(&s->pb, ie->pos, SEEK_SET);
1472
1473 av_update_cur_dts(s, st, ie->timestamp);
1474
1475 return 0;
1476 }
1477
1478 /**
1479 * Seek to the key frame at 'timestamp' in the stream given by
1480 * 'stream_index'.
1481 * @param stream_index If stream_index is (-1), a default
1482 * stream is selected, and timestamp is automatically converted
1483 * from AV_TIME_BASE units to the stream specific time_base.
1484 * @param timestamp timestamp in AVStream.time_base units
1485 * or if there is no stream specified then in AV_TIME_BASE units
1486 * @param flags flags which select direction and seeking mode
1487 * @return >= 0 on success
1488 */
1489 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1490 {
1491 int ret;
1492 AVStream *st;
1493
1494 av_read_frame_flush(s);
1495
1496 if(flags & AVSEEK_FLAG_BYTE)
1497 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1498
1499 if(stream_index < 0){
1500 stream_index= av_find_default_stream_index(s);
1501 if(stream_index < 0)
1502 return -1;
1503
1504 st= s->streams[stream_index];
1505 /* timestamp for default must be expressed in AV_TIME_BASE units */
1506 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1507 }
1508 st= s->streams[stream_index];
1509
1510 /* first, we try the format specific seek */
1511 if (s->iformat->read_seek)
1512 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1513 else
1514 ret = -1;
1515 if (ret >= 0) {
1516 return 0;
1517 }
1518
1519 if(s->iformat->read_timestamp)
1520 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1521 else
1522 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1523 }
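/* Seeking sketch: jump to roughly ten seconds using the default stream
 * (stream_index -1 takes the timestamp in AV_TIME_BASE units); the target
 * value is illustrative. */
#if 0
static int example_seek(AVFormatContext *ic)
{
    return av_seek_frame(ic, -1, 10 * (int64_t)AV_TIME_BASE,
                         AVSEEK_FLAG_BACKWARD);
}
#endif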
1524
1525 /*******************************************************/
1526
1527 /**
1528 * Returns TRUE if the file has accurate timings for at least one stream.
1529 *
1530 * @return TRUE if the stream has accurate timings for at least one component.
1531 */
1532 static int av_has_timings(AVFormatContext *ic)
1533 {
1534 int i;
1535 AVStream *st;
1536
1537 for(i = 0;i < ic->nb_streams; i++) {
1538 st = ic->streams[i];
1539 if (st->start_time != AV_NOPTS_VALUE &&
1540 st->duration != AV_NOPTS_VALUE)
1541 return 1;
1542 }
1543 return 0;
1544 }
1545
1546 /**
1547 * Estimate the stream timings from the ones of each component.
1548 *
1549 * Also computes the global bitrate if possible.
1550 */
1551 static void av_update_stream_timings(AVFormatContext *ic)
1552 {
1553 int64_t start_time, start_time1, end_time, end_time1;
1554 int i;
1555 AVStream *st;
1556
1557 start_time = MAXINT64;
1558 end_time = MININT64;
1559 for(i = 0;i < ic->nb_streams; i++) {
1560 st = ic->streams[i];
1561 if (st->start_time != AV_NOPTS_VALUE) {
1562 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1563 if (start_time1 < start_time)
1564 start_time = start_time1;
1565 if (st->duration != AV_NOPTS_VALUE) {
1566 end_time1 = start_time1
1567 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1568 if (end_time1 > end_time)
1569 end_time = end_time1;
1570 }
1571 }
1572 }
1573 if (start_time != MAXINT64) {
1574 ic->start_time = start_time;
1575 if (end_time != MININT64) {
1576 ic->duration = end_time - start_time;
1577 if (ic->file_size > 0) {
1578 /* compute the bit rate */
1579 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1580 (double)ic->duration;
1581 }
1582 }
1583 }
1584
1585 }
1586
1587 static void fill_all_stream_timings(AVFormatContext *ic)
1588 {
1589 int i;
1590 AVStream *st;
1591
1592 av_update_stream_timings(ic);
1593 for(i = 0;i < ic->nb_streams; i++) {
1594 st = ic->streams[i];
1595 if (st->start_time == AV_NOPTS_VALUE) {
1596 if(ic->start_time != AV_NOPTS_VALUE)
1597 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1598 if(ic->duration != AV_NOPTS_VALUE)
1599 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1600 }
1601 }
1602 }
1603
1604 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1605 {
1606 int64_t filesize, duration;
1607 int bit_rate, i;
1608 AVStream *st;
1609
1610 /* if bit_rate is already set, we believe it */
1611 if (ic->bit_rate == 0) {
1612 bit_rate = 0;
1613 for(i=0;i<ic->nb_streams;i++) {
1614 st = ic->streams[i];
1615 bit_rate += st->codec->bit_rate;
1616 }
1617 ic->bit_rate = bit_rate;
1618 }
1619
1620 /* if duration is already set, we believe it */
1621 if (ic->duration == AV_NOPTS_VALUE &&
1622 ic->bit_rate != 0 &&
1623 ic->file_size != 0) {
1624 filesize = ic->file_size;
1625 if (filesize > 0) {
1626 for(i = 0; i < ic->nb_streams; i++) {
1627 st = ic->streams[i];
1628 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1629 if (st->start_time == AV_NOPTS_VALUE ||
1630 st->duration == AV_NOPTS_VALUE) {
1631 st->start_time = 0;
1632 st->duration = duration;
1633 }
1634 }
1635 }
1636 }
1637 }
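/* Worked example of the bit-rate fallback above (illustrative numbers): a
 * file_size of 10,000,000 bytes with bit_rate = 1,000,000 bit/s yields a
 * duration of 8*10,000,000/1,000,000 = 80 seconds, which av_rescale()
 * expresses in each stream's own time_base. */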
1638
1639 #define DURATION_MAX_READ_SIZE 250000
1640
1641 /* only usable for MPEG-PS streams */
1642 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1643 {
1644 AVPacket pkt1, *pkt = &pkt1;
1645 AVStream *st;
1646 int read_size, i, ret;
1647 int64_t end_time;
1648 int64_t filesize, offset, duration;
1649
1650 /* free previous packet */
1651 if (ic->cur_st && ic->cur_st->parser)
1652 av_free_packet(&ic->cur_pkt);
1653 ic->cur_st = NULL;
1654
1655 /* flush packet queue */
1656 flush_packet_queue(ic);
1657
1658 for(i=0;i<ic->nb_streams;i++) {
1659 st = ic->streams[i];
1660 if (st->parser) {
1661 av_parser_close(st->parser);
1662 st->parser= NULL;
1663 }
1664 }
1665
1666 /* we read the first packets to get the first PTS (not fully
1667 accurate, but it is enough now) */
1668 url_fseek(&ic->pb, 0, SEEK_SET);
1669 read_size = 0;
1670 for(;;) {
1671 if (read_size >= DURATION_MAX_READ_SIZE)
1672 break;
1673 /* if all info is available, we can stop */
1674 for(i = 0;i < ic->nb_streams; i++) {
1675 st = ic->streams[i];
1676 if (st->start_time == AV_NOPTS_VALUE)
1677 break;
1678 }
1679 if (i == ic->nb_streams)
1680 break;
1681
1682 ret = av_read_packet(ic, pkt);
1683 if (ret != 0)
1684 break;
1685 read_size += pkt->size;
1686 st = ic->streams[pkt->stream_index];
1687 if (pkt->pts != AV_NOPTS_VALUE) {
1688 if (st->start_time == AV_NOPTS_VALUE)
1689 st->start_time = pkt->pts;
1690 }
1691 av_free_packet(pkt);
1692 }
1693
1694 /* estimate the end time (duration) */
1695 /* XXX: may need to support wrapping */
1696 filesize = ic->file_size;
1697 offset = filesize - DURATION_MAX_READ_SIZE;
1698 if (offset < 0)
1699 offset = 0;
1700
1701 url_fseek(&ic->pb, offset, SEEK_SET);
1702 read_size = 0;
1703 for(;;) {
1704 if (read_size >= DURATION_MAX_READ_SIZE)
1705 break;
1706 /* if all info is available, we can stop */
1707 for(i = 0;i < ic->nb_streams; i++) {
1708 st = ic->streams[i];
1709 if (st->duration == AV_NOPTS_VALUE)
1710 break;
1711 }
1712 if (i == ic->nb_streams)
1713 break;
1714
1715 ret = av_read_packet(ic, pkt);
1716 if (ret != 0)
1717 break;
1718 read_size += pkt->size;
1719 st = ic->streams[pkt->stream_index];
1720 if (pkt->pts != AV_NOPTS_VALUE) {
1721 end_time = pkt->pts;
1722 duration = end_time - st->start_time;
1723 if (duration > 0) {
1724 if (st->duration == AV_NOPTS_VALUE ||
1725 st->duration < duration)
1726 st->duration = duration;
1727 }
1728 }
1729 av_free_packet(pkt);
1730 }
1731
1732 fill_all_stream_timings(ic);
1733
1734 url_fseek(&ic->pb, 0, SEEK_SET);
1735 }
1736
1737 static void av_estimate_timings(AVFormatContext *ic)
1738 {
1739 int64_t file_size;
1740
1741 /* get the file size, if possible */
1742 if (ic->iformat->flags & AVFMT_NOFILE) {
1743 file_size = 0;
1744 } else {
1745 file_size = url_fsize(&ic->pb);
1746 if (file_size < 0)
1747 file_size = 0;
1748 }
1749 ic->file_size = file_size;
1750
1751 if ((!strcmp(ic->iformat->name, "mpeg") ||
1752 !strcmp(ic->iformat->name, "mpegts")) &&
1753 file_size && !ic->pb.is_streamed) {
1754 /* get accurate estimate from the PTSes */
1755 av_estimate_timings_from_pts(ic);
1756 } else if (av_has_timings(ic)) {
1757 /* at least one component has timings - we use them for all
1758 the components */
1759 fill_all_stream_timings(ic);
1760 } else {
1761 /* less precise: use bit rate info */
1762 av_estimate_timings_from_bit_rate(ic);
1763 }
1764 av_update_stream_timings(ic);
1765
1766 #if 0
1767 {
1768 int i;
1769 AVStream *st;
1770 for(i = 0;i < ic->nb_streams; i++) {
1771 st = ic->streams[i];
1772 printf("%d: start_time: %0.3f duration: %0.3f\n",
1773 i, (double)st->start_time / AV_TIME_BASE,
1774 (double)st->duration / AV_TIME_BASE);
1775 }
1776 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1777 (double)ic->start_time / AV_TIME_BASE,
1778 (double)ic->duration / AV_TIME_BASE,
1779 ic->bit_rate / 1000);
1780 }
1781 #endif
1782 }
1783
1784 static int has_codec_parameters(AVCodecContext *enc)
1785 {
1786 int val;
1787 switch(enc->codec_type) {
1788 case CODEC_TYPE_AUDIO:
1789 val = enc->sample_rate;
1790 break;
1791 case CODEC_TYPE_VIDEO:
1792 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1793 break;
1794 default:
1795 val = 1;
1796 break;
1797 }
1798 return (val != 0);
1799 }
1800
1801 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1802 {
1803 int16_t *samples;
1804 AVCodec *codec;
1805 int got_picture, ret=0;
1806 AVFrame picture;
1807
1808 if(!st->codec->codec){
1809 codec = avcodec_find_decoder(st->codec->codec_id);
1810 if (!codec)
1811 return -1;
1812 ret = avcodec_open(st->codec, codec);
1813 if (ret < 0)
1814 return ret;
1815 }
1816
1817 if(!has_codec_parameters(st->codec)){
1818 switch(st->codec->codec_type) {
1819 case CODEC_TYPE_VIDEO:
1820 ret = avcodec_decode_video(st->codec, &picture,
1821 &got_picture, (uint8_t *)data, size);
1822 break;
1823 case CODEC_TYPE_AUDIO:
1824 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1825 if (!samples)
1826 goto fail;
1827 ret = avcodec_decode_audio(st->codec, samples,
1828 &got_picture, (uint8_t *)data, size);
1829 av_free(samples);
1830 break;
1831 default:
1832 break;
1833 }
1834 }
1835 fail:
1836 return ret;
1837 }
1838
1839 /* absolute maximum size we read until we abort */
1840 #define MAX_READ_SIZE 5000000
1841
1842 /* maximum duration until we stop analysing the stream */
1843 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 3.0))
1844
1845 /**
1846 * Read the beginning of a media file to get stream information. This
1847 * is useful for file formats with no headers such as MPEG. This
1848 * function also computes the real frame rate in case of MPEG-2 repeat
1849 * frame mode.
1850 *
1851 * @param ic media file handle
1852 * @return >=0 if OK. AVERROR_xxx if error.
1853 * @todo let user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
1854 */
1855 int av_find_stream_info(AVFormatContext *ic)
1856 {
1857 int i, count, ret, read_size, j;
1858 AVStream *st;
1859 AVPacket pkt1, *pkt;
1860 AVPacketList *pktl=NULL, **ppktl;
1861 int64_t last_dts[MAX_STREAMS];
1862 int64_t duration_sum[MAX_STREAMS];
1863 int duration_count[MAX_STREAMS]={0};
1864
1865 for(i=0;i<ic->nb_streams;i++) {
1866 st = ic->streams[i];
1867 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1868 /* if(!st->time_base.num)
1869 st->time_base= */
1870 if(!st->codec->time_base.num)
1871 st->codec->time_base= st->time_base;
1872 }
1873 //only for the split stuff
1874 if (!st->parser) {
1875 st->parser = av_parser_init(st->codec->codec_id);
1876 if(st->need_parsing == 2 && st->parser){
1877 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1878 }
1879 }
1880 }
1881
1882 for(i=0;i<MAX_STREAMS;i++){
1883 last_dts[i]= AV_NOPTS_VALUE;
1884 duration_sum[i]= INT64_MAX;
1885 }
1886
1887 count = 0;
1888 read_size = 0;
1889 ppktl = &ic->packet_buffer;
1890 for(;;) {
1891 /* check if one codec still needs to be handled */
1892 for(i=0;i<ic->nb_streams;i++) {
1893 st = ic->streams[i];
1894 if (!has_codec_parameters(st->codec))
1895 break;
1896 /* variable fps and no guess at the real fps */
1897 if( st->codec->time_base.den >= 101LL*st->codec->time_base.num
1898 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1899 break;
1900 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1901 break;
1902 }
1903 if (i == ic->nb_streams) {
1904 /* NOTE: if the format has no header, then we need to read
1905 some packets to get most of the streams, so we cannot
1906 stop here */
1907 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1908 /* if we found the info for all the codecs, we can stop */
1909 ret = count;
1910 break;
1911 }
1912 } else {
1913 /* we did not get all the codec info, but we read too much data */
1914 if (read_size >= MAX_READ_SIZE) {
1915 ret = count;
1916 break;
1917 }
1918 }
1919
1920 /* NOTE: a new stream can be added there if no header in file
1921 (AVFMTCTX_NOHEADER) */
1922 ret = av_read_frame_internal(ic, &pkt1);
1923 if (ret < 0) {
1924 /* EOF or error */
1925 ret = -1; /* we could not get all the codec parameters before EOF */
1926 for(i=0;i<ic->nb_streams;i++) {
1927 st = ic->streams[i];
1928 if (!has_codec_parameters(st->codec)){
1929 char buf[256];
1930 avcodec_string(buf, sizeof(buf), st->codec, 0);
1931 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1932 } else {
1933 ret = 0;
1934 }
1935 }
1936 break;
1937 }
1938
1939 pktl = av_mallocz(sizeof(AVPacketList));
1940 if (!pktl) {
1941 ret = AVERROR_NOMEM;
1942 break;
1943 }
1944
1945 /* add the packet in the buffered packet list */
1946 *ppktl = pktl;
1947 ppktl = &pktl->next;
1948
1949 pkt = &pktl->pkt;
1950 *pkt = pkt1;
1951
1952 /* duplicate the packet */
1953 if (av_dup_packet(pkt) < 0) {
1954 ret = AVERROR_NOMEM;
1955 break;
1956 }
1957
1958 read_size += pkt->size;
1959
1960 st = ic->streams[pkt->stream_index];
1961 st->codec_info_duration += pkt->duration;
1962 if (pkt->duration != 0)
1963 st->codec_info_nb_frames++;
1964
1965 {
1966 int index= pkt->stream_index;
1967 int64_t last= last_dts[index];
1968 int64_t duration= pkt->dts - last;
1969
1970 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1971 if(duration*duration_count[index]*10/9 < duration_sum[index]){
1972 duration_sum[index]= duration;
1973 duration_count[index]=1;
1974 }else{
1975 int factor= av_rescale(duration, duration_count[index], duration_sum[index]);
1976 duration_sum[index] += duration;
1977 duration_count[index]+= factor;
1978 }
1979 if(st->codec_info_nb_frames == 0 && 0)
1980 st->codec_info_duration += duration;
1981 }
1982 last_dts[pkt->stream_index]= pkt->dts;
1983 }
1984 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1985 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1986 if(i){
1987 st->codec->extradata_size= i;
1988 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1989 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1990 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1991 }
1992 }
1993
1994 /* if still no information, we try to open the codec and to
1995 decompress the frame. We try to avoid that in most cases as
1996 it takes longer and uses more memory. For MPEG4, we need to
1997 decompress for Quicktime. */
1998 if (!has_codec_parameters(st->codec) /*&&
1999 (st->codec->codec_id == CODEC_ID_FLV1 ||
2000 st->codec->codec_id == CODEC_ID_H264 ||
2001 st->codec->codec_id == CODEC_ID_H263 ||
2002 st->codec->codec_id == CODEC_ID_H261 ||
2003 st->codec->codec_id == CODEC_ID_VORBIS ||
2004 st->codec->codec_id == CODEC_ID_MJPEG ||
2005 st->codec->codec_id == CODEC_ID_PNG ||
2006 st->codec->codec_id == CODEC_ID_PAM ||
2007 st->codec->codec_id == CODEC_ID_PGM ||
2008 st->codec->codec_id == CODEC_ID_PGMYUV ||
2009 st->codec->codec_id == CODEC_ID_PBM ||
2010 st->codec->codec_id == CODEC_ID_PPM ||
2011 st->codec->codec_id == CODEC_ID_SHORTEN ||
2012 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2013 try_decode_frame(st, pkt->data, pkt->size);
2014
2015 if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
2016 break;
2017 }
2018 count++;
2019 }
2020
2021 // close codecs which were opened in try_decode_frame()
2022 for(i=0;i<ic->nb_streams;i++) {
2023 st = ic->streams[i];
2024 if(st->codec->codec)
2025 avcodec_close(st->codec);
2026 }
2027 for(i=0;i<ic->nb_streams;i++) {
2028 st = ic->streams[i];
2029 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2030 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2031 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2032
2033 if(duration_count[i] && st->codec->time_base.num*101LL <= st->codec->time_base.den &&
2034 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den){
2035 int64_t num, den, error, best_error;
2036
2037 num= st->time_base.den*duration_count[i];
2038 den= st->time_base.num*duration_sum[i];
2039
2040 best_error= INT64_MAX;
2041 for(j=1; j<60*12; j++){
2042 error= ABS(1001*12*num - 1001*j*den);
2043 if(error < best_error){
2044 best_error= error;
2045 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, j, 12, INT_MAX);
2046 }
2047 }
2048 for(j=24; j<=30; j+=6){
2049 error= ABS(1001*12*num - 1000*12*j*den);
2050 if(error < best_error){
2051 best_error= error;
2052 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, j*1000, 1001, INT_MAX);
2053 }
2054 }
2055 }
2056
2057 /* set real frame rate info */
2058 /* compute the real frame rate for telecine */
2059 if ((st->codec->codec_id == CODEC_ID_MPEG1VIDEO ||
2060 st->codec->codec_id == CODEC_ID_MPEG2VIDEO) &&
2061 st->codec->sub_id == 2) {
2062 if (st->codec_info_nb_frames >= 20) {
2063 float coded_frame_rate, est_frame_rate;
2064 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
2065 (double)st->codec_info_duration ;
2066 coded_frame_rate = 1.0/av_q2d(st->codec->time_base);
2067 #if 0
2068 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
2069 coded_frame_rate, est_frame_rate);
2070 #endif
2071 /* if we detect that it could be telecine, we
2072 signal it; it would be better to do this at a
2073 higher level, as it can change within a film */
2074 if (coded_frame_rate >= 24.97 &&
2075 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
2076 st->r_frame_rate = (AVRational){24000, 1001};
2077 }
2078 }
2079 }
2080 /* if no real frame rate, use the codec one */
2081 if (!st->r_frame_rate.num){
2082 st->r_frame_rate.num = st->codec->time_base.den;
2083 st->r_frame_rate.den = st->codec->time_base.num;
2084 }
2085 }
2086 }
2087
2088 av_estimate_timings(ic);
2089 #if 0
2090 /* correct DTS for b frame streams with no timestamps */
2091 for(i=0;i<ic->nb_streams;i++) {
2092 st = ic->streams[i];
2093 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2094 if(b-frames){
2095 ppktl = &ic->packet_buffer;
2096 while(ppkt1){
2097 if(ppkt1->stream_index != i)
2098 continue;
2099 if(ppkt1->pkt->dts < 0)
2100 break;
2101 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2102 break;
2103 ppkt1->pkt->dts -= delta;
2104 ppkt1= ppkt1->next;
2105 }
2106 if(ppkt1)
2107 continue;
2108 st->cur_dts -= delta;
2109 }
2110 }
2111 }
2112 #endif
2113 return ret;
2114 }
2115
2116 /*******************************************************/
2117
2118 /**
2119 * Start playing a network-based stream (e.g. an RTSP stream) at the
2120 * current position.
2121 */
2122 int av_read_play(AVFormatContext *s)
2123 {
2124 if (!s->iformat->read_play)
2125 return AVERROR_NOTSUPP;
2126 return s->iformat->read_play(s);
2127 }
2128
2129 /**
2130 * Pause a network-based stream (e.g. an RTSP stream).
2131 *
2132 * Use av_read_play() to resume it.
2133 */
2134 int av_read_pause(AVFormatContext *s)
2135 {
2136 if (!s->iformat->read_pause)
2137 return AVERROR_NOTSUPP;
2138 return s->iformat->read_pause(s);
2139 }
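
/* Illustrative sketch (not part of the public API): toggling playback of a
 * network stream such as RTSP. Formats that do not implement
 * read_play()/read_pause() simply return AVERROR_NOTSUPP. */
#if 0
static void example_toggle_pause(AVFormatContext *s, int pause)
{
    int ret = pause ? av_read_pause(s) : av_read_play(s);
    if (ret == AVERROR_NOTSUPP)
        av_log(s, AV_LOG_INFO, "pause/resume not supported by this format\n");
}
#endif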
2140
2141 /**
2142 * Close a media file (but not its codecs).
2143 *
2144 * @param s media file handle
2145 */
2146 void av_close_input_file(AVFormatContext *s)
2147 {
2148 int i, must_open_file;
2149 AVStream *st;
2150
2151 /* free previous packet */
2152 if (s->cur_st && s->cur_st->parser)
2153 av_free_packet(&s->cur_pkt);
2154
2155 if (s->iformat->read_close)
2156 s->iformat->read_close(s);
2157 for(i=0;i<s->nb_streams;i++) {
2158 /* free all data in a stream component */
2159 st = s->streams[i];
2160 if (st->parser) {
2161 av_parser_close(st->parser);
2162 }
2163 av_free(st->index_entries);
2164 av_free(st->codec->extradata);
2165 av_free(st->codec);
2166 av_free(st);
2167 }
2168 flush_packet_queue(s);
2169 must_open_file = 1;
2170 if (s->iformat->flags & AVFMT_NOFILE) {
2171 must_open_file = 0;
2172 }
2173 if (must_open_file) {
2174 url_fclose(&s->pb);
2175 }
2176 av_freep(&s->priv_data);
2177 av_free(s);
2178 }
2179
2180 /**
2181 * Add a new stream to a media file.
2182 *
2183 * Can only be called in the read_header() function. If the flag
2184 * AVFMTCTX_NOHEADER is set in the format context, then new streams
2185 * can be added in read_packet() too.
2186 *
2187 * @param s media file handle
2188 * @param id file format dependent stream id
2189 */
2190 AVStream *av_new_stream(AVFormatContext *s, int id)
2191 {
2192 AVStream *st;
2193
2194 if (s->nb_streams >= MAX_STREAMS)
2195 return NULL;
2196
2197 st = av_mallocz(sizeof(AVStream));
2198 if (!st)
2199 return NULL;
2200
2201 st->codec= avcodec_alloc_context();
2202 if (s->iformat) {
2203 /* no default bitrate if decoding */
2204 st->codec->bit_rate = 0;
2205 }
2206 st->index = s->nb_streams;
2207 st->id = id;
2208 st->start_time = AV_NOPTS_VALUE;
2209 st->duration = AV_NOPTS_VALUE;
2210 st->cur_dts = AV_NOPTS_VALUE;
2211
2212 /* default pts setting is MPEG-like */
2213 av_set_pts_info(st, 33, 1, 90000);
2214 st->last_IP_pts = AV_NOPTS_VALUE;
2215
2216 s->streams[s->nb_streams++] = st;
2217 return st;
2218 }
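
/* Illustrative sketch of how a demuxer's read_header() typically uses
 * av_new_stream(); the codec values below are made-up placeholders for a
 * hypothetical PCM container with millisecond timestamps. */
#if 0
static int example_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVStream *st = av_new_stream(s, 0);
    if (!st)
        return AVERROR_NOMEM;
    st->codec->codec_type  = CODEC_TYPE_AUDIO;
    st->codec->codec_id    = CODEC_ID_PCM_S16LE;
    st->codec->sample_rate = 44100;
    st->codec->channels    = 2;
    av_set_pts_info(st, 64, 1, 1000); /* timestamps in 1/1000 of a second */
    return 0;
}
#endif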
2219
2220 /************************************************************/
2221 /* output media file */
2222
2223 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2224 {
2225 int ret;
2226
2227 if (s->oformat->priv_data_size > 0) {
2228 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2229 if (!s->priv_data)
2230 return AVERROR_NOMEM;
2231 } else
2232 s->priv_data = NULL;
2233
2234 if (s->oformat->set_parameters) {
2235 ret = s->oformat->set_parameters(s, ap);
2236 if (ret < 0)
2237 return ret;
2238 }
2239 return 0;
2240 }
2241
2242 /**
2243 * allocate the stream private data and write the stream header to an
2244 * output media file
2245 *
2246 * @param s media file handle
2247 * @return 0 if OK. AVERROR_xxx if error.
2248 */
2249 int av_write_header(AVFormatContext *s)
2250 {
2251 int ret, i;
2252 AVStream *st;
2253
2254 // some sanity checks
2255 for(i=0;i<s->nb_streams;i++) {
2256 st = s->streams[i];
2257
2258 switch (st->codec->codec_type) {
2259 case CODEC_TYPE_AUDIO:
2260 if(st->codec->sample_rate<=0){
2261 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2262 return -1;
2263 }
2264 break;
2265 case CODEC_TYPE_VIDEO:
2266 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2267 av_log(s, AV_LOG_ERROR, "time base not set\n");
2268 return -1;
2269 }
2270 if(st->codec->width<=0 || st->codec->height<=0){
2271 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2272 return -1;
2273 }
2274 break;
2275 }
2276 }
2277
2278 if(s->oformat->write_header){
2279 ret = s->oformat->write_header(s);
2280 if (ret < 0)
2281 return ret;
2282 }
2283
2284 /* init PTS generation */
2285 for(i=0;i<s->nb_streams;i++) {
2286 int64_t den = AV_NOPTS_VALUE;
2287 st = s->streams[i];
2288
2289 switch (st->codec->codec_type) {
2290 case CODEC_TYPE_AUDIO:
2291 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2292 break;
2293 case CODEC_TYPE_VIDEO:
2294 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2295 break;
2296 default:
2297 break;
2298 }
2299 if (den != AV_NOPTS_VALUE) {
2300 if (den <= 0)
2301 return AVERROR_INVALIDDATA;
2302 av_frac_init(&st->pts, 0, 0, den);
2303 }
2304 }
2305 return 0;
2306 }
2307
2308 //FIXME merge with compute_pkt_fields
2309 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2310 int b_frames = FFMAX(st->codec->has_b_frames, st->codec->max_b_frames);
2311 int num, den, frame_size;
2312
2313 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
2314
2315 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2316 return -1;*/
2317
2318 /* duration field */
2319 if (pkt->duration == 0) {
2320 compute_frame_duration(&num, &den, st, NULL, pkt);
2321 if (den && num) {
2322 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2323 }
2324 }
2325
2326 //XXX/FIXME this is a temporary hack until all encoders output pts
2327 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
2328 pkt->dts=
2329 // pkt->pts= st->cur_dts;
2330 pkt->pts= st->pts.val;
2331 }
2332
2333 //calculate dts from pts
2334 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2335 if(b_frames){
2336 if(st->last_IP_pts == AV_NOPTS_VALUE){
2337 st->last_IP_pts= -pkt->duration;
2338 }
2339 if(st->last_IP_pts < pkt->pts){
2340 pkt->dts= st->last_IP_pts;
2341 st->last_IP_pts= pkt->pts;
2342 }else
2343 pkt->dts= pkt->pts;
2344 }else
2345 pkt->dts= pkt->pts;
2346 }
2347
2348 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2349 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2350 return -1;
2351 }
2352 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2353 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2354 return -1;
2355 }
2356
2357 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
2358 st->cur_dts= pkt->dts;
2359 st->pts.val= pkt->dts;
2360
2361 /* update pts */
2362 switch (st->codec->codec_type) {
2363 case CODEC_TYPE_AUDIO:
2364 frame_size = get_audio_frame_size(st->codec, pkt->size);
2365
2366 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2367 but it would be better if we had the real timestamps from the encoder */
2368 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2369 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2370 }
2371 break;
2372 case CODEC_TYPE_VIDEO:
2373 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2374 break;
2375 default:
2376 break;
2377 }
2378 return 0;
2379 }
2380
2381 static void truncate_ts(AVStream *st, AVPacket *pkt){
2382 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2383
2384 // if(pkt->dts < 0)
2385 // pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2386
2387 pkt->pts &= pts_mask;
2388 pkt->dts &= pts_mask;
2389 }
2390
2391 /**
2392 * Write a packet to an output media file.
2393 *
2394 * The packet shall contain one audio or video frame.
2395 *
2396 * @param s media file handle
2397 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2398 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2399 */
2400 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2401 {
2402 int ret;
2403
2404 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2405 if(ret<0)
2406 return ret;
2407
2408 truncate_ts(s->streams[pkt->stream_index], pkt);
2409
2410 ret= s->oformat->write_packet(s, pkt);
2411 if(!ret)
2412 ret= url_ferror(&s->pb);
2413 return ret;
2414 }
2415
2416 /**
2417 * interleave_packet implementation which interleaves per DTS.
2418 * Packets with pkt->destruct == av_destruct_packet will be freed inside this function,
2419 * so they cannot be used afterwards; note that calling av_free_packet() on them is still safe.
2420 */
2421 static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2422 AVPacketList *pktl, **next_point, *this_pktl;
2423 int stream_count=0;
2424 int streams[MAX_STREAMS];
2425
2426 if(pkt){
2427 AVStream *st= s->streams[ pkt->stream_index];
2428
2429 // assert(pkt->destruct != av_destruct_packet); //FIXME
2430
2431 this_pktl = av_mallocz(sizeof(AVPacketList));
2432 this_pktl->pkt= *pkt;
2433 if(pkt->destruct == av_destruct_packet)
2434 pkt->destruct= NULL; // non shared -> must keep original from being freed
2435 else
2436 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2437
2438 next_point = &s->packet_buffer;
2439 while(*next_point){
2440 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2441 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2442 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
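            /* cross-multiplying compares the two dts values in seconds without
             * division, e.g. a dts of 9000 in 1/90000 (0.10 s) and a dts of 50
             * in 1/1000 (0.05 s) are compared as 9000*1000 versus 50*90000 */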
2443 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2444 break;
2445 next_point= &(*next_point)->next;
2446 }
2447 this_pktl->next= *next_point;
2448 *next_point= this_pktl;
2449 }
2450
2451 memset(streams, 0, sizeof(streams));
2452 pktl= s->packet_buffer;
2453 while(pktl){
2454 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2455 if(streams[ pktl->pkt.stream_index ] == 0)
2456 stream_count++;
2457 streams[ pktl->pkt.stream_index ]++;
2458 pktl= pktl->next;
2459 }
2460
2461 if(s->nb_streams == stream_count || (flush && stream_count)){
2462 pktl= s->packet_buffer;
2463 *out= pktl->pkt;
2464
2465 s->packet_buffer= pktl->next;
2466 av_freep(&pktl);
2467 return 1;
2468 }else{
2469 av_init_packet(out);
2470 return 0;
2471 }
2472 }
2473
2474 /**
2475 * Interleaves an AVPacket correctly so it can be muxed.
2476 * @param out the interleaved packet will be output here
2477 * @param in the input packet
2478 * @param flush 1 if no further packets are available as input and all
2479 * remaining packets should be output
2480 * @return 1 if a packet was output, 0 if no packet could be output,
2481 * < 0 if an error occurred
2482 */
2483 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2484 if(s->oformat->interleave_packet)
2485 return s->oformat->interleave_packet(s, out, in, flush);
2486 else
2487 return av_interleave_packet_per_dts(s, out, in, flush);
2488 }
2489
2490 /**
2491 * Writes a packet to an output media file ensuring correct interleaving.
2492 *
2493 * The packet must contain one audio or video frame.
2494 * If the packets are already correctly interleaved, the application should
2495 * call av_write_frame() instead, as it is slightly faster. It is also important
2496 * to keep in mind that completely non-interleaved input will need huge amounts
2497 * of memory to interleave with this, so it is preferable to interleave at the
2498 * demuxer level.
2499 *
2500 * @param s media file handle
2501 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2502 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2503 */
2504 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2505 AVStream *st= s->streams[ pkt->stream_index];
2506
2507 //FIXME/XXX/HACK drop zero sized packets
2508 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2509 return 0;
2510
2511 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts);
2512 if(compute_pkt_fields2(st, pkt) < 0)
2513 return -1;
2514
2515 if(pkt->dts == AV_NOPTS_VALUE)
2516 return -1;
2517
2518 for(;;){
2519 AVPacket opkt;
2520 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2521 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2522 return ret;
2523
2524 truncate_ts(s->streams[opkt.stream_index], &opkt);
2525 ret= s->oformat->write_packet(s, &opkt);
2526
2527 av_free_packet(&opkt);
2528 pkt= NULL;
2529
2530 if(ret<0)
2531 return ret;
2532 if(url_ferror(&s->pb))
2533 return url_ferror(&s->pb);
2534 }
2535 }
2536
2537 /**
2538 * @brief Write the stream trailer to an output media file and
2539 * free the file private data.
2540 *
2541 * @param s media file handle
2542 * @return 0 if OK. AVERROR_xxx if error.
2543 */
2544 int av_write_trailer(AVFormatContext *s)
2545 {
2546 int ret, i;
2547
2548 for(;;){
2549 AVPacket pkt;
2550 ret= av_interleave_packet(s, &pkt, NULL, 1);
2551 if(ret<0) //FIXME cleanup needed for ret<0 ?
2552 goto fail;
2553 if(!ret)
2554 break;
2555
2556 truncate_ts(s->streams[pkt.stream_index], &pkt);
2557 ret= s->oformat->write_packet(s, &pkt);
2558
2559 av_free_packet(&pkt);
2560
2561 if(ret<0)
2562 goto fail;
2563 if(url_ferror(&s->pb))
2564 goto fail;
2565 }
2566
2567 if(s->oformat->write_trailer)
2568 ret = s->oformat->write_trailer(s);
2569 fail:
2570 if(ret == 0)
2571 ret=url_ferror(&s->pb);
2572 for(i=0;i<s->nb_streams;i++)
2573 av_freep(&s->streams[i]->priv_data);
2574 av_freep(&s->priv_data);
2575 return ret;
2576 }
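
/* Illustrative end-to-end muxing sketch, assuming the caller has already set
 * up 'oc' (oc->oformat and its streams) elsewhere; get_next_encoded_packet()
 * is a hypothetical packet source and error handling is abridged. */
#if 0
static int example_mux(AVFormatContext *oc)
{
    AVPacket pkt;

    if (av_set_parameters(oc, NULL) < 0 || av_write_header(oc) < 0)
        return -1;
    while (get_next_encoded_packet(&pkt) == 0) {
        /* buffers and reorders packets by dts before writing them */
        if (av_interleaved_write_frame(oc, &pkt) < 0)
            return -1;
    }
    return av_write_trailer(oc);
}
#endif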
2577
2578 /* "user interface" functions */
2579
2580 void dump_format(AVFormatContext *ic,
2581 int index,
2582 const char *url,
2583 int is_output)
2584 {
2585 int i, flags;
2586 char buf[256];
2587
2588 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2589 is_output ? "Output" : "Input",
2590 index,
2591 is_output ? ic->oformat->name : ic->iformat->name,
2592 is_output ? "to" : "from", url);
2593 if (!is_output) {
2594 av_log(NULL, AV_LOG_INFO, " Duration: ");
2595 if (ic->duration != AV_NOPTS_VALUE) {
2596 int hours, mins, secs, us;
2597 secs = ic->duration / AV_TIME_BASE;
2598 us = ic->duration % AV_TIME_BASE;
2599 mins = secs / 60;
2600 secs %= 60;
2601 hours = mins / 60;
2602 mins %= 60;
2603 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2604 (10 * us) / AV_TIME_BASE);
2605 } else {
2606 av_log(NULL, AV_LOG_INFO, "N/A");
2607 }
2608 if (ic->start_time != AV_NOPTS_VALUE) {
2609 int secs, us;
2610 av_log(NULL, AV_LOG_INFO, ", start: ");
2611 secs = ic->start_time / AV_TIME_BASE;
2612 us = ic->start_time % AV_TIME_BASE;
2613 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2614 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2615 }
2616 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2617 if (ic->bit_rate) {
2618 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2619 } else {
2620 av_log(NULL, AV_LOG_INFO, "N/A");
2621 }
2622 av_log(NULL, AV_LOG_INFO, "\n");
2623 }
2624 for(i=0;i<ic->nb_streams;i++) {
2625 AVStream *st = ic->streams[i];
2626 int g= ff_gcd(st->time_base.num, st->time_base.den);
2627 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2628 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2629 /* the pid is important information, so we display it */
2630 /* XXX: add a generic system */
2631 if (is_output)
2632 flags = ic->oformat->flags;
2633 else
2634 flags = ic->iformat->flags;
2635 if (flags & AVFMT_SHOW_IDS) {
2636 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2637 }
2638 if (strlen(st->language) > 0) {
2639 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2640 }
2641 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2642 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2643 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2644 if(st->r_frame_rate.den && st->r_frame_rate.num)
2645 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
2646 /* else if(st->time_base.den && st->time_base.num)
2647 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
2648 else
2649 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
2650 }
2651 av_log(NULL, AV_LOG_INFO, "\n");
2652 }
2653 }
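
/* Illustrative sketch: dumping stream information right after opening an
 * input file, using the usual open/probe sequence from avformat.h. */
#if 0
static void example_dump(const char *filename)
{
    AVFormatContext *ic;

    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return;
    if (av_find_stream_info(ic) >= 0)
        dump_format(ic, 0, filename, 0);
    av_close_input_file(ic);
}
#endif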
2654
2655 typedef struct {
2656 const char *abv;
2657 int width, height;
2658 int frame_rate, frame_rate_base;
2659 } AbvEntry;
2660
2661 static AbvEntry frame_abvs[] = {
2662 { "ntsc", 720, 480, 30000, 1001 },
2663 { "pal", 720, 576, 25, 1 },
2664 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2665 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2666 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2667 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2668 { "film", 352, 240, 24, 1 },
2669 { "ntsc-film", 352, 240, 24000, 1001 },
2670 { "sqcif", 128, 96, 0, 0 },
2671 { "qcif", 176, 144, 0, 0 },
2672 { "cif", 352, 288, 0, 0 },
2673 { "4cif", 704, 576, 0, 0 },
2674 };
2675
2676 /**
2677 * Parses width and height out of string str (e.g. "640x480" or an abbreviation such as "pal").
2678 */
2679 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2680 {
2681 int i;
2682 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2683 const char *p;
2684 int frame_width = 0, frame_height = 0;
2685
2686 for(i=0;i<n;i++) {
2687 if (!strcmp(frame_abvs[i].abv, str)) {
2688 frame_width = frame_abvs[i].width;
2689 frame_height = frame_abvs[i].height;
2690 break;
2691 }
2692 }
2693 if (i == n) {
2694 p = str;
2695 frame_width = strtol(p, (char **)&p, 10);
2696 if (*p)
2697 p++;
2698 frame_height = strtol(p, (char **)&p, 10);
2699 }
2700 if (frame_width <= 0 || frame_height <= 0)
2701 return -1;
2702 *width_ptr = frame_width;
2703 *height_ptr = frame_height;
2704 return 0;
2705 }
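
/* e.g. parse_image_size(&w, &h, "640x480") yields 640x480, while
 * parse_image_size(&w, &h, "pal") resolves to 720x576 via frame_abvs[]. */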
2706
2707 /**
2708 * Converts frame rate from string to a fraction.
2709 *
2710 * First we check the abbreviation table, then try to parse an exact
2711 * fraction ("num/den" or "num:den"). If this fails we parse the rate as a
2712 * double and return an approximate fraction using DEFAULT_FRAME_RATE_BASE.
2713 */
2714 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2715 {
2716 int i;
2717 char* cp;
2718
2719 /* First, we check our abbreviation table */
2720 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2721 if (!strcmp(frame_abvs[i].abv, arg)) {
2722 *frame_rate = frame_abvs[i].frame_rate;
2723 *frame_rate_base = frame_abvs[i].frame_rate_base;
2724 return 0;
2725 }
2726
2727 /* Then, we try to parse it as fraction */
2728 cp = strchr(arg, '/');
2729 if (!cp)
2730 cp = strchr(arg, ':');
2731 if (cp) {
2732 char* cpp;
2733 *frame_rate = strtol(arg, &cpp, 10);
2734 if (cpp != arg || cpp == cp)
2735 *frame_rate_base = strtol(cp+1, &cpp, 10);
2736 else
2737 *frame_rate = 0;
2738 }
2739 else {
2740 /* Finally we give up and parse it as double */
2741 AVRational time_base = av_d2q(strtod(arg, 0), DEFAULT_FRAME_RATE_BASE);
2742 *frame_rate_base = time_base.den;
2743 *frame_rate = time_base.num;
2744 }
2745 if (!*frame_rate || !*frame_rate_base)
2746 return -1;
2747 else
2748 return 0;
2749 }
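
/* e.g. "25", "30000/1001" and "ntsc" are all accepted; the latter two both
 * yield *frame_rate = 30000 and *frame_rate_base = 1001. */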
2750
2751 /**
2752 * Converts a date string to a number of microseconds since Jan 1st, 1970.
2753 *
2754 * @code
2755 * Syntax:
2756 * - If not a duration:
2757 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2758 * Time is local time unless Z is appended, in which case it is UTC.
2759 * Returns the date in microseconds since 1970.
2760 *
2761 * - If a duration:
2762 * HH[:MM[:SS[.m...]]]
2763 * S+[.m...]
2764 * @endcode
2765 */
2766 #ifndef CONFIG_WINCE
2767 int64_t parse_date(const char *datestr, int duration)
2768 {
2769 const char *p;
2770 int64_t t;
2771 struct tm dt;
2772 int i;
2773 static const char *date_fmt[] = {
2774 "%Y-%m-%d",
2775 "%Y%m%d",
2776 };
2777 static const char *time_fmt[] = {
2778 "%H:%M:%S",
2779 "%H%M%S",
2780 };
2781 const char *q;
2782 int is_utc, len;
2783 char lastch;
2784 int negative = 0;
2785
2786 #undef time
2787 time_t now = time(0);
2788
2789 len = strlen(datestr);
2790 if (len > 0)
2791 lastch = datestr[len - 1];
2792 else
2793 lastch = '\0';
2794 is_utc = (lastch == 'z' || lastch == 'Z');
2795
2796 memset(&dt, 0, sizeof(dt));
2797
2798 p = datestr;
2799 q = NULL;
2800 if (!duration) {
2801 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2802 q = small_strptime(p, date_fmt[i], &dt);
2803 if (q) {
2804 break;
2805 }
2806 }
2807
2808 if (!q) {
2809 if (is_utc) {
2810 dt = *gmtime(&now);
2811 } else {
2812 dt = *localtime(&now);
2813 }
2814 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2815 } else {
2816 p = q;
2817 }
2818
2819 if (*p == 'T' || *p == 't' || *p == ' ')
2820 p++;
2821
2822 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2823 q = small_strptime(p, time_fmt[i], &dt);
2824 if (q) {
2825 break;
2826 }
2827 }
2828 } else {
2829 if (p[0] == '-') {
2830 negative = 1;
2831 ++p;
2832 }
2833 q = small_strptime(p, time_fmt[0], &dt);
2834 if (!q) {
2835 dt.tm_sec = strtol(p, (char **)&q, 10);
2836 dt.tm_min = 0;
2837 dt.tm_hour = 0;
2838 }
2839 }
2840
2841 /* Now we have all the fields that we can get */
2842 if (!q) {
2843 if (duration)
2844 return 0;
2845 else
2846 return now * int64_t_C(1000000);
2847 }
2848
2849 if (duration) {
2850 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2851 } else {
2852 dt.tm_isdst = -1; /* unknown */
2853 if (is_utc) {
2854 t = mktimegm(&dt);
2855 } else {
2856 t = mktime(&dt);
2857 }
2858 }
2859
2860 t *= 1000000;
2861
2862 if (*q == '.') {
2863 int val, n;
2864 q++;
2865 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2866 if (!isdigit(*q))
2867 break;
2868 val += n * (*q - '0');
2869 }
2870 t += val;
2871 }
2872 return negative ? -t : t;
2873 }
2874 #endif /* CONFIG_WINCE */
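
/* e.g. parse_date("2003-04-05 12:30:00", 0) returns that local time as
 * microseconds since 1970, while parse_date("00:01:30.5", 1) parses a
 * duration and returns 90500000. */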
2875
2876 /**
2877 * Attempts to find a specific tag in a URL.
2878 *
2879 * Syntax: '?tag1=val1&tag2=val2...'. Only minimal URL decoding is done ('+' becomes ' ').
2880 * Returns 1 if found.
2881 */
2882 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2883 {
2884 const char *p;
2885 char tag[128], *q;
2886
2887 p = info;
2888 if (*p == '?')
2889 p++;
2890 for(;;) {
2891 q = tag;
2892 while (*p != '\0' && *p != '=' && *p != '&') {
2893 if ((q - tag) < sizeof(tag) - 1)
2894 *q++ = *p;
2895 p++;
2896 }
2897 *q = '\0';
2898 q = arg;
2899 if (*p == '=') {
2900 p++;
2901 while (*p != '&' && *p != '\0') {
2902 if ((q - arg) < arg_size - 1) {
2903 if (*p == '+')
2904 *q++ = ' ';
2905 else
2906 *q++ = *p;
2907 }
2908 p++;
2909 }
2910 *q = '\0';
2911 }
2912 if (!strcmp(tag, tag1))
2913 return 1;
2914 if (*p != '&')
2915 break;
2916 p++;
2917 }
2918 return 0;
2919 }
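
/* e.g. find_info_tag(buf, sizeof(buf), "rate", "?rate=64000&mode=mono")
 * returns 1 and copies "64000" into buf. */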
2920
2921 /**
2922 * Returns in 'buf' the path with '%d' replaced by number.
2923 *
2924 * Also handles the '%0nd' format where 'n' is the total number
2925 * of digits, as well as '%%'. Returns 0 if OK, -1 on format error.
2926 */
2927 int get_frame_filename(char *buf, int buf_size,
2928 const char *path, int number)
2929 {
2930 const char *p;
2931 char *q, buf1[20], c;
2932 int nd, len, percentd_found;
2933
2934 q = buf;
2935 p = path;
2936 percentd_found = 0;
2937 for(;;) {
2938 c = *p++;
2939 if (c == '\0')
2940 break;
2941 if (c == '%') {
2942 do {
2943 nd = 0;
2944 while (isdigit(*p)) {
2945 nd = nd * 10 + *p++ - '0';
2946 }
2947 c = *p++;
2948 } while (isdigit(c));
2949
2950 switch(c) {
2951 case '%':
2952 goto addchar;
2953 case 'd':
2954 if (percentd_found)
2955 goto fail;
2956 percentd_found = 1;
2957 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2958 len = strlen(buf1);
2959 if ((q - buf + len) > buf_size - 1)
2960 goto fail;
2961 memcpy(q, buf1, len);
2962 q += len;
2963 break;
2964 default:
2965 goto fail;
2966 }
2967 } else {
2968 addchar:
2969 if ((q - buf) < buf_size - 1)
2970 *q++ = c;
2971 }
2972 }
2973 if (!percentd_found)
2974 goto fail;
2975 *q = '\0';
2976 return 0;
2977 fail:
2978 *q = '\0';
2979 return -1;
2980 }
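
/* e.g. get_frame_filename(buf, sizeof(buf), "img%03d.png", 7) stores
 * "img007.png" in buf; a path containing no '%d' makes it return -1. */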
2981
2982 /**
2983 * Print a nice hex dump of a buffer.
2984 * @param f stream for output
2985 * @param buf buffer
2986 * @param size buffer size
2987 */
2988 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2989 {
2990 int len, i, j, c;
2991
2992 for(i=0;i<size;i+=16) {
2993 len = size - i;
2994 if (len > 16)
2995 len = 16;
2996 fprintf(f, "%08x ", i);
2997 for(j=0;j<16;j++) {
2998 if (j < len)
2999 fprintf(f, " %02x", buf[i+j]);
3000 else
3001 fprintf(f, " ");
3002 }
3003 fprintf(f, " ");
3004 for(j=0;j<len;j++) {
3005 c = buf[i+j];
3006 if (c < ' ' || c > '~')
3007 c = '.';
3008 fprintf(f, "%c", c);
3009 }
3010 fprintf(f, "\n");
3011 }
3012 }
3013
3014 /**
3015 * Print on 'f' a nice dump of a packet
3016 * @param f stream for output
3017 * @param pkt packet to dump
3018 * @param dump_payload true if the payload must be displayed too
3019 */
3020 //FIXME needs to know the time_base
3021 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3022 {
3023 fprintf(f, "stream #%d:\n", pkt->stream_index);
3024 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3025 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3026 /* DTS is _always_ valid after av_read_frame() */
3027 fprintf(f, " dts=");
3028 if (pkt->dts == AV_NOPTS_VALUE)
3029 fprintf(f, "N/A");
3030 else
3031 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
3032 /* PTS may be not known if B frames are present */
3033 fprintf(f, " pts=");
3034 if (pkt->pts == AV_NOPTS_VALUE)
3035 fprintf(f, "N/A");
3036 else
3037 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
3038 fprintf(f, "\n");
3039 fprintf(f, " size=%d\n", pkt->size);
3040 if (dump_payload)
3041 av_hex_dump(f, pkt->data, pkt->size);
3042 }
3043
3044 void url_split(char *proto, int proto_size,
3045 char *authorization, int authorization_size,
3046 char *hostname, int hostname_size,
3047 int *port_ptr,
3048 char *path, int path_size,
3049 const char *url)
3050 {
3051 const char *p;
3052 char *q;
3053 int port;
3054
3055 port = -1;
3056
3057 p = url;
3058 q = proto;
3059 while (*p != ':' && *p != '\0') {
3060 if ((q - proto) < proto_size - 1)
3061 *q++ = *p;
3062 p++;
3063 }
3064 if (proto_size > 0)
3065 *q = '\0';
3066 if (authorization_size > 0)
3067 authorization[0] = '\0';
3068 if (*p == '\0') {
3069 if (proto_size > 0)
3070 proto[0] = '\0';
3071 if (hostname_size > 0)
3072 hostname[0] = '\0';
3073 p = url;
3074 } else {
3075 char *at,*slash; // PETR: position of '@' character and '/' character
3076
3077 p++;
3078 if (*p == '/')
3079 p++;
3080 if (*p == '/')
3081 p++;
3082 at = strchr(p,'@'); // PETR: get the position of '@'
3083 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
3084 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
3085
3086 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
3087
3088 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
3089 if (*p == '@') { // PETR: passed '@'
3090 if (authorization_size > 0)
3091 *q = '\0';
3092 q = hostname;
3093 at = NULL;
3094 } else if (!at) { // PETR: hostname
3095 if ((q - hostname) < hostname_size - 1)
3096 *q++ = *p;
3097 } else {
3098 if ((q - authorization) < authorization_size - 1)
3099 *q++ = *p;
3100 }
3101 p++;
3102 }
3103 if (hostname_size > 0)
3104 *q = '\0';
3105 if (*p == ':') {
3106 p++;
3107 port = strtoul(p, (char **)&p, 10);
3108 }
3109 }
3110 if (port_ptr)
3111 *port_ptr = port;
3112 pstrcpy(path, path_size, p);
3113 }
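
/* e.g. splitting "rtsp://user:pass@example.com:554/stream" yields
 * proto="rtsp", authorization="user:pass", hostname="example.com",
 * port 554 and path="/stream". */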
3114
3115 /**
3116 * Set the pts for a given stream.
3117 *
3118 * @param s stream
3119 * @param pts_wrap_bits number of bits effectively used by the pts
3120 * (used for wrap control, 33 is the value for MPEG)
3121 * @param pts_num numerator to convert to seconds (MPEG: 1)
3122 * @param pts_den denominator to convert to seconds (MPEG: 90000)
3123 */
3124 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3125 int pts_num, int pts_den)
3126 {
3127 s->pts_wrap_bits = pts_wrap_bits;
3128 s->time_base.num = pts_num;
3129 s->time_base.den = pts_den;
3130 }
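
/* e.g. av_set_pts_info(st, 33, 1, 90000) selects the MPEG 90 kHz clock with
 * 33 bit wrap, while av_set_pts_info(st, 64, 1, 1000) gives millisecond
 * timestamps that never wrap in practice. */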
3131
3132 /* fraction handling */
3133
3134 /**
3135 * f = val + (num / den) + 0.5.
3136 *
3137 * 'num' is normalized so that 0 <= num < den.
3138 *
3139 * @param f fractional number
3140 * @param val integer value
3141 * @param num must be >= 0
3142 * @param den must be >= 1
3143 */
3144 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3145 {
3146 num += (den >> 1);
3147 if (num >= den) {
3148 val += num / den;
3149 num = num % den;
3150 }
3151 f->val = val;
3152 f->num = num;
3153 f->den = den;
3154 }
3155
3156 /**
3157 * Set f to (val + 0.5).
3158 */
3159 static void av_frac_set(AVFrac *f, int64_t val)
3160 {
3161 f->val = val;
3162 f->num = f->den >> 1;
3163 }
3164
3165 /**
3166 * Fractional addition to f: f = f + (incr / f->den).
3167 *
3168 * @param f fractional number
3169 * @param incr increment, can be positive or negative
3170 */
3171 static void av_frac_add(AVFrac *f, int64_t incr)
3172 {
3173 int64_t num, den;
3174
3175 num = f->num + incr;
3176 den = f->den;
3177 if (num < 0) {
3178 f->val += num / den;
3179 num = num % den;
3180 if (num < 0) {
3181 num += den;
3182 f->val--;
3183 }
3184 } else if (num >= den) {
3185 f->val += num / den;
3186 num = num % den;
3187 }
3188 f->num = num;
3189 }
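
/* Worked example of how the muxer drives these fractions (see
 * av_write_header() and compute_pkt_fields2()): for an audio stream with
 * time_base 1/90000 and sample_rate 48000, av_frac_init() gets den = 48000;
 * a packet of 1152 samples then calls av_frac_add(&st->pts, 90000*1152),
 * advancing pts.val by exactly 90000*1152/48000 = 2160 ticks (24 ms), with
 * any remainder carried in pts.num. */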
3190
3191 /**
3192 * register a new image format
3193 * @param img_fmt Image format descriptor
3194 */
3195 void av_register_image_format(AVImageFormat *img_fmt)
3196 {
3197 AVImageFormat **p;
3198
3199 p = &first_image_format;
3200 while (*p != NULL) p = &(*p)->next;
3201 *p = img_fmt;
3202 img_fmt->next = NULL;
3203 }
3204
3205 /**
3206 * Guesses image format based on data in the image.
3207 */
3208 AVImageFormat *av_probe_image_format(AVProbeData *pd)
3209 {
3210 AVImageFormat *fmt1, *fmt;
3211 int score, score_max;
3212
3213 fmt = NULL;
3214 score_max = 0;
3215 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
3216 if (fmt1->img_probe) {
3217 score = fmt1->img_probe(pd);
3218 if (score > score_max) {
3219 score_max = score;
3220 fmt = fmt1;
3221 }
3222 }
3223 }
3224 return fmt;
3225 }
3226
3227 /**
3228 * Guesses image format based on file name extensions.
3229 */
3230 AVImageFormat *guess_image_format(const char *filename)
3231 {
3232 AVImageFormat *fmt1;
3233
3234 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
3235 if (fmt1->extensions && match_ext(filename, fmt1->extensions))
3236 return fmt1;
3237 }
3238 return NULL;
3239 }
3240
3241 /**
3242 * Read an image from a stream.
3243 * @param pb byte stream containing the image
3244 * @param fmt image format, NULL if probing is required
3245 */
3246 int av_read_image(ByteIOContext *pb, const char *filename,
3247 AVImageFormat *fmt,
3248 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
3249 {
3250 uint8_t buf[PROBE_BUF_MIN];
3251 AVProbeData probe_data, *pd = &probe_data;
3252 offset_t pos;
3253 int ret;
3254
3255 if (!fmt) {
3256 pd->filename = filename;
3257 pd->buf = buf;
3258 pos = url_ftell(pb);
3259 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_MIN);
3260 url_fseek(pb, pos, SEEK_SET);
3261 fmt = av_probe_image_format(pd);
3262 }
3263 if (!fmt)
3264 return AVERROR_NOFMT;
3265 ret = fmt->img_read(pb, alloc_cb, opaque);
3266 return ret;
3267 }
3268
3269 /**
3270 * Write an image to a stream.
3271 * @