1 /*
2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19 #include "avformat.h"
20
21 #undef NDEBUG
22 #include <assert.h>
23
24 /**
25 * @file libavformat/utils.c
26 * Various utility functions for using the FFmpeg library.
27 */
28
29 /** head of registered input format linked list. */
30 AVInputFormat *first_iformat = NULL;
31 /** head of registered output format linked list. */
32 AVOutputFormat *first_oformat = NULL;
33 /** head of registered image format linked list. */
34 AVImageFormat *first_image_format = NULL;
35
36 void av_register_input_format(AVInputFormat *format)
37 {
38 AVInputFormat **p;
39 p = &first_iformat;
40 while (*p != NULL) p = &(*p)->next;
41 *p = format;
42 format->next = NULL;
43 }
44
45 void av_register_output_format(AVOutputFormat *format)
46 {
47 AVOutputFormat **p;
48 p = &first_oformat;
49 while (*p != NULL) p = &(*p)->next;
50 *p = format;
51 format->next = NULL;
52 }
53
54 int match_ext(const char *filename, const char *extensions)
55 {
56 const char *ext, *p;
57 char ext1[32], *q;
58
59 if(!filename)
60 return 0;
61
62 ext = strrchr(filename, '.');
63 if (ext) {
64 ext++;
65 p = extensions;
66 for(;;) {
67 q = ext1;
68 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
69 *q++ = *p++;
70 *q = '\0';
71 if (!strcasecmp(ext1, ext))
72 return 1;
73 if (*p == '\0')
74 break;
75 p++;
76 }
77 }
78 return 0;
79 }
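
/* Illustrative sketch (not part of the original file): the extension list is
   comma separated and the comparison is case insensitive. */
#if 0
static void example_match_ext(void)
{
    assert(match_ext("movie.MP4", "mp4,m4v") == 1);  /* case insensitive match */
    assert(match_ext("movie.avi", "mp4,m4v") == 0);  /* extension not in the list */
    assert(match_ext(NULL, "mp4") == 0);             /* NULL filenames are rejected */
}
#endif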
80
81 AVOutputFormat *guess_format(const char *short_name, const char *filename,
82 const char *mime_type)
83 {
84 AVOutputFormat *fmt, *fmt_found;
85 int score_max, score;
86
87 /* specific test for image sequences */
88 if (!short_name && filename &&
89 filename_number_test(filename) >= 0 &&
90 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
91 return guess_format("image2", NULL, NULL);
92 }
93 if (!short_name && filename &&
94 filename_number_test(filename) >= 0 &&
95 guess_image_format(filename)) {
96 return guess_format("image", NULL, NULL);
97 }
98
99 /* find the proper file type */
100 fmt_found = NULL;
101 score_max = 0;
102 fmt = first_oformat;
103 while (fmt != NULL) {
104 score = 0;
105 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
106 score += 100;
107 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
108 score += 10;
109 if (filename && fmt->extensions &&
110 match_ext(filename, fmt->extensions)) {
111 score += 5;
112 }
113 if (score > score_max) {
114 score_max = score;
115 fmt_found = fmt;
116 }
117 fmt = fmt->next;
118 }
119 return fmt_found;
120 }
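
/* Illustrative sketch (not part of the original file): choosing a muxer from
   the output file name alone; an explicit short name outweighs the extension
   (100 vs. 5 points). Assumes the usual muxers have been registered. */
#if 0
static void example_guess_format(void)
{
    AVOutputFormat *ofmt;

    ofmt = guess_format(NULL, "out.avi", NULL);    /* matched by the ".avi" extension */
    if (ofmt)
        av_log(NULL, AV_LOG_INFO, "selected muxer: %s\n", ofmt->name);

    ofmt = guess_format("mpeg", "out.avi", NULL);  /* short name wins over the extension */
}
#endif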
121
122 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
123 const char *mime_type)
124 {
125 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
126
127 if (fmt) {
128 AVOutputFormat *stream_fmt;
129 char stream_format_name[64];
130
131 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
132 stream_fmt = guess_format(stream_format_name, NULL, NULL);
133
134 if (stream_fmt)
135 fmt = stream_fmt;
136 }
137
138 return fmt;
139 }
140
141 /**
142 * Guesses the codec id based upon muxer and filename.
143 */
144 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
145 const char *filename, const char *mime_type, enum CodecType type){
146 if(type == CODEC_TYPE_VIDEO){
147 enum CodecID codec_id= CODEC_ID_NONE;
148
149 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
150 codec_id= av_guess_image2_codec(filename);
151 }
152 if(codec_id == CODEC_ID_NONE)
153 codec_id= fmt->video_codec;
154 return codec_id;
155 }else if(type == CODEC_TYPE_AUDIO)
156 return fmt->audio_codec;
157 else
158 return CODEC_ID_NONE;
159 }
160
161 /**
162 * Finds AVInputFormat based on the input format's short name.
163 */
164 AVInputFormat *av_find_input_format(const char *short_name)
165 {
166 AVInputFormat *fmt;
167 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
168 if (!strcmp(fmt->name, short_name))
169 return fmt;
170 }
171 return NULL;
172 }
173
174 /* memory handling */
175
176 /**
177 * Default packet destructor.
178 */
179 void av_destruct_packet(AVPacket *pkt)
180 {
181 av_free(pkt->data);
182 pkt->data = NULL; pkt->size = 0;
183 }
184
185 /**
186 * Allocate the payload of a packet and initialize its fields to default values.
187 *
188 * @param pkt packet
189 * @param size wanted payload size
190 * @return 0 if OK. AVERROR_xxx otherwise.
191 */
192 int av_new_packet(AVPacket *pkt, int size)
193 {
194 void *data;
195 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
196 return AVERROR_NOMEM;
197 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
198 if (!data)
199 return AVERROR_NOMEM;
200 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
201
202 av_init_packet(pkt);
203 pkt->data = data;
204 pkt->size = size;
205 pkt->destruct = av_destruct_packet;
206 return 0;
207 }
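
/* Illustrative sketch (not part of the original file): allocate a padded
   packet, fill the payload and release it through its destructor. */
#if 0
static int example_new_packet(void)
{
    AVPacket pkt;

    if (av_new_packet(&pkt, 1024) < 0)  /* 1024 bytes + FF_INPUT_BUFFER_PADDING_SIZE */
        return AVERROR_NOMEM;
    memset(pkt.data, 0, pkt.size);      /* the padding after pkt.size is already zeroed */
    av_free_packet(&pkt);               /* calls pkt.destruct == av_destruct_packet */
    return 0;
}
#endif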
208
209 /**
210 * Allocate and read the payload of a packet and initialize its fields to default values.
211 *
212 * @param pkt packet
213 * @param size wanted payload size
214 * @return >0 (read size) if OK. AVERROR_xxx otherwise.
215 */
216 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
217 {
218 int ret= av_new_packet(pkt, size);
219
220 if(ret<0)
221 return ret;
222
223 pkt->pos= url_ftell(s);
224
225 ret= get_buffer(s, pkt->data, size);
226 if(ret<=0)
227 av_free_packet(pkt);
228 else
229 pkt->size= ret;
230
231 return ret;
232 }
233
234 /* This is a hack - the packet memory allocation stuff is broken. The
235 packet data is duplicated (with padding) if the packet does not own it */
236 int av_dup_packet(AVPacket *pkt)
237 {
238 if (pkt->destruct != av_destruct_packet) {
239 uint8_t *data;
240 /* we duplicate the packet and don't forget to put the padding
241 again */
242 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
243 return AVERROR_NOMEM;
244 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
245 if (!data) {
246 return AVERROR_NOMEM;
247 }
248 memcpy(data, pkt->data, pkt->size);
249 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
250 pkt->data = data;
251 pkt->destruct = av_destruct_packet;
252 }
253 return 0;
254 }
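
/* Illustrative sketch (not part of the original file): packets whose destruct
   callback is not av_destruct_packet may reference demuxer-internal buffers,
   so they must be duplicated before being kept past the next read. */
#if 0
static int example_keep_packet(AVFormatContext *s, AVPacket *kept)
{
    int ret = av_read_frame(s, kept);
    if (ret < 0)
        return ret;
    if (av_dup_packet(kept) < 0)        /* copies payload + padding if not owned */
        return AVERROR_NOMEM;
    return 0;                           /* 'kept' now stays valid after later reads */
}
#endif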
255
256 /* fifo handling */
257
258 int fifo_init(FifoBuffer *f, int size)
259 {
260 f->buffer = av_malloc(size);
261 if (!f->buffer)
262 return -1;
263 f->end = f->buffer + size;
264 f->wptr = f->rptr = f->buffer;
265 return 0;
266 }
267
268 void fifo_free(FifoBuffer *f)
269 {
270 av_free(f->buffer);
271 }
272
273 int fifo_size(FifoBuffer *f, uint8_t *rptr)
274 {
275 int size;
276
277 if(!rptr)
278 rptr= f->rptr;
279
280 if (f->wptr >= rptr) {
281 size = f->wptr - rptr;
282 } else {
283 size = (f->end - rptr) + (f->wptr - f->buffer);
284 }
285 return size;
286 }
287
288 /**
289 * Get data from the fifo (returns -1 if not enough data).
290 */
291 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
292 {
293 uint8_t *rptr;
294 int size, len;
295
296 if(!rptr_ptr)
297 rptr_ptr= &f->rptr;
298 rptr = *rptr_ptr;
299
300 if (f->wptr >= rptr) {
301 size = f->wptr - rptr;
302 } else {
303 size = (f->end - rptr) + (f->wptr - f->buffer);
304 }
305
306 if (size < buf_size)
307 return -1;
308 while (buf_size > 0) {
309 len = f->end - rptr;
310 if (len > buf_size)
311 len = buf_size;
312 memcpy(buf, rptr, len);
313 buf += len;
314 rptr += len;
315 if (rptr >= f->end)
316 rptr = f->buffer;
317 buf_size -= len;
318 }
319 *rptr_ptr = rptr;
320 return 0;
321 }
322
323 /**
324 * Resizes a FIFO.
325 */
326 void fifo_realloc(FifoBuffer *f, unsigned int new_size){
327 unsigned int old_size= f->end - f->buffer;
328
329 if(old_size < new_size){
330 uint8_t *old= f->buffer;
331
332 f->buffer= av_realloc(f->buffer, new_size);
333
334 f->rptr += f->buffer - old;
335 f->wptr += f->buffer - old;
336
337 if(f->wptr < f->rptr){
338 memmove(f->rptr + new_size - old_size, f->rptr, f->buffer + old_size - f->rptr);
339 f->rptr += new_size - old_size;
340 }
341 f->end= f->buffer + new_size;
342 }
343 }
344
345 void fifo_write(FifoBuffer *f, uint8_t *buf, int size, uint8_t **wptr_ptr)
346 {
347 int len;
348 uint8_t *wptr;
349
350 if(!wptr_ptr)
351 wptr_ptr= &f->wptr;
352 wptr = *wptr_ptr;
353
354 while (size > 0) {
355 len = f->end - wptr;
356 if (len > size)
357 len = size;
358 memcpy(wptr, buf, len);
359 wptr += len;
360 if (wptr >= f->end)
361 wptr = f->buffer;
362 buf += len;
363 size -= len;
364 }
365 *wptr_ptr = wptr;
366 }
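
/* Illustrative sketch (not part of the original file): a byte FIFO round trip
   using the default read/write pointers (NULL rptr_ptr/wptr_ptr). */
#if 0
static void example_fifo(void)
{
    FifoBuffer f;
    uint8_t out[4];

    if (fifo_init(&f, 16) < 0)
        return;
    fifo_write(&f, (uint8_t *)"abcd", 4, NULL);   /* advances f.wptr */
    if (fifo_size(&f, NULL) >= 4)
        fifo_read(&f, out, 4, NULL);              /* advances f.rptr, returns 0 */
    fifo_free(&f);
}
#endif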
367
368 /* write buf_size bytes from the fifo to the ByteIOContext (return -1 if not enough data) */
369 int put_fifo(ByteIOContext *pb, FifoBuffer *f, int buf_size, uint8_t **rptr_ptr)
370 {
371 uint8_t *rptr = *rptr_ptr;
372 int size, len;
373
374 if (f->wptr >= rptr) {
375 size = f->wptr - rptr;
376 } else {
377 size = (f->end - rptr) + (f->wptr - f->buffer);
378 }
379
380 if (size < buf_size)
381 return -1;
382 while (buf_size > 0) {
383 len = f->end - rptr;
384 if (len > buf_size)
385 len = buf_size;
386 put_buffer(pb, rptr, len);
387 rptr += len;
388 if (rptr >= f->end)
389 rptr = f->buffer;
390 buf_size -= len;
391 }
392 *rptr_ptr = rptr;
393 return 0;
394 }
395
396 int filename_number_test(const char *filename)
397 {
398 char buf[1024];
399 if(!filename)
400 return -1;
401 return get_frame_filename(buf, sizeof(buf), filename, 1);
402 }
403
404 /**
405 * Guess file format.
406 */
407 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
408 {
409 AVInputFormat *fmt1, *fmt;
410 int score, score_max;
411
412 fmt = NULL;
413 score_max = 0;
414 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
415 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
416 continue;
417 score = 0;
418 if (fmt1->read_probe) {
419 score = fmt1->read_probe(pd);
420 } else if (fmt1->extensions) {
421 if (match_ext(pd->filename, fmt1->extensions)) {
422 score = 50;
423 }
424 }
425 if (score > score_max) {
426 score_max = score;
427 fmt = fmt1;
428 }
429 }
430 return fmt;
431 }
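
/* Illustrative sketch (not part of the original file): probing from a buffer
   that the caller has already read; with is_opened=1 every registered demuxer
   is considered, with is_opened=0 only AVFMT_NOFILE demuxers are. */
#if 0
static AVInputFormat *example_probe(const char *filename,
                                    uint8_t *buf, int buf_size)
{
    AVProbeData pd;

    pd.filename = filename;   /* used for extension based scoring */
    pd.buf      = buf;        /* first bytes of the file */
    pd.buf_size = buf_size;
    return av_probe_input_format(&pd, 1);
}
#endif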
432
433 /************************************************************/
434 /* input media file */
435
436 /**
437 * Returns the name of the format attached to the given AVFormatContext (used for logging).
438 */
439 static const char* format_to_name(void* ptr)
440 {
441 AVFormatContext* fc = (AVFormatContext*) ptr;
442 if(fc->iformat) return fc->iformat->name;
443 else if(fc->oformat) return fc->oformat->name;
444 else return "NULL";
445 }
446
447 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
448
449 AVFormatContext *av_alloc_format_context(void)
450 {
451 AVFormatContext *ic;
452 ic = av_mallocz(sizeof(AVFormatContext));
453 if (!ic) return ic;
454 ic->av_class = &av_format_context_class;
455 return ic;
456 }
457
458 /**
459 * Allocates all the structures needed to read an input stream; 'fmt' must
460 * be specified. This does not open the needed codecs for decoding the stream[s].
461 */
462 int av_open_input_stream(AVFormatContext **ic_ptr,
463 ByteIOContext *pb, const char *filename,
464 AVInputFormat *fmt, AVFormatParameters *ap)
465 {
466 int err;
467 AVFormatContext *ic;
468 AVFormatParameters default_ap;
469
470 if(!ap){
471 ap=&default_ap;
472 memset(ap, 0, sizeof(default_ap));
473 }
474
475 ic = av_alloc_format_context();
476 if (!ic) {
477 err = AVERROR_NOMEM;
478 goto fail;
479 }
480 ic->iformat = fmt;
481 if (pb)
482 ic->pb = *pb;
483 ic->duration = AV_NOPTS_VALUE;
484 ic->start_time = AV_NOPTS_VALUE;
485 pstrcpy(ic->filename, sizeof(ic->filename), filename);
486
487 /* allocate private data */
488 if (fmt->priv_data_size > 0) {
489 ic->priv_data = av_mallocz(fmt->priv_data_size);
490 if (!ic->priv_data) {
491 err = AVERROR_NOMEM;
492 goto fail;
493 }
494 } else {
495 ic->priv_data = NULL;
496 }
497
498 err = ic->iformat->read_header(ic, ap);
499 if (err < 0)
500 goto fail;
501
502 if (pb)
503 ic->data_offset = url_ftell(&ic->pb);
504
505 *ic_ptr = ic;
506 return 0;
507 fail:
508 if (ic) {
509 av_freep(&ic->priv_data);
510 }
511 av_free(ic);
512 *ic_ptr = NULL;
513 return err;
514 }
515
516 /** Size of probe buffer, for guessing file type from file contents. */
517 #define PROBE_BUF_MIN 2048
518 #define PROBE_BUF_MAX (1<<20)
519
520 /**
521 * Open a media file as input. The codecs are not opened. Only the file
522 * header (if present) is read.
523 *
524 * @param ic_ptr the opened media file handle is put here
525 * @param filename filename to open.
526 * @param fmt if non NULL, force the file format to use
527 * @param buf_size optional buffer size (zero if default is OK)
528 * @param ap additional parameters needed when opening the file (NULL if default)
529 * @return 0 if OK. AVERROR_xxx otherwise.
530 */
531 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
532 AVInputFormat *fmt,
533 int buf_size,
534 AVFormatParameters *ap)
535 {
536 int err, must_open_file, file_opened, probe_size;
537 AVProbeData probe_data, *pd = &probe_data;
538 ByteIOContext pb1, *pb = &pb1;
539
540 file_opened = 0;
541 pd->filename = "";
542 if (filename)
543 pd->filename = filename;
544 pd->buf = NULL;
545 pd->buf_size = 0;
546
547 if (!fmt) {
548 /* guess the format before opening the file; only AVFMT_NOFILE demuxers can match here */
549 fmt = av_probe_input_format(pd, 0);
550 }
551
552 /* do not open file if the format does not need it. XXX: specific
553 hack needed to handle RTSP/TCP */
554 must_open_file = 1;
555 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
556 must_open_file = 0;
557 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it's uninitialized
558 }
559
560 if (!fmt || must_open_file) {
561 /* open the file if the format needs one (or if the format is still unknown) */
562 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
563 err = AVERROR_IO;
564 goto fail;
565 }
566 file_opened = 1;
567 if (buf_size > 0) {
568 url_setbufsize(pb, buf_size);
569 }
570
571 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
572 /* read probe data */
573 pd->buf= av_realloc(pd->buf, probe_size);
574 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
575 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
576 url_fclose(pb);
577 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
578 file_opened = 0;
579 err = AVERROR_IO;
580 goto fail;
581 }
582 }
583 /* guess file format */
584 fmt = av_probe_input_format(pd, 1);
585 }
586 av_freep(&pd->buf);
587 }
588
589 /* if still no format found, error */
590 if (!fmt) {
591 err = AVERROR_NOFMT;
592 goto fail;
593 }
594
595 /* XXX: suppress this hack for redirectors */
596 #ifdef CONFIG_NETWORK
597 if (fmt == &redir_demux) {
598 err = redir_open(ic_ptr, pb);
599 url_fclose(pb);
600 return err;
601 }
602 #endif
603
604 /* check filename in case an image number is expected */
605 if (fmt->flags & AVFMT_NEEDNUMBER) {
606 if (filename_number_test(filename) < 0) {
607 err = AVERROR_NUMEXPECTED;
608 goto fail;
609 }
610 }
611 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
612 if (err)
613 goto fail;
614 return 0;
615 fail:
616 av_freep(&pd->buf);
617 if (file_opened)
618 url_fclose(pb);
619 *ic_ptr = NULL;
620 return err;
621
622 }
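
/* Illustrative sketch (not part of the original file): the usual demuxing
   setup around av_open_input_file(); errors are simply propagated. */
#if 0
static int example_open(const char *filename)
{
    AVFormatContext *ic;
    int i, err;

    err = av_open_input_file(&ic, filename, NULL, 0, NULL);
    if (err < 0)
        return err;
    err = av_find_stream_info(ic);      /* fills codec parameters and durations */
    if (err < 0) {
        av_close_input_file(ic);
        return err;
    }
    for(i = 0; i < ic->nb_streams; i++)
        av_log(ic, AV_LOG_INFO, "stream %d: codec_type %d\n",
               i, ic->streams[i]->codec->codec_type);
    av_close_input_file(ic);
    return 0;
}
#endif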
623
624 /*******************************************************/
625
626 /**
627 * Read a transport packet from a media file.
628 *
629 * This function is obsolete and should never be used.
630 * Use av_read_frame() instead.
631 *
632 * @param s media file handle
633 * @param pkt is filled
634 * @return 0 if OK. AVERROR_xxx if error.
635 */
636 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
637 {
638 return s->iformat->read_packet(s, pkt);
639 }
640
641 /**********************************************************/
642
643 /**
644 * Get the number of samples of an audio frame. Return -1 on error.
645 */
646 static int get_audio_frame_size(AVCodecContext *enc, int size)
647 {
648 int frame_size;
649
650 if (enc->frame_size <= 1) {
651 /* specific hack for pcm codecs because no frame size is
652 provided */
653 switch(enc->codec_id) {
654 case CODEC_ID_PCM_S32LE:
655 case CODEC_ID_PCM_S32BE:
656 case CODEC_ID_PCM_U32LE:
657 case CODEC_ID_PCM_U32BE:
658 if (enc->channels == 0)
659 return -1;
660 frame_size = size / (4 * enc->channels);
661 break;
662 case CODEC_ID_PCM_S24LE:
663 case CODEC_ID_PCM_S24BE:
664 case CODEC_ID_PCM_U24LE:
665 case CODEC_ID_PCM_U24BE:
666 case CODEC_ID_PCM_S24DAUD:
667 if (enc->channels == 0)
668 return -1;
669 frame_size = size / (3 * enc->channels);
670 break;
671 case CODEC_ID_PCM_S16LE:
672 case CODEC_ID_PCM_S16BE:
673 case CODEC_ID_PCM_U16LE:
674 case CODEC_ID_PCM_U16BE:
675 if (enc->channels == 0)
676 return -1;
677 frame_size = size / (2 * enc->channels);
678 break;
679 case CODEC_ID_PCM_S8:
680 case CODEC_ID_PCM_U8:
681 case CODEC_ID_PCM_MULAW:
682 case CODEC_ID_PCM_ALAW:
683 if (enc->channels == 0)
684 return -1;
685 frame_size = size / (enc->channels);
686 break;
687 default:
688 /* used for example by ADPCM codecs */
689 if (enc->bit_rate == 0)
690 return -1;
691 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
692 break;
693 }
694 } else {
695 frame_size = enc->frame_size;
696 }
697 return frame_size;
698 }
699
700
701 /**
702 * Compute the frame duration as a fraction *pnum / *pden (in seconds); both are set to 0 if it is not available.
703 */
704 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
705 AVCodecParserContext *pc, AVPacket *pkt)
706 {
707 int frame_size;
708
709 *pnum = 0;
710 *pden = 0;
711 switch(st->codec->codec_type) {
712 case CODEC_TYPE_VIDEO:
713 if(st->time_base.num*1000LL > st->time_base.den){
714 *pnum = st->time_base.num;
715 *pden = st->time_base.den;
716 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
717 *pnum = st->codec->time_base.num;
718 *pden = st->codec->time_base.den;
719 if (pc && pc->repeat_pict) {
720 *pden *= 2;
721 *pnum = (*pnum) * (2 + pc->repeat_pict);
722 }
723 }
724 break;
725 case CODEC_TYPE_AUDIO:
726 frame_size = get_audio_frame_size(st->codec, pkt->size);
727 if (frame_size < 0)
728 break;
729 *pnum = frame_size;
730 *pden = st->codec->sample_rate;
731 break;
732 default:
733 break;
734 }
735 }
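
/* Worked example (illustrative, not part of the original file): a 4096 byte
   packet of 16-bit stereo PCM at 48 kHz gives
       frame_size = 4096 / (2 * 2) = 1024 samples, *pnum = 1024, *pden = 48000,
   so with a 1/90000 stream time base compute_pkt_fields() below sets
       pkt->duration = av_rescale(1, 1024 * 90000, 48000 * 1) = 1920 ticks. */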
736
737 static int is_intra_only(AVCodecContext *enc){
738 if(enc->codec_type == CODEC_TYPE_AUDIO){
739 return 1;
740 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
741 switch(enc->codec_id){
742 case CODEC_ID_MJPEG:
743 case CODEC_ID_MJPEGB:
744 case CODEC_ID_LJPEG:
745 case CODEC_ID_RAWVIDEO:
746 case CODEC_ID_DVVIDEO:
747 case CODEC_ID_HUFFYUV:
748 case CODEC_ID_FFVHUFF:
749 case CODEC_ID_ASV1:
750 case CODEC_ID_ASV2:
751 case CODEC_ID_VCR1:
752 return 1;
753 default: break;
754 }
755 }
756 return 0;
757 }
758
759 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
760 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
761 int64_t delta= last_ts - mask/2;
762 return ((lsb - delta)&mask) + delta;
763 }
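
/* Worked example (illustrative, not part of the original file): with 33-bit
   timestamps (pts_wrap_bits = 33), mask = (1LL<<33)-1 = 8589934591. If
   last_ts = 8589934591 and the next lsb has wrapped around to 5, then
   delta = 8589934591 - 4294967295 = 4294967296 and
   ((5 - delta) & mask) + delta = (1LL<<33) + 5, i.e. the wrap is undone. */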
764
765 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
766 AVCodecParserContext *pc, AVPacket *pkt)
767 {
768 int num, den, presentation_delayed;
769 /* handle wrapping */
770 if(st->cur_dts != AV_NOPTS_VALUE){
771 if(pkt->pts != AV_NOPTS_VALUE)
772 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
773 if(pkt->dts != AV_NOPTS_VALUE)
774 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
775 }
776
777 if (pkt->duration == 0) {
778 compute_frame_duration(&num, &den, st, pc, pkt);
779 if (den && num) {
780 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
781 }
782 }
783
784 if(is_intra_only(st->codec))
785 pkt->flags |= PKT_FLAG_KEY;
786
787 /* do we have a video B frame ? */
788 presentation_delayed = 0;
789 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
790 /* XXX: need has_b_frame, but cannot get it if the codec is
791 not initialized */
792 if (( st->codec->codec_id == CODEC_ID_H264
793 || st->codec->has_b_frames) &&
794 pc && pc->pict_type != FF_B_TYPE)
795 presentation_delayed = 1;
796 /* this may be redundant, but it shouldn't hurt */
797 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
798 presentation_delayed = 1;
799 }
800
801 if(st->cur_dts == AV_NOPTS_VALUE){
802 if(presentation_delayed) st->cur_dts = -pkt->duration;
803 else st->cur_dts = 0;
804 }
805
806 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
807 /* interpolate PTS and DTS if they are not present */
808 if (presentation_delayed) {
809 /* DTS = decompression time stamp */
810 /* PTS = presentation time stamp */
811 if (pkt->dts == AV_NOPTS_VALUE) {
812 /* if we know the last pts, use it */
813 if(st->last_IP_pts != AV_NOPTS_VALUE)
814 st->cur_dts = pkt->dts = st->last_IP_pts;
815 else
816 pkt->dts = st->cur_dts;
817 } else {
818 st->cur_dts = pkt->dts;
819 }
820 /* this is tricky: the dts must be incremented by the duration
821 of the frame we are displaying, i.e. the last I or P frame */
822 if (st->last_IP_duration == 0)
823 st->cur_dts += pkt->duration;
824 else
825 st->cur_dts += st->last_IP_duration;
826 st->last_IP_duration = pkt->duration;
827 st->last_IP_pts= pkt->pts;
828 /* cannot compute PTS if not present (we can compute it only
829 by knowing the future) */
830 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
831 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
832 int64_t old_diff= ABS(st->cur_dts - pkt->duration - pkt->pts);
833 int64_t new_diff= ABS(st->cur_dts - pkt->pts);
834 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
835 pkt->pts += pkt->duration;
836 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
837 }
838 }
839
840 /* presentation is not delayed : PTS and DTS are the same */
841 if (pkt->pts == AV_NOPTS_VALUE) {
842 if (pkt->dts == AV_NOPTS_VALUE) {
843 pkt->pts = st->cur_dts;
844 pkt->dts = st->cur_dts;
845 }
846 else {
847 st->cur_dts = pkt->dts;
848 pkt->pts = pkt->dts;
849 }
850 } else {
851 st->cur_dts = pkt->pts;
852 pkt->dts = pkt->pts;
853 }
854 st->cur_dts += pkt->duration;
855 }
856 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
857
858 /* update flags */
859 if (pc) {
860 pkt->flags = 0;
861 /* key frame computation */
862 switch(st->codec->codec_type) {
863 case CODEC_TYPE_VIDEO:
864 if (pc->pict_type == FF_I_TYPE)
865 pkt->flags |= PKT_FLAG_KEY;
866 break;
867 case CODEC_TYPE_AUDIO:
868 pkt->flags |= PKT_FLAG_KEY;
869 break;
870 default:
871 break;
872 }
873 }
874 }
875
876 void av_destruct_packet_nofree(AVPacket *pkt)
877 {
878 pkt->data = NULL; pkt->size = 0;
879 }
880
881 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
882 {
883 AVStream *st;
884 int len, ret, i;
885
886 for(;;) {
887 /* select current input stream component */
888 st = s->cur_st;
889 if (st) {
890 if (!st->need_parsing || !st->parser) {
891 /* no parsing needed: we just output the packet as is */
892 /* raw data support */
893 *pkt = s->cur_pkt;
894 compute_pkt_fields(s, st, NULL, pkt);
895 s->cur_st = NULL;
896 return 0;
897 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
898 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
899 s->cur_ptr, s->cur_len,
900 s->cur_pkt.pts, s->cur_pkt.dts);
901 s->cur_pkt.pts = AV_NOPTS_VALUE;
902 s->cur_pkt.dts = AV_NOPTS_VALUE;
903 /* increment read pointer */
904 s->cur_ptr += len;
905 s->cur_len -= len;
906
907 /* return packet if any */
908 if (pkt->size) {
909 got_packet:
910 pkt->duration = 0;
911 pkt->stream_index = st->index;
912 pkt->pts = st->parser->pts;
913 pkt->dts = st->parser->dts;
914 pkt->destruct = av_destruct_packet_nofree;
915 compute_pkt_fields(s, st, st->parser, pkt);
916 return 0;
917 }
918 } else {
919 /* free packet */
920 av_free_packet(&s->cur_pkt);
921 s->cur_st = NULL;
922 }
923 } else {
924 /* read next packet */
925 ret = av_read_packet(s, &s->cur_pkt);
926 if (ret < 0) {
927 if (ret == -EAGAIN)
928 return ret;
929 /* return the last frames, if any */
930 for(i = 0; i < s->nb_streams; i++) {
931 st = s->streams[i];
932 if (st->parser && st->need_parsing) {
933 av_parser_parse(st->parser, st->codec,
934 &pkt->data, &pkt->size,
935 NULL, 0,
936 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
937 if (pkt->size)
938 goto got_packet;
939 }
940 }
941 /* no more packets: really terminates parsing */
942 return ret;
943 }
944
945 st = s->streams[s->cur_pkt.stream_index];
946
947 s->cur_st = st;
948 s->cur_ptr = s->cur_pkt.data;
949 s->cur_len = s->cur_pkt.size;
950 if (st->need_parsing && !st->parser) {
951 st->parser = av_parser_init(st->codec->codec_id);
952 if (!st->parser) {
953 /* no parser available : just output the raw packets */
954 st->need_parsing = 0;
955 }else if(st->need_parsing == 2){
956 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
957 }
958 }
959 }
960 }
961 }
962
963 /**
964 * Return the next frame of a stream.
965 *
966 * The returned packet is valid
967 * until the next av_read_frame() or until av_close_input_file() and
968 * must be freed with av_free_packet. For video, the packet contains
969 * exactly one frame. For audio, it contains an integer number of
970 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
971 * data). If the audio frames have a variable size (e.g. MPEG audio),
972 * then it contains one frame.
973 *
974 * pkt->pts, pkt->dts and pkt->duration are always set to correct
975 * values in AV_TIME_BASE units (and guessed if the format cannot
976 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
977 * has B frames, so it is better to rely on pkt->dts if you do not
978 * decompress the payload.
979 *
980 * @return 0 if OK, < 0 if error or end of file.
981 */
982 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
983 {
984 AVPacketList *pktl;
985 int eof=0;
986 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
987
988 for(;;){
989 pktl = s->packet_buffer;
990 if (pktl) {
991 AVPacket *next_pkt= &pktl->pkt;
992
993 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
994 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
995 if( pktl->pkt.stream_index == next_pkt->stream_index
996 && next_pkt->dts < pktl->pkt.dts
997 && pktl->pkt.pts != pktl->pkt.dts //not b frame
998 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
999 next_pkt->pts= pktl->pkt.dts;
1000 }
1001 pktl= pktl->next;
1002 }
1003 pktl = s->packet_buffer;
1004 }
1005
1006 if( next_pkt->pts != AV_NOPTS_VALUE
1007 || next_pkt->dts == AV_NOPTS_VALUE
1008 || !genpts || eof){
1009 /* read packet from packet buffer, if there is data */
1010 *pkt = *next_pkt;
1011 s->packet_buffer = pktl->next;
1012 av_free(pktl);
1013 return 0;
1014 }
1015 }
1016 if(genpts){
1017 AVPacketList **plast_pktl= &s->packet_buffer;
1018 int ret= av_read_frame_internal(s, pkt);
1019 if(ret<0){
1020 if(pktl && ret != -EAGAIN){
1021 eof=1;
1022 continue;
1023 }else
1024 return ret;
1025 }
1026
1027 /* duplicate the packet */
1028 if (av_dup_packet(pkt) < 0)
1029 return AVERROR_NOMEM;
1030
1031 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
1032
1033 pktl = av_mallocz(sizeof(AVPacketList));
1034 if (!pktl)
1035 return AVERROR_NOMEM;
1036
1037 /* add the packet in the buffered packet list */
1038 *plast_pktl = pktl;
1039 pktl->pkt= *pkt;
1040 }else{
1041 assert(!s->packet_buffer);
1042 return av_read_frame_internal(s, pkt);
1043 }
1044 }
1045 }
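
/* Illustrative sketch (not part of the original file): the canonical read
   loop; every returned packet must be released with av_free_packet(). */
#if 0
static void example_read_loop(AVFormatContext *s, int video_index)
{
    AVPacket pkt;

    while (av_read_frame(s, &pkt) >= 0) {
        if (pkt.stream_index == video_index) {
            /* pkt.dts is the safer timestamp when B frames may be present */
            av_log(s, AV_LOG_INFO, "dts:%"PRId64" size:%d\n", pkt.dts, pkt.size);
        }
        av_free_packet(&pkt);
    }
}
#endif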
1046
1047 /* XXX: suppress the packet queue */
1048 static void flush_packet_queue(AVFormatContext *s)
1049 {
1050 AVPacketList *pktl;
1051
1052 for(;;) {
1053 pktl = s->packet_buffer;
1054 if (!pktl)
1055 break;
1056 s->packet_buffer = pktl->next;
1057 av_free_packet(&pktl->pkt);
1058 av_free(pktl);
1059 }
1060 }
1061
1062 /*******************************************************/
1063 /* seek support */
1064
1065 int av_find_default_stream_index(AVFormatContext *s)
1066 {
1067 int i;
1068 AVStream *st;
1069
1070 if (s->nb_streams <= 0)
1071 return -1;
1072 for(i = 0; i < s->nb_streams; i++) {
1073 st = s->streams[i];
1074 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1075 return i;
1076 }
1077 }
1078 return 0;
1079 }
1080
1081 /**
1082 * Flush the frame reader.
1083 */
1084 static void av_read_frame_flush(AVFormatContext *s)
1085 {
1086 AVStream *st;
1087 int i;
1088
1089 flush_packet_queue(s);
1090
1091 /* free previous packet */
1092 if (s->cur_st) {
1093 if (s->cur_st->parser)
1094 av_free_packet(&s->cur_pkt);
1095 s->cur_st = NULL;
1096 }
1097 /* fail safe */
1098 s->cur_ptr = NULL;
1099 s->cur_len = 0;
1100
1101 /* for each stream, reset read state */
1102 for(i = 0; i < s->nb_streams; i++) {
1103 st = s->streams[i];
1104
1105 if (st->parser) {
1106 av_parser_close(st->parser);
1107 st->parser = NULL;
1108 }
1109 st->last_IP_pts = AV_NOPTS_VALUE;
1110 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
1111 }
1112 }
1113
1114 /**
1115 * Updates cur_dts of all streams based on given timestamp and AVStream.
1116 *
1117 * Stream ref_st is left unchanged; the others have cur_dts set in their native time base.
1118 * Only needed for timestamp wrapping or if (dts not set and pts != dts).
1119 * @param timestamp new dts expressed in time_base of param ref_st
1120 * @param ref_st reference stream giving time_base of param timestamp
1121 */
1122 static void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1123 int i;
1124
1125 for(i = 0; i < s->nb_streams; i++) {
1126 AVStream *st = s->streams[i];
1127
1128 st->cur_dts = av_rescale(timestamp,
1129 st->time_base.den * (int64_t)ref_st->time_base.num,
1130 st->time_base.num * (int64_t)ref_st->time_base.den);
1131 }
1132 }
1133
1134 /**
1135 * Add an index entry into a sorted list, updating it if it is already there.
1136 *
1137 * @param timestamp timestamp in the timebase of the given stream
1138 */
1139 int av_add_index_entry(AVStream *st,
1140 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1141 {
1142 AVIndexEntry *entries, *ie;
1143 int index;
1144
1145 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1146 return -1;
1147
1148 entries = av_fast_realloc(st->index_entries,
1149 &st->index_entries_allocated_size,
1150 (st->nb_index_entries + 1) *
1151 sizeof(AVIndexEntry));
1152 if(!entries)
1153 return -1;
1154
1155 st->index_entries= entries;
1156
1157 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1158
1159 if(index<0){
1160 index= st->nb_index_entries++;
1161 ie= &entries[index];
1162 assert(index==0 || ie[-1].timestamp < timestamp);
1163 }else{
1164 ie= &entries[index];
1165 if(ie->timestamp != timestamp){
1166 if(ie->timestamp <= timestamp)
1167 return -1;
1168 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1169 st->nb_index_entries++;
1170 }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1171 distance= ie->min_distance;
1172 }
1173
1174 ie->pos = pos;
1175 ie->timestamp = timestamp;
1176 ie->min_distance= distance;
1177 ie->size= size;
1178 ie->flags = flags;
1179
1180 return index;
1181 }
1182
1183 /**
1184 * build an index for raw streams using a parser.
1185 */
1186 static void av_build_index_raw(AVFormatContext *s)
1187 {
1188 AVPacket pkt1, *pkt = &pkt1;
1189 int ret;
1190 AVStream *st;
1191
1192 st = s->streams[0];
1193 av_read_frame_flush(s);
1194 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1195
1196 for(;;) {
1197 ret = av_read_frame(s, pkt);
1198 if (ret < 0)
1199 break;
1200 if (pkt->stream_index == 0 && st->parser &&
1201 (pkt->flags & PKT_FLAG_KEY)) {
1202 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1203 0, 0, AVINDEX_KEYFRAME);
1204 }
1205 av_free_packet(pkt);
1206 }
1207 }
1208
1209 /**
1210 * Returns TRUE if we deal with a raw stream.
1211 *
1212 * Raw codec data and parsing needed.
1213 */
1214 static int is_raw_stream(AVFormatContext *s)
1215 {
1216 AVStream *st;
1217
1218 if (s->nb_streams != 1)
1219 return 0;
1220 st = s->streams[0];
1221 if (!st->need_parsing)
1222 return 0;
1223 return 1;
1224 }
1225
1226 /**
1227 * Gets the index for a specific timestamp.
1228 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
1229 * the timestamp which is <= the requested one; otherwise the returned
1230 * index corresponds to a timestamp which is >= the requested one
1231 * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
1232 * @return < 0 if no such timestamp could be found
1233 */
1234 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1235 int flags)
1236 {
1237 AVIndexEntry *entries= st->index_entries;
1238 int nb_entries= st->nb_index_entries;
1239 int a, b, m;
1240 int64_t timestamp;
1241
1242 a = - 1;
1243 b = nb_entries;
1244
1245 while (b - a > 1) {
1246 m = (a + b) >> 1;
1247 timestamp = entries[m].timestamp;
1248 if(timestamp >= wanted_timestamp)
1249 b = m;
1250 if(timestamp <= wanted_timestamp)
1251 a = m;
1252 }
1253 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1254
1255 if(!(flags & AVSEEK_FLAG_ANY)){
1256 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1257 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1258 }
1259 }
1260
1261 if(m == nb_entries)
1262 return -1;
1263 return m;
1264 }
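
/* Illustrative sketch (not part of the original file): resolving a seek
   target through the index; AVSEEK_FLAG_BACKWARD selects the keyframe entry
   at or before the requested timestamp. */
#if 0
static int64_t example_index_lookup(AVStream *st, int64_t ts)
{
    int idx = av_index_search_timestamp(st, ts, AVSEEK_FLAG_BACKWARD);
    if (idx < 0)
        return -1;                          /* no usable entry */
    return st->index_entries[idx].pos;      /* byte position to seek to */
}
#endif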
1265
1266 #define DEBUG_SEEK
1267
1268 /**
1269 * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
1270 * This isn't supposed to be called directly by a user application, but by demuxers.
1271 * @param target_ts target timestamp in the time base of the given stream
1272 * @param stream_index stream number
1273 */
1274 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1275 AVInputFormat *avif= s->iformat;
1276 int64_t pos_min, pos_max, pos, pos_limit;
1277 int64_t ts_min, ts_max, ts;
1278 int64_t start_pos, filesize;
1279 int index, no_change;
1280 AVStream *st;
1281
1282 if (stream_index < 0)
1283 return -1;
1284
1285 #ifdef DEBUG_SEEK
1286 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1287 #endif
1288
1289 ts_max=
1290 ts_min= AV_NOPTS_VALUE;
1291 pos_limit= -1; //gcc falsely says it may be uninitialized
1292
1293 st= s->streams[stream_index];
1294 if(st->index_entries){
1295 AVIndexEntry *e;
1296
1297 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1298 index= FFMAX(index, 0);
1299 e= &st->index_entries[index];
1300
1301 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1302 pos_min= e->pos;
1303 ts_min= e->timestamp;
1304 #ifdef DEBUG_SEEK
1305 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1306 pos_min,ts_min);
1307 #endif
1308 }else{
1309 assert(index==0);
1310 }
1311
1312 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1313 assert(index < st->nb_index_entries);
1314 if(index >= 0){
1315 e= &st->index_entries[index];
1316 assert(e->timestamp >= target_ts);
1317 pos_max= e->pos;
1318 ts_max= e->timestamp;
1319 pos_limit= pos_max - e->min_distance;
1320 #ifdef DEBUG_SEEK
1321 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1322 pos_max,pos_limit, ts_max);
1323 #endif
1324 }
1325 }
1326
1327 if(ts_min == AV_NOPTS_VALUE){
1328 pos_min = s->data_offset;
1329 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1330 if (ts_min == AV_NOPTS_VALUE)
1331 return -1;
1332 }
1333
1334 if(ts_max == AV_NOPTS_VALUE){
1335 int step= 1024;
1336 filesize = url_fsize(&s->pb);
1337 pos_max = filesize - 1;
1338 do{
1339 pos_max -= step;
1340 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1341 step += step;
1342 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1343 if (ts_max == AV_NOPTS_VALUE)
1344 return -1;
1345
1346 for(;;){
1347 int64_t tmp_pos= pos_max + 1;
1348 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1349 if(tmp_ts == AV_NOPTS_VALUE)
1350 break;
1351 ts_max= tmp_ts;
1352 pos_max= tmp_pos;
1353 if(tmp_pos >= filesize)
1354 break;
1355 }
1356 pos_limit= pos_max;
1357 }
1358
1359 if(ts_min > ts_max){
1360 return -1;
1361 }else if(ts_min == ts_max){
1362 pos_limit= pos_min;
1363 }
1364
1365 no_change=0;
1366 while (pos_min < pos_limit) {
1367 #ifdef DEBUG_SEEK
1368 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1369 pos_min, pos_max,
1370 ts_min, ts_max);
1371 #endif
1372 assert(pos_limit <= pos_max);
1373
1374 if(no_change==0){
1375 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1376 // interpolate position (better than dichotomy)
1377 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1378 + pos_min - approximate_keyframe_distance;
1379 }else if(no_change==1){
1380 // bisection, if interpolation failed to change min or max pos last time
1381 pos = (pos_min + pos_limit)>>1;
1382 }else{
1383 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1384 pos=pos_min;
1385 }
1386 if(pos <= pos_min)
1387 pos= pos_min + 1;
1388 else if(pos > pos_limit)
1389 pos= pos_limit;
1390 start_pos= pos;
1391
1392 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1393 if(pos == pos_max)
1394 no_change++;
1395 else
1396 no_change=0;
1397 #ifdef DEBUG_SEEK
1398 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1399 #endif
1400 assert(ts != AV_NOPTS_VALUE);
1401 if (target_ts <= ts) {
1402 pos_limit = start_pos - 1;
1403 pos_max = pos;
1404 ts_max = ts;
1405 }
1406 if (target_ts >= ts) {
1407 pos_min = pos;
1408 ts_min = ts;
1409 }
1410 }
1411
1412 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1413 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1414 #ifdef DEBUG_SEEK
1415 pos_min = pos;
1416 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1417 pos_min++;
1418 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1419 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1420 pos, ts_min, target_ts, ts_max);
1421 #endif
1422 /* do the seek */
1423 url_fseek(&s->pb, pos, SEEK_SET);
1424
1425 av_update_cur_dts(s, st, ts);
1426
1427 return 0;
1428 }
1429
1430 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1431 int64_t pos_min, pos_max;
1432 #if 0
1433 AVStream *st;
1434
1435 if (stream_index < 0)
1436 return -1;
1437
1438 st= s->streams[stream_index];
1439 #endif
1440
1441 pos_min = s->data_offset;
1442 pos_max = url_fsize(&s->pb) - 1;
1443
1444 if (pos < pos_min) pos= pos_min;
1445 else if(pos > pos_max) pos= pos_max;
1446
1447 url_fseek(&s->pb, pos, SEEK_SET);
1448
1449 #if 0
1450 av_update_cur_dts(s, st, ts);
1451 #endif
1452 return 0;
1453 }
1454
1455 static int av_seek_frame_generic(AVFormatContext *s,
1456 int stream_index, int64_t timestamp, int flags)
1457 {
1458 int index;
1459 AVStream *st;
1460 AVIndexEntry *ie;
1461
1462 if (!s->index_built) {
1463 if (is_raw_stream(s)) {
1464 av_build_index_raw(s);
1465 } else {
1466 return -1;
1467 }
1468 s->index_built = 1;
1469 }
1470
1471 st = s->streams[stream_index];
1472 index = av_index_search_timestamp(st, timestamp, flags);
1473 if (index < 0)
1474 return -1;
1475
1476 /* now we have found the index, we can seek */
1477 ie = &st->index_entries[index];
1478 av_read_frame_flush(s);
1479 url_fseek(&s->pb, ie->pos, SEEK_SET);
1480
1481 av_update_cur_dts(s, st, ie->timestamp);
1482
1483 return 0;
1484 }
1485
1486 /**
1487 * Seek to the keyframe at 'timestamp' in the stream with index 'stream_index'.
1488 *
1489 * @param stream_index If stream_index is (-1), a default
1490 * stream is selected, and timestamp is automatically converted
1491 * from AV_TIME_BASE units to the stream specific time_base.
1492 * @param timestamp timestamp in AVStream.time_base units
1493 * or if there is no stream specified then in AV_TIME_BASE units
1494 * @param flags flags which select direction and seeking mode
1495 * @return >= 0 on success
1496 */
1497 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1498 {
1499 int ret;
1500 AVStream *st;
1501
1502 av_read_frame_flush(s);
1503
1504 if(flags & AVSEEK_FLAG_BYTE)
1505 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1506
1507 if(stream_index < 0){
1508 stream_index= av_find_default_stream_index(s);
1509 if(stream_index < 0)
1510 return -1;
1511
1512 st= s->streams[stream_index];
1513 /* timestamp for default must be expressed in AV_TIME_BASE units */
1514 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1515 }
1516 st= s->streams[stream_index];
1517
1518 /* first, we try the format specific seek */
1519 if (s->iformat->read_seek)
1520 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1521 else
1522 ret = -1;
1523 if (ret >= 0) {
1524 return 0;
1525 }
1526
1527 if(s->iformat->read_timestamp)
1528 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1529 else
1530 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1531 }
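
/* Illustrative sketch (not part of the original file): with the default
   stream (stream_index = -1) the timestamp is given in AV_TIME_BASE units. */
#if 0
static int example_seek_to_seconds(AVFormatContext *s, int seconds)
{
    int64_t ts = (int64_t)seconds * AV_TIME_BASE;
    return av_seek_frame(s, -1, ts, AVSEEK_FLAG_BACKWARD);
}
#endif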
1532
1533 /*******************************************************/
1534
1535 /**
1536 * Returns TRUE if the file has accurate timings for at least one stream.
1537 *
1538 * @return TRUE if the stream has accurate timings for at least one component.
1539 */
1540 static int av_has_timings(AVFormatContext *ic)
1541 {
1542 int i;
1543 AVStream *st;
1544
1545 for(i = 0;i < ic->nb_streams; i++) {
1546 st = ic->streams[i];
1547 if (st->start_time != AV_NOPTS_VALUE &&
1548 st->duration != AV_NOPTS_VALUE)
1549 return 1;
1550 }
1551 return 0;
1552 }
1553
1554 /**
1555 * Estimate the stream timings from the ones of each component.
1556 *
1557 * Also computes the global bitrate if possible.
1558 */
1559 static void av_update_stream_timings(AVFormatContext *ic)
1560 {
1561 int64_t start_time, start_time1, end_time, end_time1;
1562 int i;
1563 AVStream *st;
1564
1565 start_time = MAXINT64;
1566 end_time = MININT64;
1567 for(i = 0;i < ic->nb_streams; i++) {
1568 st = ic->streams[i];
1569 if (st->start_time != AV_NOPTS_VALUE) {
1570 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1571 if (start_time1 < start_time)
1572 start_time = start_time1;
1573 if (st->duration != AV_NOPTS_VALUE) {
1574 end_time1 = start_time1
1575 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1576 if (end_time1 > end_time)
1577 end_time = end_time1;
1578 }
1579 }
1580 }
1581 if (start_time != MAXINT64) {
1582 ic->start_time = start_time;
1583 if (end_time != MININT64) {
1584 ic->duration = end_time - start_time;
1585 if (ic->file_size > 0) {
1586 /* compute the bit rate */
1587 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1588 (double)ic->duration;
1589 }
1590 }
1591 }
1592
1593 }
1594
1595 static void fill_all_stream_timings(AVFormatContext *ic)
1596 {
1597 int i;
1598 AVStream *st;
1599
1600 av_update_stream_timings(ic);
1601 for(i = 0;i < ic->nb_streams; i++) {
1602 st = ic->streams[i];
1603 if (st->start_time == AV_NOPTS_VALUE) {
1604 if(ic->start_time != AV_NOPTS_VALUE)
1605 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1606 if(ic->duration != AV_NOPTS_VALUE)
1607 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1608 }
1609 }
1610 }
1611
1612 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1613 {
1614 int64_t filesize, duration;
1615 int bit_rate, i;
1616 AVStream *st;
1617
1618 /* if bit_rate is already set, we believe it */
1619 if (ic->bit_rate == 0) {
1620 bit_rate = 0;
1621 for(i=0;i<ic->nb_streams;i++) {
1622 st = ic->streams[i];
1623 bit_rate += st->codec->bit_rate;
1624 }
1625 ic->bit_rate = bit_rate;
1626 }
1627
1628 /* if duration is already set, we believe it */
1629 if (ic->duration == AV_NOPTS_VALUE &&
1630 ic->bit_rate != 0 &&
1631 ic->file_size != 0) {
1632 filesize = ic->file_size;
1633 if (filesize > 0) {
1634 for(i = 0; i < ic->nb_streams; i++) {
1635 st = ic->streams[i];
1636 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1637 if (st->start_time == AV_NOPTS_VALUE ||
1638 st->duration == AV_NOPTS_VALUE) {
1639 st->start_time = 0;
1640 st->duration = duration;
1641 }
1642 }
1643 }
1644 }
1645 }
1646
1647 #define DURATION_MAX_READ_SIZE 250000
1648
1649 /* only usable for MPEG-PS streams */
1650 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1651 {
1652 AVPacket pkt1, *pkt = &pkt1;
1653 AVStream *st;
1654 int read_size, i, ret;
1655 int64_t end_time;
1656 int64_t filesize, offset, duration;
1657
1658 /* free previous packet */
1659 if (ic->cur_st && ic->cur_st->parser)
1660 av_free_packet(&ic->cur_pkt);
1661 ic->cur_st = NULL;
1662
1663 /* flush packet queue */
1664 flush_packet_queue(ic);
1665
1666 for(i=0;i<ic->nb_streams;i++) {
1667 st = ic->streams[i];
1668 if (st->parser) {
1669 av_parser_close(st->parser);
1670 st->parser= NULL;
1671 }
1672 }
1673
1674 /* we read the first packets to get the first PTS (not fully
1675 accurate, but it is enough now) */
1676 url_fseek(&ic->pb, 0, SEEK_SET);
1677 read_size = 0;
1678 for(;;) {
1679 if (read_size >= DURATION_MAX_READ_SIZE)
1680 break;
1681 /* if all info is available, we can stop */
1682 for(i = 0;i < ic->nb_streams; i++) {
1683 st = ic->streams[i];
1684 if (st->start_time == AV_NOPTS_VALUE)
1685 break;
1686 }
1687 if (i == ic->nb_streams)
1688 break;
1689
1690 ret = av_read_packet(ic, pkt);
1691 if (ret != 0)
1692 break;
1693 read_size += pkt->size;
1694 st = ic->streams[pkt->stream_index];
1695 if (pkt->pts != AV_NOPTS_VALUE) {
1696 if (st->start_time == AV_NOPTS_VALUE)
1697 st->start_time = pkt->pts;
1698 }
1699 av_free_packet(pkt);
1700 }
1701
1702 /* estimate the end time (duration) */
1703 /* XXX: may need to support wrapping */
1704 filesize = ic->file_size;
1705 offset = filesize - DURATION_MAX_READ_SIZE;
1706 if (offset < 0)
1707 offset = 0;
1708
1709 url_fseek(&ic->pb, offset, SEEK_SET);
1710 read_size = 0;
1711 for(;;) {
1712 if (read_size >= DURATION_MAX_READ_SIZE)
1713 break;
1714 /* if all info is available, we can stop */
1715 for(i = 0;i < ic->nb_streams; i++) {
1716 st = ic->streams[i];
1717 if (st->duration == AV_NOPTS_VALUE)
1718 break;
1719 }
1720 if (i == ic->nb_streams)
1721 break;
1722
1723 ret = av_read_packet(ic, pkt);
1724 if (ret != 0)
1725 break;
1726 read_size += pkt->size;
1727 st = ic->streams[pkt->stream_index];
1728 if (pkt->pts != AV_NOPTS_VALUE) {
1729 end_time = pkt->pts;
1730 duration = end_time - st->start_time;
1731 if (duration > 0) {
1732 if (st->duration == AV_NOPTS_VALUE ||
1733 st->duration < duration)
1734 st->duration = duration;
1735 }
1736 }
1737 av_free_packet(pkt);
1738 }
1739
1740 fill_all_stream_timings(ic);
1741
1742 url_fseek(&ic->pb, 0, SEEK_SET);
1743 }
1744
1745 static void av_estimate_timings(AVFormatContext *ic)
1746 {
1747 int64_t file_size;
1748
1749 /* get the file size, if possible */
1750 if (ic->iformat->flags & AVFMT_NOFILE) {
1751 file_size = 0;
1752 } else {
1753 file_size = url_fsize(&ic->pb);
1754 if (file_size < 0)
1755 file_size = 0;
1756 }
1757 ic->file_size = file_size;
1758
1759 if ((ic->iformat == &mpegps_demux || ic->iformat == &mpegts_demux) && file_size && !ic->pb.is_streamed) {
1760 /* get accurate estimate from the PTSes */
1761 av_estimate_timings_from_pts(ic);
1762 } else if (av_has_timings(ic)) {
1763 /* at least one components has timings - we use them for all
1764 the components */
1765 fill_all_stream_timings(ic);
1766 } else {
1767 /* less precise: use bit rate info */
1768 av_estimate_timings_from_bit_rate(ic);
1769 }
1770 av_update_stream_timings(ic);
1771
1772 #if 0
1773 {
1774 int i;
1775 AVStream *st;
1776 for(i = 0;i < ic->nb_streams; i++) {
1777 st = ic->streams[i];
1778 printf("%d: start_time: %0.3f duration: %0.3f\n",
1779 i, (double)st->start_time / AV_TIME_BASE,
1780 (double)st->duration / AV_TIME_BASE);
1781 }
1782 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1783 (double)ic->start_time / AV_TIME_BASE,
1784 (double)ic->duration / AV_TIME_BASE,
1785 ic->bit_rate / 1000);
1786 }
1787 #endif
1788 }
1789
1790 static int has_codec_parameters(AVCodecContext *enc)
1791 {
1792 int val;
1793 switch(enc->codec_type) {
1794 case CODEC_TYPE_AUDIO:
1795 val = enc->sample_rate;
1796 break;
1797 case CODEC_TYPE_VIDEO:
1798 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1799 break;
1800 default:
1801 val = 1;
1802 break;
1803 }
1804 return (val != 0);
1805 }
1806
1807 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1808 {
1809 int16_t *samples;
1810 AVCodec *codec;
1811 int got_picture, ret=0;
1812 AVFrame picture;
1813
1814 if(!st->codec->codec){
1815 codec = avcodec_find_decoder(st->codec->codec_id);
1816 if (!codec)
1817 return -1;
1818 ret = avcodec_open(st->codec, codec);
1819 if (ret < 0)
1820 return ret;
1821 }
1822
1823 if(!has_codec_parameters(st->codec)){
1824 switch(st->codec->codec_type) {
1825 case CODEC_TYPE_VIDEO:
1826 ret = avcodec_decode_video(st->codec, &picture,
1827 &got_picture, (uint8_t *)data, size);
1828 break;
1829 case CODEC_TYPE_AUDIO:
1830 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1831 if (!samples)
1832 goto fail;
1833 ret = avcodec_decode_audio(st->codec, samples,
1834 &got_picture, (uint8_t *)data, size);
1835 av_free(samples);
1836 break;
1837 default:
1838 break;
1839 }
1840 }
1841 fail:
1842 return ret;
1843 }
1844
1845 /* absolute maximum size we read until we abort */
1846 #define MAX_READ_SIZE 5000000
1847
1848 /* maximum duration until we stop analysing the stream */
1849 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 2.0))
1850
1851 /**
1852 * Read the beginning of a media file to get stream information. This
1853 * is useful for file formats with no headers such as MPEG. This
1854 * function also computes the real frame rate in case of mpeg2 repeat
1855 * frame mode.
1856 *
1857 * @param ic media file handle
1858 * @return >=0 if OK. AVERROR_xxx if error.
1859 * @todo let user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
1860 */
1861 int av_find_stream_info(AVFormatContext *ic)
1862 {
1863 int i, count, ret, read_size, j;
1864 AVStream *st;
1865 AVPacket pkt1, *pkt;
1866 AVPacketList *pktl=NULL, **ppktl;
1867 int64_t last_dts[MAX_STREAMS];
1868 int64_t duration_sum[MAX_STREAMS];
1869 int duration_count[MAX_STREAMS]={0};
1870
1871 for(i=0;i<ic->nb_streams;i++) {
1872 st = ic->streams[i];
1873 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1874 /* if(!st->time_base.num)
1875 st->time_base= */
1876 if(!st->codec->time_base.num)
1877 st->codec->time_base= st->time_base;
1878 }
1879 //only for the split stuff
1880 if (!st->parser) {
1881 st->parser = av_parser_init(st->codec->codec_id);
1882 if(st->need_parsing == 2 && st->parser){
1883 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1884 }
1885 }
1886 }
1887
1888 for(i=0;i<MAX_STREAMS;i++){
1889 last_dts[i]= AV_NOPTS_VALUE;
1890 duration_sum[i]= INT64_MAX;
1891 }
1892
1893 count = 0;
1894 read_size = 0;
1895 ppktl = &ic->packet_buffer;
1896 for(;;) {
1897 /* check if one codec still needs to be handled */
1898 for(i=0;i<ic->nb_streams;i++) {
1899 st = ic->streams[i];
1900 if (!has_codec_parameters(st->codec))
1901 break;
1902 /* variable fps and no guess at the real fps */
1903 if( st->codec->time_base.den >= 101LL*st->codec->time_base.num
1904 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1905 break;
1906 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1907 break;
1908 }
1909 if (i == ic->nb_streams) {
1910 /* NOTE: if the format has no header, then we need to read
1911 some packets to get most of the streams, so we cannot
1912 stop here */
1913 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1914 /* if we found the info for all the codecs, we can stop */
1915 ret = count;
1916 break;
1917 }
1918 } else {
1919 /* we did not get all the codec info, but we read too much data */
1920 if (read_size >= MAX_READ_SIZE) {
1921 ret = count;
1922 break;
1923 }
1924 }
1925
1926 /* NOTE: a new stream can be added there if no header in file
1927 (AVFMTCTX_NOHEADER) */
1928 ret = av_read_frame_internal(ic, &pkt1);
1929 if (ret < 0) {
1930 /* EOF or error */
1931 ret = -1; /* we could not have all the codec parameters before EOF */
1932 for(i=0;i<ic->nb_streams;i++) {
1933 st = ic->streams[i];
1934 if (!has_codec_parameters(st->codec)){
1935 char buf[256];
1936 avcodec_string(buf, sizeof(buf), st->codec, 0);
1937 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1938 break;
1939 }
1940 }
1941 if (i == ic->nb_streams)
1942 ret = 0;
1943 break;
1944 }
1945
1946 pktl = av_mallocz(sizeof(AVPacketList));
1947 if (!pktl) {
1948 ret = AVERROR_NOMEM;
1949 break;
1950 }
1951
1952 /* add the packet in the buffered packet list */
1953 *ppktl = pktl;
1954 ppktl = &pktl->next;
1955
1956 pkt = &pktl->pkt;
1957 *pkt = pkt1;
1958
1959 /* duplicate the packet */
1960 if (av_dup_packet(pkt) < 0) {
1961 ret = AVERROR_NOMEM;
1962 break;
1963 }
1964
1965 read_size += pkt->size;
1966
1967 st = ic->streams[pkt->stream_index];
1968 st->codec_info_duration += pkt->duration;
1969 if (pkt->duration != 0)
1970 st->codec_info_nb_frames++;
1971
1972 {
1973 int index= pkt->stream_index;
1974 int64_t last= last_dts[index];
1975 int64_t duration= pkt->dts - last;
1976
1977 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1978 if(duration*duration_count[index]*10/9 < duration_sum[index]){
1979 duration_sum[index]= duration;
1980 duration_count[index]=1;
1981 }else{
1982 int factor= av_rescale(duration, duration_count[index], duration_sum[index]);
1983 duration_sum[index] += duration;
1984 duration_count[index]+= factor;
1985 }
1986 if(st->codec_info_nb_frames == 0 && 0)
1987 st->codec_info_duration += duration;
1988 }
1989 last_dts[pkt->stream_index]= pkt->dts;
1990 }
1991 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1992 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1993 if(i){
1994 st->codec->extradata_size= i;
1995 st->codec->extradata= av_malloc(st->codec->extradata_size);
1996 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1997 }
1998 }
1999
2000 /* if still no information, we try to open the codec and to
2001 decompress the frame. We try to avoid that in most cases as
2002 it takes longer and uses more memory. For MPEG4, we need to
2003 decompress for Quicktime. */
2004 if (!has_codec_parameters(st->codec) /*&&
2005 (st->codec->codec_id == CODEC_ID_FLV1 ||
2006 st->codec->codec_id == CODEC_ID_H264 ||
2007 st->codec->codec_id == CODEC_ID_H263 ||
2008 st->codec->codec_id == CODEC_ID_H261 ||
2009 st->codec->codec_id == CODEC_ID_VORBIS ||
2010 st->codec->codec_id == CODEC_ID_MJPEG ||
2011 st->codec->codec_id == CODEC_ID_PNG ||
2012 st->codec->codec_id == CODEC_ID_PAM ||
2013 st->codec->codec_id == CODEC_ID_PGM ||
2014 st->codec->codec_id == CODEC_ID_PGMYUV ||
2015 st->codec->codec_id == CODEC_ID_PBM ||
2016 st->codec->codec_id == CODEC_ID_PPM ||
2017 st->codec->codec_id == CODEC_ID_SHORTEN ||
2018 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2019 try_decode_frame(st, pkt->data, pkt->size);
2020
2021 if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
2022 break;
2023 }
2024 count++;
2025 }
2026
2027 // close codecs which were opened in try_decode_frame()
2028 for(i=0;i<ic->nb_streams;i++) {
2029 st = ic->streams[i];
2030 if(st->codec->codec)
2031 avcodec_close(st->codec);
2032 }
2033 for(i=0;i<ic->nb_streams;i++) {
2034 st = ic->streams[i];
2035 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2036 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2037 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2038
2039 if(duration_count[i] && st->codec->time_base.num*101LL <= st->codec->time_base.den &&
2040 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den){
2041 int64_t num, den, error, best_error;
2042
2043 num= st->time_base.den*duration_count[i];
2044 den= st->time_base.num*duration_sum[i];
2045
2046 best_error= INT64_MAX;
2047 for(j=1; j<60*12; j++){
2048 error= ABS(1001*12*num - 1001*j*den);
2049 if(error < best_error){
2050 best_error= error;
2051 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, j, 12, INT_MAX);
2052 }
2053 }
2054 for(j=24; j<=30; j+=6){
2055 error= ABS(1001*12*num - 1000*12*j*den);
2056 if(error < best_error){
2057 best_error= error;
2058 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, j*1000, 1001, INT_MAX);
2059 }
2060 }
2061 }
2062
2063 /* set real frame rate info */
2064 /* compute the real frame rate for telecine */
2065 if ((st->codec->codec_id == CODEC_ID_MPEG1VIDEO ||
2066 st->codec->codec_id == CODEC_ID_MPEG2VIDEO) &&
2067 st->codec->sub_id == 2) {
2068 if (st->codec_info_nb_frames >= 20) {
2069 float coded_frame_rate, est_frame_rate;
2070 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
2071 (double)st->codec_info_duration ;
2072 coded_frame_rate = 1.0/av_q2d(st->codec->time_base);
2073 #if 0
2074 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
2075 coded_frame_rate, est_frame_rate);
2076 #endif
2077             /* if we detect that it could be telecine, we signal it.
2078                It would be better to do this at a higher level as it
2079                can change within a film */
2080 if (coded_frame_rate >= 24.97 &&
2081 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
2082 st->r_frame_rate = (AVRational){24000, 1001};
2083 }
2084 }
2085 }
2086 /* if no real frame rate, use the codec one */
2087 if (!st->r_frame_rate.num){
2088 st->r_frame_rate.num = st->codec->time_base.den;
2089 st->r_frame_rate.den = st->codec->time_base.num;
2090 }
2091 }
2092 }
2093
2094 av_estimate_timings(ic);
2095 #if 0
2096 /* correct DTS for b frame streams with no timestamps */
2097 for(i=0;i<ic->nb_streams;i++) {
2098 st = ic->streams[i];
2099 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2100 if(b-frames){
2101 ppktl = &ic->packet_buffer;
2102 while(ppkt1){
2103 if(ppkt1->stream_index != i)
2104 continue;
2105 if(ppkt1->pkt->dts < 0)
2106 break;
2107 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2108 break;
2109 ppkt1->pkt->dts -= delta;
2110 ppkt1= ppkt1->next;
2111 }
2112 if(ppkt1)
2113 continue;
2114 st->cur_dts -= delta;
2115 }
2116 }
2117 }
2118 #endif
2119 return ret;
2120 }
2121
2122 /*******************************************************/
2123
2124 /**
2125  * Start playing a network-based stream (e.g. an RTSP stream) at the
2126  * current position.
2127 */
2128 int av_read_play(AVFormatContext *s)
2129 {
2130 if (!s->iformat->read_play)
2131 return AVERROR_NOTSUPP;
2132 return s->iformat->read_play(s);
2133 }
2134
2135 /**
2136  * Pause a network-based stream (e.g. an RTSP stream).
2137 *
2138 * Use av_read_play() to resume it.
2139 */
2140 int av_read_pause(AVFormatContext *s)
2141 {
2142 if (!s->iformat->read_pause)
2143 return AVERROR_NOTSUPP;
2144 return s->iformat->read_pause(s);
2145 }
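
/*
 * Usage sketch for av_read_play()/av_read_pause(). "ic" is assumed to be an
 * AVFormatContext obtained from av_open_input_file() on e.g. an RTSP URL;
 * formats without read_play/read_pause callbacks return AVERROR_NOTSUPP.
 *
 *   if (av_read_pause(ic) == 0) {
 *       // paused: no more data is requested from the server
 *   }
 *   if (av_read_play(ic) == 0) {
 *       // playback resumes at the current position
 *   }
 */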
2146
2147 /**
2148 * Close a media file (but not its codecs).
2149 *
2150 * @param s media file handle
2151 */
2152 void av_close_input_file(AVFormatContext *s)
2153 {
2154 int i, must_open_file;
2155 AVStream *st;
2156
2157 /* free previous packet */
2158 if (s->cur_st && s->cur_st->parser)
2159 av_free_packet(&s->cur_pkt);
2160
2161 if (s->iformat->read_close)
2162 s->iformat->read_close(s);
2163 for(i=0;i<s->nb_streams;i++) {
2164 /* free all data in a stream component */
2165 st = s->streams[i];
2166 if (st->parser) {
2167 av_parser_close(st->parser);
2168 }
2169 av_free(st->index_entries);
2170 av_free(st->codec);
2171 av_free(st);
2172 }
2173 flush_packet_queue(s);
2174 must_open_file = 1;
2175 if (s->iformat->flags & AVFMT_NOFILE) {
2176 must_open_file = 0;
2177 }
2178 if (must_open_file) {
2179 url_fclose(&s->pb);
2180 }
2181 av_freep(&s->priv_data);
2182 av_free(s);
2183 }
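
/*
 * Typical demuxing lifecycle around av_close_input_file(), a minimal sketch.
 * av_open_input_file(), av_find_stream_info() and av_read_frame() are assumed
 * to be available from avformat.h as usual; error handling is reduced to the
 * bare minimum.
 *
 *   AVFormatContext *ic;
 *   AVPacket pkt;
 *
 *   if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
 *       return -1;
 *   if (av_find_stream_info(ic) < 0) {
 *       av_close_input_file(ic);
 *       return -1;
 *   }
 *   while (av_read_frame(ic, &pkt) >= 0) {
 *       // ... consume pkt.data/pkt.size of stream pkt.stream_index
 *       av_free_packet(&pkt);
 *   }
 *   av_close_input_file(ic);  // frees the streams and closes the file,
 *                             // but not codecs opened by the caller
 */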
2184
2185 /**
2186 * Add a new stream to a media file.
2187 *
2188 * Can only be called in the read_header() function. If the flag
2189 * AVFMTCTX_NOHEADER is in the format context, then new streams
2190 * can be added in read_packet too.
2191 *
2192 * @param s media file handle
2193 * @param id file format dependent stream id
2194 */
2195 AVStream *av_new_stream(AVFormatContext *s, int id)
2196 {
2197 AVStream *st;
2198
2199 if (s->nb_streams >= MAX_STREAMS)
2200 return NULL;
2201
2202 st = av_mallocz(sizeof(AVStream));
2203 if (!st)
2204 return NULL;
2205
2206 st->codec= avcodec_alloc_context();
2207 if (s->iformat) {
2208 /* no default bitrate if decoding */
2209 st->codec->bit_rate = 0;
2210 }
2211 st->index = s->nb_streams;
2212 st->id = id;
2213 st->start_time = AV_NOPTS_VALUE;
2214 st->duration = AV_NOPTS_VALUE;
2215 st->cur_dts = AV_NOPTS_VALUE;
2216
2217 /* default pts settings is MPEG like */
2218 av_set_pts_info(st, 33, 1, 90000);
2219 st->last_IP_pts = AV_NOPTS_VALUE;
2220
2221 s->streams[s->nb_streams++] = st;
2222 return st;
2223 }
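
/*
 * Sketch of av_new_stream() as used from a demuxer's read_header() callback.
 * my_read_header() and the codec parameters are placeholders; a real demuxer
 * fills them in from the container header.
 *
 *   static int my_read_header(AVFormatContext *s, AVFormatParameters *ap)
 *   {
 *       AVStream *st = av_new_stream(s, 0);      // id is format dependent
 *       if (!st)
 *           return AVERROR_NOMEM;
 *       st->codec->codec_type  = CODEC_TYPE_AUDIO;
 *       st->codec->codec_id    = CODEC_ID_MP2;
 *       st->codec->sample_rate = 44100;
 *       st->codec->channels    = 2;
 *       av_set_pts_info(st, 33, 1, 90000);       // keep or override the MPEG-like default
 *       return 0;
 *   }
 */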
2224
2225 /************************************************************/
2226 /* output media file */
2227
2228 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2229 {
2230 int ret;
2231
2232 if (s->oformat->priv_data_size > 0) {
2233 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2234 if (!s->priv_data)
2235 return AVERROR_NOMEM;
2236 } else
2237 s->priv_data = NULL;
2238
2239 if (s->oformat->set_parameters) {
2240 ret = s->oformat->set_parameters(s, ap);
2241 if (ret < 0)
2242 return ret;
2243 }
2244 return 0;
2245 }
2246
2247 /**
2248  * Allocate the stream private data and write the stream header to an
2249  * output media file.
2250 *
2251 * @param s media file handle
2252 * @return 0 if OK. AVERROR_xxx if error.
2253 */
2254 int av_write_header(AVFormatContext *s)
2255 {
2256 int ret, i;
2257 AVStream *st;
2258
2259 // some sanity checks
2260 for(i=0;i<s->nb_streams;i++) {
2261 st = s->streams[i];
2262
2263 switch (st->codec->codec_type) {
2264 case CODEC_TYPE_AUDIO:
2265 if(st->codec->sample_rate<=0){
2266 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2267 return -1;
2268 }
2269 break;
2270 case CODEC_TYPE_VIDEO:
2271 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2272 av_log(s, AV_LOG_ERROR, "time base not set\n");
2273 return -1;
2274 }
2275 if(st->codec->width<=0 || st->codec->height<=0){
2276 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2277 return -1;
2278 }
2279 break;
2280 }
2281 }
2282
2283 if(s->oformat->write_header){
2284 ret = s->oformat->write_header(s);
2285 if (ret < 0)
2286 return ret;
2287 }
2288
2289 /* init PTS generation */
2290 for(i=0;i<s->nb_streams;i++) {
2291 int64_t den = AV_NOPTS_VALUE;
2292 st = s->streams[i];
2293
2294 switch (st->codec->codec_type) {
2295 case CODEC_TYPE_AUDIO:
2296 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2297 break;
2298 case CODEC_TYPE_VIDEO:
2299 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2300 break;
2301 default:
2302 break;
2303 }
2304 if (den != AV_NOPTS_VALUE) {
2305 if (den <= 0)
2306 return AVERROR_INVALIDDATA;
2307 av_frac_init(&st->pts, 0, 0, den);
2308 }
2309 }
2310 return 0;
2311 }
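
/*
 * Minimal sketch of the codec fields the sanity checks in av_write_header()
 * expect to be set beforehand. "ast" and "vst" are hypothetical streams
 * created with av_new_stream() on the muxing context "s"; the values are
 * only illustrative.
 *
 *   // audio stream: the sample rate must be set
 *   ast->codec->codec_type  = CODEC_TYPE_AUDIO;
 *   ast->codec->sample_rate = 48000;
 *
 *   // video stream: time base and dimensions must be set
 *   vst->codec->codec_type = CODEC_TYPE_VIDEO;
 *   vst->codec->time_base  = (AVRational){1, 25};
 *   vst->codec->width      = 720;
 *   vst->codec->height     = 576;
 *
 *   if (av_write_header(s) < 0)
 *       return -1;   // one of the checks above (or the muxer itself) failed
 */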
2312
2313 //FIXME merge with compute_pkt_fields
2314 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2315 int b_frames = FFMAX(st->codec->has_b_frames, st->codec->max_b_frames);
2316 int num, den, frame_size;
2317
2318 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
2319
2320 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2321 return -1;*/
2322
2323 /* duration field */
2324 if (pkt->duration == 0) {
2325 compute_frame_duration(&num, &den, st, NULL, pkt);
2326 if (den && num) {
2327 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2328 }
2329 }
2330
2331 //XXX/FIXME this is a temporary hack until all encoders output pts
2332 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
2333 pkt->dts=
2334 // pkt->pts= st->cur_dts;
2335 pkt->pts= st->pts.val;
2336 }
2337
2338 //calculate dts from pts
2339 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2340 if(b_frames){
2341 if(st->last_IP_pts == AV_NOPTS_VALUE){
2342 st->last_IP_pts= -pkt->duration;
2343 }
2344 if(st->last_IP_pts < pkt->pts){
2345 pkt->dts= st->last_IP_pts;
2346 st->last_IP_pts= pkt->pts;
2347 }else
2348 pkt->dts= pkt->pts;
2349 }else
2350 pkt->dts= pkt->pts;
2351 }
2352
2353 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2354 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2355 return -1;
2356 }
2357 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2358 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2359 return -1;
2360 }
2361
2362 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
2363 st->cur_dts= pkt->dts;
2364 st->pts.val= pkt->dts;
2365
2366 /* update pts */
2367 switch (st->codec->codec_type) {
2368 case CODEC_TYPE_AUDIO:
2369 frame_size = get_audio_frame_size(st->codec, pkt->size);
2370
2371 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2372 but it would be better if we had the real timestamps from the encoder */
2373 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2374 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2375 }
2376 break;
2377 case CODEC_TYPE_VIDEO:
2378 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2379 break;
2380 default:
2381 break;
2382 }
2383 return 0;
2384 }
2385
2386 static void truncate_ts(AVStream *st, AVPacket *pkt){
2387 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2388
2389 // if(pkt->dts < 0)
2390 //        pkt->dts= 0;  //this happens for low_delay=0 and B-frames; FIXME, needs further investigation into what we should do here
2391
2392 pkt->pts &= pts_mask;
2393 pkt->dts &= pts_mask;
2394 }
2395
2396 /**
2397 * Write a packet to an output media file.
2398 *
2399 * The packet shall contain one audio or video frame.
2400 *
2401 * @param s media file handle
2402 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2403 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2404 */
2405 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2406 {
2407 int ret;
2408
2409 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2410 if(ret<0)
2411 return ret;
2412
2413 truncate_ts(s->streams[pkt->stream_index], pkt);
2414
2415 ret= s->oformat->write_packet(s, pkt);
2416 if(!ret)
2417 ret= url_ferror(&s->pb);
2418 return ret;
2419 }
2420
2421 /**
2422  * An interleave_packet implementation which interleaves per DTS.
2423  * Packets with pkt->destruct == av_destruct_packet will be freed inside this function,
2424  * so they cannot be used afterwards; note that calling av_free_packet() on them is still safe.
2425 */
2426 static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2427 AVPacketList *pktl, **next_point, *this_pktl;
2428 int stream_count=0;
2429 int streams[MAX_STREAMS];
2430
2431 if(pkt){
2432 AVStream *st= s->streams[ pkt->stream_index];
2433
2434 // assert(pkt->destruct != av_destruct_packet); //FIXME
2435
2436 this_pktl = av_mallocz(sizeof(AVPacketList));
2437 this_pktl->pkt= *pkt;
2438 if(pkt->destruct == av_destruct_packet)
2439 pkt->destruct= NULL; // non shared -> must keep original from being freed
2440 else
2441 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2442
2443 next_point = &s->packet_buffer;
2444 while(*next_point){
2445 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2446 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2447 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2448 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2449 break;
2450 next_point= &(*next_point)->next;
2451 }
2452 this_pktl->next= *next_point;
2453 *next_point= this_pktl;
2454 }
2455
2456 memset(streams, 0, sizeof(streams));
2457 pktl= s->packet_buffer;
2458 while(pktl){
2459 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2460 if(streams[ pktl->pkt.stream_index ] == 0)
2461 stream_count++;
2462 streams[ pktl->pkt.stream_index ]++;
2463 pktl= pktl->next;
2464 }
2465
2466 if(s->nb_streams == stream_count || (flush && stream_count)){
2467 pktl= s->packet_buffer;
2468 *out= pktl->pkt;
2469
2470 s->packet_buffer= pktl->next;
2471 av_freep(&pktl);
2472 return 1;
2473 }else{
2474 av_init_packet(out);
2475 return 0;
2476 }
2477 }
2478
2479 /**
2480  * Interleaves an AVPacket correctly so it can be muxed.
2481 * @param out the interleaved packet will be output here
2482 * @param in the input packet
2483 * @param flush 1 if no further packets are available as input and all
2484 * remaining packets should be output
2485 * @return 1 if a packet was output, 0 if no packet could be output,
2486  *         < 0 if an error occurred
2487 */
2488 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2489 if(s->oformat->interleave_packet)
2490 return s->oformat->interleave_packet(s, out, in, flush);
2491 else
2492 return av_interleave_packet_per_dts(s, out, in, flush);
2493 }
2494
2495 /**
2496 * Writes a packet to an output media file ensuring correct interleaving.
2497 *
2498 * The packet must contain one audio or video frame.
2499  * If the packets are already correctly interleaved, the application should
2500  * call av_write_frame() instead as it is slightly faster. It is also important
2501  * to keep in mind that completely non-interleaved input will need huge amounts
2502  * of memory to interleave with this, so it is preferable to interleave at the
2503  * demuxer level.
2504 *
2505 * @param s media file handle
2506 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2507 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2508 */
2509 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2510 AVStream *st= s->streams[ pkt->stream_index];
2511
2512 //FIXME/XXX/HACK drop zero sized packets
2513 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2514 return 0;
2515
2516 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts);
2517 if(compute_pkt_fields2(st, pkt) < 0)
2518 return -1;
2519
2520 if(pkt->dts == AV_NOPTS_VALUE)
2521 return -1;
2522
2523 for(;;){
2524 AVPacket opkt;
2525 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2526 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2527 return ret;
2528
2529 truncate_ts(s->streams[opkt.stream_index], &opkt);
2530 ret= s->oformat->write_packet(s, &opkt);
2531
2532 av_free_packet(&opkt);
2533 pkt= NULL;
2534
2535 if(ret<0)
2536 return ret;
2537 if(url_ferror(&s->pb))
2538 return url_ferror(&s->pb);
2539 }
2540 }
2541
2542 /**
2543 * @brief Write the stream trailer to an output media file and
2544 * free the file private data.
2545 *
2546 * @param s media file handle
2547 * @return 0 if OK. AVERROR_xxx if error.
2548 */
2549 int av_write_trailer(AVFormatContext *s)
2550 {
2551 int ret, i;
2552
2553 for(;;){
2554 AVPacket pkt;
2555 ret= av_interleave_packet(s, &pkt, NULL, 1);
2556 if(ret<0) //FIXME cleanup needed for ret<0 ?
2557 goto fail;
2558 if(!ret)
2559 break;
2560
2561 truncate_ts(s->streams[pkt.stream_index], &pkt);
2562 ret= s->oformat->write_packet(s, &pkt);
2563
2564 av_free_packet(&pkt);
2565
2566 if(ret<0)
2567 goto fail;
2568 if(url_ferror(&s->pb))
2569 goto fail;
2570 }
2571
2572 if(s->oformat->write_trailer)
2573 ret = s->oformat->write_trailer(s);
2574 fail:
2575 if(ret == 0)
2576 ret=url_ferror(&s->pb);
2577 for(i=0;i<s->nb_streams;i++)
2578 av_freep(&s->streams[i]->priv_data);
2579 av_freep(&s->priv_data);
2580 return ret;
2581 }
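
/*
 * End-to-end muxing sketch tying together av_set_parameters(),
 * av_write_header(), av_interleaved_write_frame() and av_write_trailer().
 * av_alloc_format_context(), url_fopen()/URL_WRONLY and the
 * encode_next_packet() helper are assumed/hypothetical here and stand in
 * for the caller's own setup and encoding code.
 *
 *   AVFormatContext *s = av_alloc_format_context();
 *   AVPacket pkt;
 *
 *   s->oformat = guess_format(NULL, "out.mpg", NULL);
 *   pstrcpy(s->filename, sizeof(s->filename), "out.mpg");
 *   av_new_stream(s, 0);                        // ... then fill in codec parameters
 *   if (av_set_parameters(s, NULL) < 0)
 *       return -1;
 *   if (url_fopen(&s->pb, "out.mpg", URL_WRONLY) < 0)
 *       return -1;
 *   if (av_write_header(s) < 0)
 *       return -1;
 *   while (encode_next_packet(&pkt) > 0) {      // hypothetical encoder loop
 *       if (av_interleaved_write_frame(s, &pkt) < 0)
 *           break;                              // the muxer may take over pkt's data, see note above
 *   }
 *   av_write_trailer(s);
 *   url_fclose(&s->pb);
 */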
2582
2583 /* "user interface" functions */
2584
2585 void dump_format(AVFormatContext *ic,
2586 int index,
2587 const char *url,
2588 int is_output)
2589 {
2590 int i, flags;
2591 char buf[256];
2592
2593 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2594 is_output ? "Output" : "Input",
2595 index,
2596 is_output ? ic->oformat->name : ic->iformat->name,
2597 is_output ? "to" : "from", url);
2598 if (!is_output) {
2599 av_log(NULL, AV_LOG_INFO, " Duration: ");
2600 if (ic->duration != AV_NOPTS_VALUE) {
2601 int hours, mins, secs, us;
2602 secs = ic->duration / AV_TIME_BASE;
2603 us = ic->duration % AV_TIME_BASE;
2604 mins = secs / 60;
2605 secs %= 60;
2606 hours = mins / 60;
2607 mins %= 60;
2608 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2609 (10 * us) / AV_TIME_BASE);
2610 } else {
2611 av_log(NULL, AV_LOG_INFO, "N/A");
2612 }
2613 if (ic->start_time != AV_NOPTS_VALUE) {
2614 int secs, us;
2615 av_log(NULL, AV_LOG_INFO, ", start: ");
2616 secs = ic->start_time / AV_TIME_BASE;
2617 us = ic->start_time % AV_TIME_BASE;
2618 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2619 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2620 }
2621 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2622 if (ic->bit_rate) {
2623 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2624 } else {
2625 av_log(NULL, AV_LOG_INFO, "N/A");
2626 }
2627 av_log(NULL, AV_LOG_INFO, "\n");
2628 }
2629 for(i=0;i<ic->nb_streams;i++) {
2630 AVStream *st = ic->streams[i];
2631 int g= ff_gcd(st->time_base.num, st->time_base.den);
2632 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2633 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2634         /* the PID is important information, so we display it */
2635 /* XXX: add a generic system */
2636 if (is_output)
2637 flags = ic->oformat->flags;
2638 else
2639 flags = ic->iformat->flags;
2640 if (flags & AVFMT_SHOW_IDS) {
2641 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2642 }
2643 if (strlen(st->language) > 0) {
2644 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2645 }
2646 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2647 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2648 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2649 if(st->r_frame_rate.den && st->r_frame_rate.num)
2650 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
2651 /* else if(st->time_base.den && st->time_base.num)
2652 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
2653 else
2654 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
2655 }
2656 av_log(NULL, AV_LOG_INFO, "\n");
2657 }
2658 }
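
/*
 * dump_format() usage, a small sketch: print a human-readable description of
 * an opened file. "ic" is assumed to come from av_open_input_file() followed
 * by av_find_stream_info() so that the stream parameters are known.
 *
 *   dump_format(ic, 0, "input.mpg", 0);   // index 0, is_output = 0
 *
 * For a muxing context the last argument is 1 and the information is taken
 * from the output streams instead.
 */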
2659
2660 typedef struct {
2661 const char *abv;
2662 int width, height;
2663 int frame_rate, frame_rate_base;
2664 } AbvEntry;
2665
2666 static AbvEntry frame_abvs[] = {
2667 { "ntsc", 720, 480, 30000, 1001 },
2668 { "pal", 720, 576, 25, 1 },
2669 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2670 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2671 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2672 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2673 { "film", 352, 240, 24, 1 },
2674 { "ntsc-film", 352, 240, 24000, 1001 },
2675 { "sqcif", 128, 96, 0, 0 },
2676 { "qcif", 176, 144, 0, 0 },
2677 { "cif", 352, 288, 0, 0 },
2678 { "4cif", 704, 576, 0, 0 },
2679 };
2680
2681 /**
2682  * Parses the width and height out of string str.
2683 */
2684 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2685 {
2686 int i;
2687 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2688 const char *p;
2689 int frame_width = 0, frame_height = 0;
2690
2691 for(i=0;i<n;i++) {
2692 if (!strcmp(frame_abvs[i].abv, str)) {
2693 frame_width = frame_abvs[i].width;
2694 frame_height = frame_abvs[i].height;
2695 break;
2696 }
2697 }
2698 if (i == n) {
2699 p = str;
2700 frame_width = strtol(p, (char **)&p, 10);
2701 if (*p)
2702 p++;
2703 frame_height = strtol(p, (char **)&p, 10);
2704 }
2705 if (frame_width <= 0 || frame_height <= 0)
2706 return -1;
2707 *width_ptr = frame_width;
2708 *height_ptr = frame_height;
2709 return 0;
2710 }
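
/*
 * parse_image_size() accepts either an abbreviation from frame_abvs[] above
 * or an explicit "<width>x<height>" string; a minimal sketch:
 *
 *   int w, h;
 *
 *   parse_image_size(&w, &h, "pal");          // w = 720, h = 576
 *   parse_image_size(&w, &h, "640x480");      // w = 640, h = 480
 *   if (parse_image_size(&w, &h, "bogus") < 0) {
 *       // neither an abbreviation nor a parsable width/height pair
 *   }
 */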
2711
2712 /**
2713 * Converts frame rate from string to a fraction.
2714 *
2715 * First we try to get an exact integer or fractional frame rate.
2716 * If this fails we convert the frame rate to a double and return
2717 * an approximate fraction using the DEFAULT_FRAME_RATE_BASE.
2718 */
2719 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2720 {
2721 int i;
2722 char* cp;
2723
2724 /* First, we check our abbreviation table */
2725 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2726 if (!strcmp(frame_abvs[i].abv, arg)) {
2727 *frame_rate = frame_abvs[i].frame_rate;
2728 *frame_rate_base = frame_abvs[i].frame_rate_base;
2729 return 0;
2730 }
2731
2732 /* Then, we try to parse it as fraction */
2733     /* Then, we try to parse it as a fraction */
2734 if (!cp)
2735 cp = strchr(arg, ':');
2736 if (cp) {
2737 char* cpp;
2738 *frame_rate = strtol(arg, &cpp, 10);
2739 if (cpp != arg || cpp == cp)
2740 *frame_rate_base = strtol(cp+1, &cpp, 10);
2741 else
2742 *frame_rate = 0;
2743 }
2744 else {
2745         /* Finally we give up and parse it as a double */
2746 AVRational time_base = av_d2q(strtod(arg, 0), DEFAULT_FRAME_RATE_BASE);
2747 *frame_rate_base = time_base.den;
2748 *frame_rate = time_base.num;
2749 }
2750 if (!*frame_rate || !*frame_rate_base)
2751 return -1;
2752 else
2753 return 0;
2754 }
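
/*
 * parse_frame_rate() examples, a small sketch. Abbreviations, fractions
 * (with '/' or ':') and plain decimal values are all accepted:
 *
 *   int rate, base;
 *
 *   parse_frame_rate(&rate, &base, "ntsc");         // 30000 / 1001
 *   parse_frame_rate(&rate, &base, "30000/1001");   // 30000 / 1001
 *   parse_frame_rate(&rate, &base, "25");           // 25 / 1
 *   parse_frame_rate(&rate, &base, "23.976");       // approximated via av_d2q()
 *                                                   // with DEFAULT_FRAME_RATE_BASE
 */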
2755
2756 /**
2757 * Converts date string to number of seconds since Jan 1st, 1970.
2758  * Converts a date string to a number of microseconds since Jan 1st, 1970.
2759 * @code
2760 * Syntax:
2761 * - If not a duration:
2762 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2763  * Time is local time unless Z is suffixed to the end, in which case it is GMT.
2764  * The date is returned in microseconds since Jan 1st, 1970.
2765 *
2766 * - If a duration:
2767 * HH[:MM[:SS[.m...]]]
2768 * S+[.m...]
2769 * @endcode
2770 */
2771 #ifndef CONFIG_WINCE
2772 int64_t parse_date(const char *datestr, int duration)
2773 {
2774 const char *p;
2775 int64_t t;
2776 struct tm dt;
2777 int i;
2778 static const char *date_fmt[] = {
2779 "%Y-%m-%d",
2780 "%Y%m%d",
2781 };
2782 static const char *time_fmt[] = {
2783 "%H:%M:%S",
2784 "%H%M%S",
2785 };
2786 const char *q;
2787 int is_utc, len;
2788 char lastch;
2789 int negative = 0;
2790
2791 #undef time
2792 time_t now = time(0);
2793
2794 len = strlen(datestr);
2795 if (len > 0)
2796 lastch = datestr[len - 1];
2797 else
2798 lastch = '\0';
2799 is_utc = (lastch == 'z' || lastch == 'Z');
2800
2801 memset(&dt, 0, sizeof(dt));
2802
2803 p = datestr;
2804 q = NULL;
2805 if (!duration) {
2806 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2807 q = small_strptime(p, date_fmt[i], &dt);
2808 if (q) {
2809 break;
2810 }
2811 }
2812
2813 if (!q) {
2814 if (is_utc) {
2815 dt = *gmtime(&now);
2816 } else {
2817 dt = *localtime(&now);
2818 }
2819 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2820 } else {
2821 p = q;
2822 }
2823
2824 if (*p == 'T' || *p == 't' || *p == ' ')
2825 p++;
2826
2827 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2828 q = small_strptime(p, time_fmt[i], &dt);
2829 if (q) {
2830 break;
2831 }
2832 }
2833 } else {
2834 if (p[0] == '-') {
2835 negative = 1;
2836 ++p;
2837 }
2838 q = small_strptime(p, time_fmt[0], &dt);
2839 if (!q) {
2840 dt.tm_sec = strtol(p, (char **)&q, 10);
2841 dt.tm_min = 0;
2842 dt.tm_hour = 0;
2843 }
2844 }
2845
2846 /* Now we have all the fields that we can get */
2847 if (!q) {
2848 if (duration)
2849 return 0;
2850 else
2851 return now * int64_t_C(1000000);
2852 }
2853
2854 if (duration) {
2855 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2856 } else {
2857 dt.tm_isdst = -1; /* unknown */
2858 if (is_utc) {
2859 t = mktimegm(&dt);
2860 } else {
2861 t = mktime(&dt);
2862 }
2863 }
2864
2865 t *= 1000000;
2866
2867 if (*q == '.') {
2868 int val, n;
2869 q++;
2870 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2871 if (!isdigit(*q))
2872 break;
2873 val += n * (*q - '0');
2874 }
2875 t += val;
2876 }
2877 return negative ? -t : t;
2878 }
2879 #endif /* CONFIG_WINCE */
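
/*
 * parse_date() examples, a small sketch. With duration=0 the result is an
 * absolute time in microseconds since 1970-01-01 (local time unless the
 * string ends in 'Z'); with duration=1 it is a length of time in
 * microseconds.
 *
 *   int64_t when = parse_date("2006-01-02 13:30:00", 0);  // absolute, local time
 *   int64_t utc  = parse_date("20060102T133000Z", 0);     // absolute, GMT
 *   int64_t len  = parse_date("01:30:00.500", 1);         // 5400.5 seconds
 *   int64_t secs = parse_date("90.25", 1);                // 90.25 seconds
 */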
2880
2881 /**
2882 * Attempts to find a specific tag in a URL.
2883 *
2884 * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done.
2885  * Syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done.
2886  * Returns 1 if the tag was found.
2887 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2888 {
2889 const char *p;
2890 char tag[128], *q;
2891
2892 p = info;
2893 if (*p == '?')
2894 p++;
2895 for(;;) {
2896 q = tag;
2897 while (*p != '\0' && *p != '=' && *p != '&') {
2898 if ((q - tag) < sizeof(tag) - 1)
2899 *q++ = *p;
2900 p++;
2901 }
2902 *q = '\0';
2903 q = arg;
2904 if (*p == '=') {
2905 p++;
2906 while (*p != '&' && *p != '\0') {
2907 if ((q - arg) < arg_size - 1) {
2908 if (*p == '+')
2909 *q++ = ' ';
2910 else
2911 *q++ = *p;
2912 }
2913 p++;
2914 }
2915 *q = '\0';
2916 }
2917 if (!strcmp(tag, tag1))
2918 return 1;
2919 if (*p != '&')
2920 break;
2921 p++;
2922 }
2923 return 0;
2924 }
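
/*
 * find_info_tag() usage, a small sketch: extract a single parameter from a
 * '?tag1=val1&tag2=val2' style string (such as the path returned by
 * url_split() below).
 *
 *   char value[64];
 *
 *   if (find_info_tag(value, sizeof(value), "user", "?user=joe&pass=x")) {
 *       // value now holds "joe"; '+' characters are decoded to spaces
 *   }
 */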
2925
2926 /**
2927 * Returns in 'buf' the path with '%d' replaced by number.
2928 *
2929 * Also handles the '%0nd' format where 'n' is the total number
2930  * of digits, and '%%'. Returns 0 if OK, and -1 on format error.
2931 */
2932 int get_frame_filename(char *buf, int buf_size,
2933 const char *path, int number)
2934 {
2935 const char *p;
2936 char *q, buf1[20], c;
2937 int nd, len, percentd_found;
2938
2939 q = buf;
2940 p = path;
2941 percentd_found = 0;
2942 for(;;) {
2943 c = *p++;
2944 if (c == '\0')
2945 break;
2946 if (c == '%') {
2947 do {
2948 nd = 0;
2949 while (isdigit(*p)) {
2950 nd = nd * 10 + *p++ - '0';
2951 }
2952 c = *p++;
2953 } while (isdigit(c));
2954
2955 switch(c) {
2956 case '%':
2957 goto addchar;
2958 case 'd':
2959 if (percentd_found)
2960 goto fail;
2961 percentd_found = 1;
2962 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2963 len = strlen(buf1);
2964 if ((q - buf + len) > buf_size - 1)
2965 goto fail;
2966 memcpy(q, buf1, len);
2967 q += len;
2968 break;
2969 default:
2970 goto fail;
2971 }
2972 } else {
2973 addchar:
2974 if ((q - buf) < buf_size - 1)
2975 *q++ = c;
2976 }
2977 }
2978 if (!percentd_found)
2979 goto fail;
2980 *q = '\0';
2981 return 0;
2982 fail:
2983 *q = '\0';
2984 return -1;
2985 }
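
/*
 * get_frame_filename() examples, a small sketch. Exactly one '%d' (or
 * '%0nd') sequence must be present, otherwise -1 is returned:
 *
 *   char name[1024];
 *
 *   get_frame_filename(name, sizeof(name), "img%d.ppm", 7);    // "img7.ppm"
 *   get_frame_filename(name, sizeof(name), "img%03d.ppm", 7);  // "img007.ppm"
 *   get_frame_filename(name, sizeof(name), "img.ppm", 7);      // fails: no '%d'
 *   get_frame_filename(name, sizeof(name), "%d_%d.ppm", 7);    // fails: two '%d'
 */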
2986
2987 /**
2988 * Print nice hexa dump of a buffer
2989  * Print a nice hex dump of a buffer.
2990 * @param buf buffer
2991 * @param size buffer size
2992 */
2993 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2994 {
2995 int len, i, j, c;
2996
2997 for(i=0;i<size;i+=16) {
2998 len = size - i;
2999 if (len > 16)
3000 len = 16;
3001 fprintf(f, "%08x ", i);
3002 for(j=0;j<16;j++) {
3003 if (j < len)
3004 fprintf(f, " %02x", buf[i+j]);
3005 else
3006 fprintf(f, " ");
3007 }
3008 fprintf(f, " ");
3009 for(j=0;j<len;j++) {
3010 c = buf[i+j];
3011 if (c < ' ' || c > '~')
3012 c = '.';
3013 fprintf(f, "%c", c);
3014 }
3015 fprintf(f, "\n");
3016 }
3017 }
3018
3019 /**
3020 * Print on 'f' a nice dump of a packet
3021 * @param f stream for output
3022 * @param pkt packet to dump
3023 * @param dump_payload true if the payload must be displayed too
3024 */
3025 //FIXME needs to know the time_base
3026 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3027 {
3028 fprintf(f, "stream #%d:\n", pkt->stream_index);
3029 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3030 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3031 /* DTS is _always_ valid after av_read_frame() */
3032 fprintf(f, " dts=");
3033 if (pkt->dts == AV_NOPTS_VALUE)
3034 fprintf(f, "N/A");
3035 else
3036 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
3037 /* PTS may be not known if B frames are present */
3038 fprintf(f, " pts=");
3039 if (pkt->pts == AV_NOPTS_VALUE)
3040 fprintf(f, "N/A");
3041 else
3042 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
3043 fprintf(f, "\n");
3044 fprintf(f, " size=%d\n", pkt->size);
3045 if (dump_payload)
3046 av_hex_dump(f, pkt->data, pkt->size);
3047 }
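
/*
 * av_hex_dump()/av_pkt_dump() usage, a small sketch, typically for debugging
 * a (de)muxer. "pkt" is assumed to be a packet returned by av_read_frame().
 *
 *   av_pkt_dump(stderr, &pkt, 0);              // header fields only
 *   av_hex_dump(stderr, pkt.data, pkt.size);   // raw payload
 */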
3048
3049 void url_split(char *proto, int proto_size,
3050 char *authorization, int authorization_size,
3051 char *hostname, int hostname_size,
3052 int *port_ptr,
3053 char *path, int path_size,
3054 const char *url)
3055 {
3056 const char *p;
3057 char *q;
3058 int port;
3059
3060 port = -1;
3061
3062 p = url;
3063 q = proto;
3064 while (*p != ':' && *p != '\0') {
3065 if ((q - proto) < proto_size - 1)
3066 *q++ = *p;
3067 p++;
3068 }
3069 if (proto_size > 0)
3070 *q = '\0';
3071 if (authorization_size > 0)
3072 authorization[0] = '\0';
3073 if (*p == '\0') {
3074 if (proto_size > 0)
3075 proto[0] = '\0';
3076 if (hostname_size > 0)
3077 hostname[0] = '\0';
3078 p = url;
3079 } else {
3080 char *at,*slash; // PETR: position of '@' character and '/' character
3081
3082 p++;
3083 if (*p == '/')
3084 p++;
3085 if (*p == '/')
3086 p++;
3087 at = strchr(p,'@'); // PETR: get the position of '@'
3088 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
3089 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
3090
3091 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
3092
3093 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
3094 if (*p == '@') { // PETR: passed '@'
3095 if (authorization_size > 0)
3096 *q = '\0';
3097 q = hostname;
3098 at = NULL;
3099 } else if (!at) { // PETR: hostname
3100 if ((q - hostname) < hostname_size - 1)
3101 *q++ = *p;
3102 } else {
3103 if ((q - authorization) < authorization_size - 1)
3104 *q++ = *p;
3105 }
3106 p++;
3107 }
3108 if (hostname_size > 0)
3109 *q = '\0';
3110 if (*p == ':') {
3111 p++;
3112 port = strtoul(p, (char **)&p, 10);
3113 }
3114 }
3115 if (port_ptr)
3116 *port_ptr = port;
3117 pstrcpy(path, path_size, p);
3118 }
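
/*
 * url_split() example, a small sketch splitting an RTSP-style URL into its
 * components. Missing parts come back as empty strings, a missing port as -1.
 *
 *   char proto[16], auth[128], host[128], path[256];
 *   int port;
 *
 *   url_split(proto, sizeof(proto), auth, sizeof(auth),
 *             host, sizeof(host), &port, path, sizeof(path),
 *             "rtsp://user:pass@example.com:554/stream.sdp");
 *   // proto = "rtsp", auth = "user:pass", host = "example.com",
 *   // port = 554, path = "/stream.sdp"
 */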
3119
3120 /**
3121 * Set the pts for a given stream.
3122 *
3123 * @param s stream
3124 * @param pts_wrap_bits number of bits effectively used by the pts
3125 * (used for wrap control, 33 is the value for MPEG)
3126 * @param pts_num numerator to convert to seconds (MPEG: 1)
3127 * @param pts_den denominator to convert to seconds (MPEG: 90000)
3128 */
3129 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3130 int pts_num, int pts_den)
3131 {
3132 s->pts_wrap_bits = pts_wrap_bits;
3133 s->time_base.num = pts_num;
3134 s->time_base.den = pts_den;
3135 }
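
/*
 * av_set_pts_info() examples, a small sketch. An MPEG-PS/TS style demuxer
 * uses 33-bit timestamps in a 1/90000 time base (the default installed by
 * av_new_stream() above); a container with millisecond timestamps would use
 * a 1/1000 time base instead.
 *
 *   av_set_pts_info(st, 33, 1, 90000);   // pts_wrap_bits, pts_num, pts_den
 *   av_set_pts_info(st, 64, 1, 1000);    // e.g. millisecond timestamps
 */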
3136
3137 /* fraction handling */
3138
3139 /**
3140 * f = val + (num / den) + 0.5.
3141 *
3142  * 'num' is normalized so that 0 <= num < den.
3143 *
3144 * @param f fractional number
3145 * @param val integer value
3146 * @param num must be >= 0
3147 * @param den must be >= 1
3148 */
3149 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3150 {
3151 num += (den >> 1);
3152 if (num >= den) {
3153 val += num / den;
3154 num = num % den;
3155 }
3156 f->val = val;
3157 f->num = num;
3158 f->den = den;
3159 }
3160
3161 /**
3162 * Set f to (val + 0.5).
3163 */
3164 void av_frac_set(AVFrac *f, int64_t val)
3165 {
3166 f->val = val;
3167 f->num = f->den >> 1;
3168 }
3169
3170 /**
3171  * Fractional addition to f: f = f + (incr / f->den).
3172 *
3173 * @param f fractional number
3174 * @param incr increment, can be positive or negative
3175 */
3176 void av_frac_add(AVFrac *f, int64_t incr)
3177 {
3178 int64_t num, den;
3179
3180 num = f->num + incr;
3181 den = f->den;
3182 if (num < 0) {
3183 f->val += num / den;
3184 num = num % den;
3185 if (num < 0) {
3186 num += den;
3187 f->val--;
3188 }
3189 } else if (num >= den) {
3190 f->val += num / den;
3191 num = num % den;
3192 }
3193 f->num = num;
3194 }
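
/*
 * AVFrac usage, a small sketch of how the muxing code above accumulates
 * exact timestamps. With a 1/1000 time base and 30000/1001 fps video each
 * frame lasts 1001000/30000 ms, which is not an integer; the fraction keeps
 * the remainder so no rounding error accumulates.
 *
 *   AVFrac pts;
 *   int i;
 *
 *   av_frac_init(&pts, 0, 0, (int64_t)1 * 30000);   // den = time_base.num * codec_time_base.den
 *   for (i = 0; i < 3; i++) {
 *       av_frac_add(&pts, (int64_t)1000 * 1001);    // incr = time_base.den * codec_time_base.num
 *       // pts.val is now 33, then 67, then 100 (rounded, no drift)
 *   }
 */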
3195
3196 /**
3197  * Register a new image format.
3198 * @param img_fmt Image format descriptor
3199 */
3200 void av_register_image_format(AVImageFormat *img_fmt)
3201 {
3202 AVImageFormat **p;
3203
3204 p = &first_image_format;
3205 while (*p != NULL) p = &(*p)->next;
3206 *p = img_fmt;
3207 img_fmt->next = NULL;
3208 }
3209
3210 /**
3211 * Guesses image format based on data in the image.
3212 */
3213 AVImageFormat *av_probe_image_format(AVProbeData *pd)
3214 {
3215 AVImageFormat *fmt1, *fmt;
3216 int score, score_max;
3217
3218 fmt = NULL;
3219 score_max = 0;
3220 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
3221 if (fmt1->img_probe) {
3222 score = fmt1->img_probe(pd);
3223 if (score > score_max) {
3224 score_max = score;
3225 fmt = fmt1;
3226 }
3227 }
3228 }
3229 return fmt;
3230 }
3231
3232 /**
3233 * Guesses image format based on file name extensions.
3234 */
3235 AVImageFormat *guess_image_format(const char *filename)
3236 {
3237 AVImageFormat *fmt1;
3238
3239 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
3240 if (fmt1->extensions && match_ext(filename, fmt1->extensions))
3241 return fmt1;
3242 }
3243 return NULL;
3244 }
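
/*
 * Image format selection, a small sketch: by extension when only a file name
 * is known, or by probing the first bytes when some data is available.
 * header_bytes/header_size are a hypothetical buffer holding the start of
 * the file.
 *
 *   AVImageFormat *fmt = guess_image_format("frame.png");
 *
 *   if (!fmt) {
 *       AVProbeData pd;
 *       pd.filename = "frame.png";
 *       pd.buf      = header_bytes;
 *       pd.buf_size = header_size;
 *       fmt = av_probe_image_format(&pd);
 *   }
 */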
3245
3246 /**
3247 * Read an image from a stream.
3248  * @param pb byte stream containing the image
3249 * @param fmt image format, NULL if probing is required
3250 */
3251 int av_read_image(ByteIOContext *pb, const char *filename,
3252 AVImageFormat *fmt,
3253 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
3254 {
3255 uint8_t buf[PROBE_BUF_MIN];
3256 AVProbeData probe_data, *pd = &probe_data;
3257 offset_t pos;
3258 int ret;
3259
3260 if (!fmt) {
3261 pd->filename = filename;
3262 pd->buf = buf;
3263 pos = url_ftell(pb);
3264 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_MIN);
3265 url_fseek(pb, pos, SEEK_SET);
3266 fmt = av_probe_image_format(pd);
3267 }
3268 if (!fmt)
3269 return AVERROR_NOFMT;
3270 ret = fmt->img_read(pb, alloc_cb, opaque);
3271 return ret;
3272 }
3273
3274 /**
3275 * Write an image to a stream.
3276 * @param pb byte stream for the image output
3277 * @param fmt image format
3278  * @param img image data and information
3279 */
3280 int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
3281 {
3282 return fmt->img_write(pb, img);
3283 }
3284