1 /*
2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #include "avformat.h"
20
21 #undef NDEBUG
22 #include <assert.h>
23
24 /**
25 * @file libavformat/utils.c
26  * Various utility functions for using the FFmpeg libraries.
27 */
28
29 /** head of registered input format linked list. */
30 AVInputFormat *first_iformat = NULL;
31 /** head of registered output format linked list. */
32 AVOutputFormat *first_oformat = NULL;
33 /** head of registered image format linked list. */
34 AVImageFormat *first_image_format = NULL;
35
36 void av_register_input_format(AVInputFormat *format)
37 {
38 AVInputFormat **p;
39 p = &first_iformat;
40 while (*p != NULL) p = &(*p)->next;
41 *p = format;
42 format->next = NULL;
43 }
44
45 void av_register_output_format(AVOutputFormat *format)
46 {
47 AVOutputFormat **p;
48 p = &first_oformat;
49 while (*p != NULL) p = &(*p)->next;
50 *p = format;
51 format->next = NULL;
52 }
53
54 int match_ext(const char *filename, const char *extensions)
55 {
56 const char *ext, *p;
57 char ext1[32], *q;
58
59 if(!filename)
60 return 0;
61
62 ext = strrchr(filename, '.');
63 if (ext) {
64 ext++;
65 p = extensions;
66 for(;;) {
67 q = ext1;
68 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
69 *q++ = *p++;
70 *q = '\0';
71 if (!strcasecmp(ext1, ext))
72 return 1;
73 if (*p == '\0')
74 break;
75 p++;
76 }
77 }
78 return 0;
79 }
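/*
 * Illustrative sketch of how match_ext() is typically used: the extension
 * list is a comma-separated string without dots, and the comparison is
 * case insensitive (the literal filenames below are only examples):
 *
 *     if (match_ext("movie.MP3", "mp2,mp3,m2a"))
 *         ...  // matches
 *     if (!match_ext("movie.wav", "mp2,mp3,m2a"))
 *         ...  // no match
 */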
80
81 AVOutputFormat *guess_format(const char *short_name, const char *filename,
82 const char *mime_type)
83 {
84 AVOutputFormat *fmt, *fmt_found;
85 int score_max, score;
86
87 /* specific test for image sequences */
88 if (!short_name && filename &&
89 filename_number_test(filename) >= 0 &&
90 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
91 return guess_format("image2", NULL, NULL);
92 }
93 if (!short_name && filename &&
94 filename_number_test(filename) >= 0 &&
95 guess_image_format(filename)) {
96 return guess_format("image", NULL, NULL);
97 }
98
99 /* find the proper file type */
100 fmt_found = NULL;
101 score_max = 0;
102 fmt = first_oformat;
103 while (fmt != NULL) {
104 score = 0;
105 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
106 score += 100;
107 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
108 score += 10;
109 if (filename && fmt->extensions &&
110 match_ext(filename, fmt->extensions)) {
111 score += 5;
112 }
113 if (score > score_max) {
114 score_max = score;
115 fmt_found = fmt;
116 }
117 fmt = fmt->next;
118 }
119 return fmt_found;
120 }
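/*
 * Usage sketch (illustrative only): pick an output format either by its
 * short name or by letting the filename extension decide.
 *
 *     AVOutputFormat *ofmt;
 *     ofmt = guess_format("avi", NULL, NULL);         // explicit short name
 *     if (!ofmt)
 *         ofmt = guess_format(NULL, "out.avi", NULL); // guess from extension
 *     if (!ofmt)
 *         ...  // no suitable muxer registered
 */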
121
122 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
123 const char *mime_type)
124 {
125 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
126
127 if (fmt) {
128 AVOutputFormat *stream_fmt;
129 char stream_format_name[64];
130
131 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
132 stream_fmt = guess_format(stream_format_name, NULL, NULL);
133
134 if (stream_fmt)
135 fmt = stream_fmt;
136 }
137
138 return fmt;
139 }
140
141 /**
142 * Guesses the codec id based upon muxer and filename.
143 */
144 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
145 const char *filename, const char *mime_type, enum CodecType type){
146 if(type == CODEC_TYPE_VIDEO){
147 enum CodecID codec_id= CODEC_ID_NONE;
148
149 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
150 codec_id= av_guess_image2_codec(filename);
151 }
152 if(codec_id == CODEC_ID_NONE)
153 codec_id= fmt->video_codec;
154 return codec_id;
155 }else if(type == CODEC_TYPE_AUDIO)
156 return fmt->audio_codec;
157 else
158 return CODEC_ID_NONE;
159 }
160
161 /**
162  * Finds the AVInputFormat matching the given short name.
163 */
164 AVInputFormat *av_find_input_format(const char *short_name)
165 {
166 AVInputFormat *fmt;
167 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
168 if (!strcmp(fmt->name, short_name))
169 return fmt;
170 }
171 return NULL;
172 }
173
174 /* memory handling */
175
176 /**
177 * Default packet destructor.
178 */
179 void av_destruct_packet(AVPacket *pkt)
180 {
181 av_free(pkt->data);
182 pkt->data = NULL; pkt->size = 0;
183 }
184
185 /**
186  * Allocate the payload of a packet and initialize its fields to default values.
187 *
188 * @param pkt packet
189 * @param size wanted payload size
190 * @return 0 if OK. AVERROR_xxx otherwise.
191 */
192 int av_new_packet(AVPacket *pkt, int size)
193 {
194 void *data;
195 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
196 return AVERROR_NOMEM;
197 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
198 if (!data)
199 return AVERROR_NOMEM;
200 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
201
202 av_init_packet(pkt);
203 pkt->data = data;
204 pkt->size = size;
205 pkt->destruct = av_destruct_packet;
206 return 0;
207 }
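/*
 * Minimal sketch of allocating and releasing a packet with the functions
 * above (error handling abbreviated; 'payload' and 'payload_size' are
 * hypothetical caller variables):
 *
 *     AVPacket pkt;
 *     if (av_new_packet(&pkt, payload_size) < 0)
 *         return AVERROR_NOMEM;
 *     memcpy(pkt.data, payload, payload_size); // padding follows pkt.data
 *     ...
 *     av_free_packet(&pkt);                    // calls pkt.destruct
 */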
208
209 /**
210  * Allocate and read the payload of a packet and initialize its fields to default values.
211 *
212 * @param pkt packet
213 * @param size wanted payload size
214 * @return >0 (read size) if OK. AVERROR_xxx otherwise.
215 */
216 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
217 {
218 int ret= av_new_packet(pkt, size);
219
220 if(ret<0)
221 return ret;
222
223 pkt->pos= url_ftell(s);
224
225 ret= get_buffer(s, pkt->data, size);
226 if(ret<=0)
227 av_free_packet(pkt);
228 else
229 pkt->size= ret;
230
231 return ret;
232 }
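/*
 * Sketch of how a demuxer's read_packet() callback can use av_get_packet()
 * to return a fixed-size chunk (illustrative only; 'raw_read_packet' and the
 * chunk size are hypothetical):
 *
 *     static int raw_read_packet(AVFormatContext *s, AVPacket *pkt)
 *     {
 *         int ret = av_get_packet(&s->pb, pkt, 1024);
 *         if (ret <= 0)
 *             return AVERROR_IO;
 *         pkt->stream_index = 0;
 *         return ret;
 *     }
 */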
233
234 /* This is a hack - the packet memory allocation stuff is broken. The
235    packet data is copied into a newly allocated buffer if the packet does not own it. */
236 int av_dup_packet(AVPacket *pkt)
237 {
238 if (pkt->destruct != av_destruct_packet) {
239 uint8_t *data;
240 /* we duplicate the packet and don't forget to put the padding
241 again */
242 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
243 return AVERROR_NOMEM;
244 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
245 if (!data) {
246 return AVERROR_NOMEM;
247 }
248 memcpy(data, pkt->data, pkt->size);
249 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
250 pkt->data = data;
251 pkt->destruct = av_destruct_packet;
252 }
253 return 0;
254 }
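/*
 * Illustrative use of av_dup_packet(): a packet returned by av_read_frame()
 * may reference parser-owned memory (destruct == av_destruct_packet_nofree),
 * so duplicate it before keeping it past the next read call ('ic' is assumed
 * to be an opened AVFormatContext):
 *
 *     AVPacket pkt;
 *     if (av_read_frame(ic, &pkt) >= 0) {
 *         if (av_dup_packet(&pkt) < 0)
 *             ...  // out of memory
 *         // pkt now owns its data and can be queued for later use
 *     }
 */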
255
256 /* fifo handling */
257
258 int fifo_init(FifoBuffer *f, int size)
259 {
260 f->buffer = av_malloc(size);
261 if (!f->buffer)
262 return -1;
263 f->end = f->buffer + size;
264 f->wptr = f->rptr = f->buffer;
265 return 0;
266 }
267
268 void fifo_free(FifoBuffer *f)
269 {
270 av_free(f->buffer);
271 }
272
273 int fifo_size(FifoBuffer *f, uint8_t *rptr)
274 {
275 int size;
276
277 if(!rptr)
278 rptr= f->rptr;
279
280 if (f->wptr >= rptr) {
281 size = f->wptr - rptr;
282 } else {
283 size = (f->end - rptr) + (f->wptr - f->buffer);
284 }
285 return size;
286 }
287
288 /**
289 * Get data from the fifo (returns -1 if not enough data).
290 */
291 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
292 {
293 uint8_t *rptr;
294 int size, len;
295
296 if(!rptr_ptr)
297 rptr_ptr= &f->rptr;
298 rptr = *rptr_ptr;
299
300 if (f->wptr >= rptr) {
301 size = f->wptr - rptr;
302 } else {
303 size = (f->end - rptr) + (f->wptr - f->buffer);
304 }
305
306 if (size < buf_size)
307 return -1;
308 while (buf_size > 0) {
309 len = f->end - rptr;
310 if (len > buf_size)
311 len = buf_size;
312 memcpy(buf, rptr, len);
313 buf += len;
314 rptr += len;
315 if (rptr >= f->end)
316 rptr = f->buffer;
317 buf_size -= len;
318 }
319 *rptr_ptr = rptr;
320 return 0;
321 }
322
323 /**
324 * Resizes a FIFO.
325 */
326 void fifo_realloc(FifoBuffer *f, unsigned int new_size){
327 unsigned int old_size= f->end - f->buffer;
328
329 if(old_size < new_size){
330 uint8_t *old= f->buffer;
331
332 f->buffer= av_realloc(f->buffer, new_size);
333
334 f->rptr += f->buffer - old;
335 f->wptr += f->buffer - old;
336
337 if(f->wptr < f->rptr){
338 memmove(f->rptr + new_size - old_size, f->rptr, f->buffer + old_size - f->rptr);
339 f->rptr += new_size - old_size;
340 }
341 f->end= f->buffer + new_size;
342 }
343 }
344
345 void fifo_write(FifoBuffer *f, uint8_t *buf, int size, uint8_t **wptr_ptr)
346 {
347 int len;
348 uint8_t *wptr;
349
350 if(!wptr_ptr)
351 wptr_ptr= &f->wptr;
352 wptr = *wptr_ptr;
353
354 while (size > 0) {
355 len = f->end - wptr;
356 if (len > size)
357 len = size;
358 memcpy(wptr, buf, len);
359 wptr += len;
360 if (wptr >= f->end)
361 wptr = f->buffer;
362 buf += len;
363 size -= len;
364 }
365 *wptr_ptr = wptr;
366 }
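/*
 * Small usage sketch for the FIFO helpers above (buffer names and sizes are
 * arbitrary): passing NULL as the last argument makes the functions use the
 * FIFO's own rptr/wptr.
 *
 *     FifoBuffer f;
 *     uint8_t in[256], out[256];
 *     if (fifo_init(&f, 1024) == 0) {
 *         fifo_write(&f, in, sizeof(in), NULL);
 *         if (fifo_size(&f, NULL) >= sizeof(out))
 *             fifo_read(&f, out, sizeof(out), NULL);
 *         fifo_free(&f);
 *     }
 */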
367
368 /* read data from the fifo and write it to the ByteIOContext (return -1 if not enough data) */
369 int put_fifo(ByteIOContext *pb, FifoBuffer *f, int buf_size, uint8_t **rptr_ptr)
370 {
371 uint8_t *rptr = *rptr_ptr;
372 int size, len;
373
374 if (f->wptr >= rptr) {
375 size = f->wptr - rptr;
376 } else {
377 size = (f->end - rptr) + (f->wptr - f->buffer);
378 }
379
380 if (size < buf_size)
381 return -1;
382 while (buf_size > 0) {
383 len = f->end - rptr;
384 if (len > buf_size)
385 len = buf_size;
386 put_buffer(pb, rptr, len);
387 rptr += len;
388 if (rptr >= f->end)
389 rptr = f->buffer;
390 buf_size -= len;
391 }
392 *rptr_ptr = rptr;
393 return 0;
394 }
395
396 int filename_number_test(const char *filename)
397 {
398 char buf[1024];
399 if(!filename)
400 return -1;
401 return get_frame_filename(buf, sizeof(buf), filename, 1);
402 }
403
404 /**
405 * Guess file format.
406 */
407 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
408 {
409 AVInputFormat *fmt1, *fmt;
410 int score, score_max;
411
412 fmt = NULL;
413 score_max = 0;
414 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
415 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
416 continue;
417 score = 0;
418 if (fmt1->read_probe) {
419 score = fmt1->read_probe(pd);
420 } else if (fmt1->extensions) {
421 if (match_ext(pd->filename, fmt1->extensions)) {
422 score = 50;
423 }
424 }
425 if (score > score_max) {
426 score_max = score;
427 fmt = fmt1;
428 }
429 }
430 return fmt;
431 }
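/*
 * Probing sketch (illustrative): fill an AVProbeData with the filename and,
 * if the file is open, the first bytes of it, then ask for the best match
 * ('filename', 'pb' and 'fmt' are hypothetical caller variables;
 * PROBE_BUF_SIZE is defined further below):
 *
 *     AVProbeData pd;
 *     uint8_t probe_buf[PROBE_BUF_SIZE];
 *     pd.filename = filename;
 *     pd.buf      = probe_buf;
 *     pd.buf_size = get_buffer(pb, probe_buf, sizeof(probe_buf));
 *     fmt = av_probe_input_format(&pd, 1); // 1: a file is opened
 */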
432
433 /************************************************************/
434 /* input media file */
435
436 /**
437 * Open a media file from an IO stream. 'fmt' must be specified.
438 */
439 static const char* format_to_name(void* ptr)
440 {
441 AVFormatContext* fc = (AVFormatContext*) ptr;
442 if(fc->iformat) return fc->iformat->name;
443 else if(fc->oformat) return fc->oformat->name;
444 else return "NULL";
445 }
446
447 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
448
449 AVFormatContext *av_alloc_format_context(void)
450 {
451 AVFormatContext *ic;
452 ic = av_mallocz(sizeof(AVFormatContext));
453 if (!ic) return ic;
454 ic->av_class = &av_format_context_class;
455 return ic;
456 }
457
458 /**
459 * Allocates all the structures needed to read an input stream.
460 * This does not open the needed codecs for decoding the stream[s].
461 */
462 int av_open_input_stream(AVFormatContext **ic_ptr,
463 ByteIOContext *pb, const char *filename,
464 AVInputFormat *fmt, AVFormatParameters *ap)
465 {
466 int err;
467 AVFormatContext *ic;
468
469 ic = av_alloc_format_context();
470 if (!ic) {
471 err = AVERROR_NOMEM;
472 goto fail;
473 }
474 ic->iformat = fmt;
475 if (pb)
476 ic->pb = *pb;
477 ic->duration = AV_NOPTS_VALUE;
478 ic->start_time = AV_NOPTS_VALUE;
479 pstrcpy(ic->filename, sizeof(ic->filename), filename);
480
481 /* allocate private data */
482 if (fmt->priv_data_size > 0) {
483 ic->priv_data = av_mallocz(fmt->priv_data_size);
484 if (!ic->priv_data) {
485 err = AVERROR_NOMEM;
486 goto fail;
487 }
488 } else {
489 ic->priv_data = NULL;
490 }
491
492 err = ic->iformat->read_header(ic, ap);
493 if (err < 0)
494 goto fail;
495
496 if (pb)
497 ic->data_offset = url_ftell(&ic->pb);
498
499 *ic_ptr = ic;
500 return 0;
501 fail:
502 if (ic) {
503 av_freep(&ic->priv_data);
504 }
505 av_free(ic);
506 *ic_ptr = NULL;
507 return err;
508 }
509
510 /** Size of probe buffer, for guessing file type from file contents. */
511 #define PROBE_BUF_SIZE 2048
512
513 /**
514  * Open a media file as input. The codecs are not opened. Only the file
515 * header (if present) is read.
516 *
517 * @param ic_ptr the opened media file handle is put here
518 * @param filename filename to open.
519 * @param fmt if non NULL, force the file format to use
520 * @param buf_size optional buffer size (zero if default is OK)
521  * @param ap additional parameters needed when opening the file (NULL if default)
522 * @return 0 if OK. AVERROR_xxx otherwise.
523 */
524 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
525 AVInputFormat *fmt,
526 int buf_size,
527 AVFormatParameters *ap)
528 {
529 int err, must_open_file, file_opened;
530 uint8_t buf[PROBE_BUF_SIZE];
531 AVProbeData probe_data, *pd = &probe_data;
532 ByteIOContext pb1, *pb = &pb1;
533
534 file_opened = 0;
535 pd->filename = "";
536 if (filename)
537 pd->filename = filename;
538 pd->buf = buf;
539 pd->buf_size = 0;
540
541 if (!fmt) {
542         /* guess the format from the filename alone (no file opened yet) */
543 fmt = av_probe_input_format(pd, 0);
544 }
545
546 /* do not open file if the format does not need it. XXX: specific
547 hack needed to handle RTSP/TCP */
548 must_open_file = 1;
549 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
550 must_open_file = 0;
551         pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it's uninitialized
552 }
553
554 if (!fmt || must_open_file) {
555         /* open the file: either the format needs it or we still have to probe it */
556 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
557 err = AVERROR_IO;
558 goto fail;
559 }
560 file_opened = 1;
561 if (buf_size > 0) {
562 url_setbufsize(pb, buf_size);
563 }
564 if (!fmt) {
565 /* read probe data */
566 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
567 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
568 url_fclose(pb);
569 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
570 err = AVERROR_IO;
571 goto fail;
572 }
573 }
574 }
575 }
576
577 /* guess file format */
578 if (!fmt) {
579 fmt = av_probe_input_format(pd, 1);
580 }
581
582 /* if still no format found, error */
583 if (!fmt) {
584 err = AVERROR_NOFMT;
585 goto fail;
586 }
587
588 /* XXX: suppress this hack for redirectors */
589 #ifdef CONFIG_NETWORK
590 if (fmt == &redir_demux) {
591 err = redir_open(ic_ptr, pb);
592 url_fclose(pb);
593 return err;
594 }
595 #endif
596
597     /* check filename in case an image number is expected */
598 if (fmt->flags & AVFMT_NEEDNUMBER) {
599 if (filename_number_test(filename) < 0) {
600 err = AVERROR_NUMEXPECTED;
601 goto fail;
602 }
603 }
604 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
605 if (err)
606 goto fail;
607 return 0;
608 fail:
609 if (file_opened)
610 url_fclose(pb);
611 *ic_ptr = NULL;
612 return err;
613
614 }
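/*
 * Typical demuxing setup built on the function above (sketch only; error
 * handling shortened, 'ic' and 'filename' are caller variables):
 *
 *     AVFormatContext *ic;
 *     if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
 *         ...  // could not open / recognize the file
 *     if (av_find_stream_info(ic) < 0)
 *         ...  // could not get enough stream parameters
 *     // ... read packets with av_read_frame(), see further below ...
 *     av_close_input_file(ic);
 */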
615
616 /*******************************************************/
617
618 /**
619 * Read a transport packet from a media file.
620 *
621  * This function is obsolete and should never be used.
622 * Use av_read_frame() instead.
623 *
624 * @param s media file handle
625 * @param pkt is filled
626 * @return 0 if OK. AVERROR_xxx if error.
627 */
628 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
629 {
630 return s->iformat->read_packet(s, pkt);
631 }
632
633 /**********************************************************/
634
635 /**
636  * Get the number of samples of an audio frame. Return -1 on error.
637 */
638 static int get_audio_frame_size(AVCodecContext *enc, int size)
639 {
640 int frame_size;
641
642 if (enc->frame_size <= 1) {
643 /* specific hack for pcm codecs because no frame size is
644 provided */
645 switch(enc->codec_id) {
646 case CODEC_ID_PCM_S32LE:
647 case CODEC_ID_PCM_S32BE:
648 case CODEC_ID_PCM_U32LE:
649 case CODEC_ID_PCM_U32BE:
650 if (enc->channels == 0)
651 return -1;
652 frame_size = size / (4 * enc->channels);
653 break;
654 case CODEC_ID_PCM_S24LE:
655 case CODEC_ID_PCM_S24BE:
656 case CODEC_ID_PCM_U24LE:
657 case CODEC_ID_PCM_U24BE:
658 case CODEC_ID_PCM_S24DAUD:
659 if (enc->channels == 0)
660 return -1;
661 frame_size = size / (3 * enc->channels);
662 break;
663 case CODEC_ID_PCM_S16LE:
664 case CODEC_ID_PCM_S16BE:
665 case CODEC_ID_PCM_U16LE:
666 case CODEC_ID_PCM_U16BE:
667 if (enc->channels == 0)
668 return -1;
669 frame_size = size / (2 * enc->channels);
670 break;
671 case CODEC_ID_PCM_S8:
672 case CODEC_ID_PCM_U8:
673 case CODEC_ID_PCM_MULAW:
674 case CODEC_ID_PCM_ALAW:
675 if (enc->channels == 0)
676 return -1;
677 frame_size = size / (enc->channels);
678 break;
679 default:
680 /* used for example by ADPCM codecs */
681 if (enc->bit_rate == 0)
682 return -1;
683 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
684 break;
685 }
686 } else {
687 frame_size = enc->frame_size;
688 }
689 return frame_size;
690 }
691
692
693 /**
694  * Compute the frame duration as a fraction (*pnum / *pden, in seconds); both are set to 0 if it is not available.
695 */
696 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
697 AVCodecParserContext *pc, AVPacket *pkt)
698 {
699 int frame_size;
700
701 *pnum = 0;
702 *pden = 0;
703 switch(st->codec->codec_type) {
704 case CODEC_TYPE_VIDEO:
705 if(st->time_base.num*1000LL > st->time_base.den){
706 *pnum = st->time_base.num;
707 *pden = st->time_base.den;
708 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
709 *pnum = st->codec->time_base.num;
710 *pden = st->codec->time_base.den;
711 if (pc && pc->repeat_pict) {
712 *pden *= 2;
713 *pnum = (*pnum) * (2 + pc->repeat_pict);
714 }
715 }
716 break;
717 case CODEC_TYPE_AUDIO:
718 frame_size = get_audio_frame_size(st->codec, pkt->size);
719 if (frame_size < 0)
720 break;
721 *pnum = frame_size;
722 *pden = st->codec->sample_rate;
723 break;
724 default:
725 break;
726 }
727 }
728
729 static int is_intra_only(AVCodecContext *enc){
730 if(enc->codec_type == CODEC_TYPE_AUDIO){
731 return 1;
732 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
733 switch(enc->codec_id){
734 case CODEC_ID_MJPEG:
735 case CODEC_ID_MJPEGB:
736 case CODEC_ID_LJPEG:
737 case CODEC_ID_RAWVIDEO:
738 case CODEC_ID_DVVIDEO:
739 case CODEC_ID_HUFFYUV:
740 case CODEC_ID_FFVHUFF:
741 case CODEC_ID_ASV1:
742 case CODEC_ID_ASV2:
743 case CODEC_ID_VCR1:
744 return 1;
745 default: break;
746 }
747 }
748 return 0;
749 }
750
751 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
752 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
753 int64_t delta= last_ts - mask/2;
754 return ((lsb - delta)&mask) + delta;
755 }
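/*
 * Worked example for lsb2full() (numbers are illustrative): with
 * lsb_bits = 4 the mask is 0xf and timestamps wrap every 16 ticks.
 * If the last full timestamp was 14 and the next truncated value is 1,
 * then delta = 14 - 7 = 7 and ((1 - 7) & 0xf) + 7 = 10 + 7 = 17, i.e. the
 * wrapped value is correctly placed just after 14 instead of back at 1.
 */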
756
757 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
758 AVCodecParserContext *pc, AVPacket *pkt)
759 {
760 int num, den, presentation_delayed;
761 /* handle wrapping */
762 if(st->cur_dts != AV_NOPTS_VALUE){
763 if(pkt->pts != AV_NOPTS_VALUE)
764 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
765 if(pkt->dts != AV_NOPTS_VALUE)
766 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
767 }
768
769 if (pkt->duration == 0) {
770 compute_frame_duration(&num, &den, st, pc, pkt);
771 if (den && num) {
772 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
773 }
774 }
775
776 if(is_intra_only(st->codec))
777 pkt->flags |= PKT_FLAG_KEY;
778
779 /* do we have a video B frame ? */
780 presentation_delayed = 0;
781 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
782 /* XXX: need has_b_frame, but cannot get it if the codec is
783 not initialized */
784 if (( st->codec->codec_id == CODEC_ID_H264
785 || st->codec->has_b_frames) &&
786 pc && pc->pict_type != FF_B_TYPE)
787 presentation_delayed = 1;
788         /* this may be redundant, but it shouldn't hurt */
789 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
790 presentation_delayed = 1;
791 }
792
793 if(st->cur_dts == AV_NOPTS_VALUE){
794 if(presentation_delayed) st->cur_dts = -pkt->duration;
795 else st->cur_dts = 0;
796 }
797
798 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
799 /* interpolate PTS and DTS if they are not present */
800 if (presentation_delayed) {
801 /* DTS = decompression time stamp */
802 /* PTS = presentation time stamp */
803 if (pkt->dts == AV_NOPTS_VALUE) {
804 /* if we know the last pts, use it */
805 if(st->last_IP_pts != AV_NOPTS_VALUE)
806 st->cur_dts = pkt->dts = st->last_IP_pts;
807 else
808 pkt->dts = st->cur_dts;
809 } else {
810 st->cur_dts = pkt->dts;
811 }
812 /* this is tricky: the dts must be incremented by the duration
813 of the frame we are displaying, i.e. the last I or P frame */
814 if (st->last_IP_duration == 0)
815 st->cur_dts += pkt->duration;
816 else
817 st->cur_dts += st->last_IP_duration;
818 st->last_IP_duration = pkt->duration;
819 st->last_IP_pts= pkt->pts;
820 /* cannot compute PTS if not present (we can compute it only
821            by knowing the future) */
822 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
823 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
824 int64_t old_diff= ABS(st->cur_dts - pkt->duration - pkt->pts);
825 int64_t new_diff= ABS(st->cur_dts - pkt->pts);
826 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
827 pkt->pts += pkt->duration;
828 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
829 }
830 }
831
832 /* presentation is not delayed : PTS and DTS are the same */
833 if (pkt->pts == AV_NOPTS_VALUE) {
834 if (pkt->dts == AV_NOPTS_VALUE) {
835 pkt->pts = st->cur_dts;
836 pkt->dts = st->cur_dts;
837 }
838 else {
839 st->cur_dts = pkt->dts;
840 pkt->pts = pkt->dts;
841 }
842 } else {
843 st->cur_dts = pkt->pts;
844 pkt->dts = pkt->pts;
845 }
846 st->cur_dts += pkt->duration;
847 }
848 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
849
850 /* update flags */
851 if (pc) {
852 pkt->flags = 0;
853 /* key frame computation */
854 switch(st->codec->codec_type) {
855 case CODEC_TYPE_VIDEO:
856 if (pc->pict_type == FF_I_TYPE)
857 pkt->flags |= PKT_FLAG_KEY;
858 break;
859 case CODEC_TYPE_AUDIO:
860 pkt->flags |= PKT_FLAG_KEY;
861 break;
862 default:
863 break;
864 }
865 }
866 }
867
868 void av_destruct_packet_nofree(AVPacket *pkt)
869 {
870 pkt->data = NULL; pkt->size = 0;
871 }
872
873 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
874 {
875 AVStream *st;
876 int len, ret, i;
877
878 for(;;) {
879 /* select current input stream component */
880 st = s->cur_st;
881 if (st) {
882 if (!st->need_parsing || !st->parser) {
883 /* no parsing needed: we just output the packet as is */
884 /* raw data support */
885 *pkt = s->cur_pkt;
886 compute_pkt_fields(s, st, NULL, pkt);
887 s->cur_st = NULL;
888 return 0;
889 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
890 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
891 s->cur_ptr, s->cur_len,
892 s->cur_pkt.pts, s->cur_pkt.dts);
893 s->cur_pkt.pts = AV_NOPTS_VALUE;
894 s->cur_pkt.dts = AV_NOPTS_VALUE;
895 /* increment read pointer */
896 s->cur_ptr += len;
897 s->cur_len -= len;
898
899 /* return packet if any */
900 if (pkt->size) {
901 got_packet:
902 pkt->duration = 0;
903 pkt->stream_index = st->index;
904 pkt->pts = st->parser->pts;
905 pkt->dts = st->parser->dts;
906 pkt->destruct = av_destruct_packet_nofree;
907 compute_pkt_fields(s, st, st->parser, pkt);
908 return 0;
909 }
910 } else {
911 /* free packet */
912 av_free_packet(&s->cur_pkt);
913 s->cur_st = NULL;
914 }
915 } else {
916 /* read next packet */
917 ret = av_read_packet(s, &s->cur_pkt);
918 if (ret < 0) {
919 if (ret == -EAGAIN)
920 return ret;
921 /* return the last frames, if any */
922 for(i = 0; i < s->nb_streams; i++) {
923 st = s->streams[i];
924 if (st->parser && st->need_parsing) {
925 av_parser_parse(st->parser, st->codec,
926 &pkt->data, &pkt->size,
927 NULL, 0,
928 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
929 if (pkt->size)
930 goto got_packet;
931 }
932 }
933 /* no more packets: really terminates parsing */
934 return ret;
935 }
936
937 st = s->streams[s->cur_pkt.stream_index];
938
939 s->cur_st = st;
940 s->cur_ptr = s->cur_pkt.data;
941 s->cur_len = s->cur_pkt.size;
942 if (st->need_parsing && !st->parser) {
943 st->parser = av_parser_init(st->codec->codec_id);
944 if (!st->parser) {
945 /* no parser available : just output the raw packets */
946 st->need_parsing = 0;
947 }else if(st->need_parsing == 2){
948 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
949 }
950 }
951 }
952 }
953 }
954
955 /**
956 * Return the next frame of a stream.
957 *
958 * The returned packet is valid
959 * until the next av_read_frame() or until av_close_input_file() and
960 * must be freed with av_free_packet. For video, the packet contains
961 * exactly one frame. For audio, it contains an integer number of
962 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
963 * data). If the audio frames have a variable size (e.g. MPEG audio),
964 * then it contains one frame.
965 *
966 * pkt->pts, pkt->dts and pkt->duration are always set to correct
967 * values in AV_TIME_BASE unit (and guessed if the format cannot
968 * provided them). pkt->pts can be AV_NOPTS_VALUE if the video format
969 * has B frames, so it is better to rely on pkt->dts if you do not
970 * decompress the payload.
971 *
972 * @return 0 if OK, < 0 if error or end of file.
973 */
974 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
975 {
976 AVPacketList *pktl;
977 int eof=0;
978 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
979
980 for(;;){
981 pktl = s->packet_buffer;
982 if (pktl) {
983 AVPacket *next_pkt= &pktl->pkt;
984
985 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
986 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
987 if( pktl->pkt.stream_index == next_pkt->stream_index
988 && next_pkt->dts < pktl->pkt.dts
989 && pktl->pkt.pts != pktl->pkt.dts //not b frame
990 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
991 next_pkt->pts= pktl->pkt.dts;
992 }
993 pktl= pktl->next;
994 }
995 pktl = s->packet_buffer;
996 }
997
998 if( next_pkt->pts != AV_NOPTS_VALUE
999 || next_pkt->dts == AV_NOPTS_VALUE
1000 || !genpts || eof){
1001 /* read packet from packet buffer, if there is data */
1002 *pkt = *next_pkt;
1003 s->packet_buffer = pktl->next;
1004 av_free(pktl);
1005 return 0;
1006 }
1007 }
1008 if(genpts){
1009 AVPacketList **plast_pktl= &s->packet_buffer;
1010 int ret= av_read_frame_internal(s, pkt);
1011 if(ret<0){
1012 if(pktl && ret != -EAGAIN){
1013 eof=1;
1014 continue;
1015 }else
1016 return ret;
1017 }
1018
1019 /* duplicate the packet */
1020 if (av_dup_packet(pkt) < 0)
1021 return AVERROR_NOMEM;
1022
1023 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
1024
1025 pktl = av_mallocz(sizeof(AVPacketList));
1026 if (!pktl)
1027 return AVERROR_NOMEM;
1028
1029 /* add the packet in the buffered packet list */
1030 *plast_pktl = pktl;
1031 pktl->pkt= *pkt;
1032 }else{
1033 assert(!s->packet_buffer);
1034 return av_read_frame_internal(s, pkt);
1035 }
1036 }
1037 }
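/*
 * Typical read loop using av_read_frame() (sketch; 'ic' is an already opened
 * AVFormatContext and 'video_index' is a hypothetical stream index):
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         if (pkt.stream_index == video_index) {
 *             // decode or remux pkt.data / pkt.size here
 *         }
 *         av_free_packet(&pkt);
 *     }
 */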
1038
1039 /* XXX: suppress the packet queue */
1040 static void flush_packet_queue(AVFormatContext *s)
1041 {
1042 AVPacketList *pktl;
1043
1044 for(;;) {
1045 pktl = s->packet_buffer;
1046 if (!pktl)
1047 break;
1048 s->packet_buffer = pktl->next;
1049 av_free_packet(&pktl->pkt);
1050 av_free(pktl);
1051 }
1052 }
1053
1054 /*******************************************************/
1055 /* seek support */
1056
1057 int av_find_default_stream_index(AVFormatContext *s)
1058 {
1059 int i;
1060 AVStream *st;
1061
1062 if (s->nb_streams <= 0)
1063 return -1;
1064 for(i = 0; i < s->nb_streams; i++) {
1065 st = s->streams[i];
1066 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1067 return i;
1068 }
1069 }
1070 return 0;
1071 }
1072
1073 /**
1074 * Flush the frame reader.
1075 */
1076 static void av_read_frame_flush(AVFormatContext *s)
1077 {
1078 AVStream *st;
1079 int i;
1080
1081 flush_packet_queue(s);
1082
1083 /* free previous packet */
1084 if (s->cur_st) {
1085 if (s->cur_st->parser)
1086 av_free_packet(&s->cur_pkt);
1087 s->cur_st = NULL;
1088 }
1089 /* fail safe */
1090 s->cur_ptr = NULL;
1091 s->cur_len = 0;
1092
1093 /* for each stream, reset read state */
1094 for(i = 0; i < s->nb_streams; i++) {
1095 st = s->streams[i];
1096
1097 if (st->parser) {
1098 av_parser_close(st->parser);
1099 st->parser = NULL;
1100 }
1101 st->last_IP_pts = AV_NOPTS_VALUE;
1102 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
1103 }
1104 }
1105
1106 /**
1107 * Updates cur_dts of all streams based on given timestamp and AVStream.
1108 *
1109  * Stream ref_st unchanged; the others have cur_dts set in their native time base.
1110  * Only needed for timestamp wrapping or if (dts not set and pts != dts).
1111 * @param timestamp new dts expressed in time_base of param ref_st
1112 * @param ref_st reference stream giving time_base of param timestamp
1113 */
1114 static void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1115 int i;
1116
1117 for(i = 0; i < s->nb_streams; i++) {
1118 AVStream *st = s->streams[i];
1119
1120 st->cur_dts = av_rescale(timestamp,
1121 st->time_base.den * (int64_t)ref_st->time_base.num,
1122 st->time_base.num * (int64_t)ref_st->time_base.den);
1123 }
1124 }
1125
1126 /**
1127  * Add an index entry into a sorted list, updating it if it is already there.
1128 *
1129 * @param timestamp timestamp in the timebase of the given stream
1130 */
1131 int av_add_index_entry(AVStream *st,
1132 int64_t pos, int64_t timestamp, int distance, int flags)
1133 {
1134 AVIndexEntry *entries, *ie;
1135 int index;
1136
1137 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1138 return -1;
1139
1140 entries = av_fast_realloc(st->index_entries,
1141 &st->index_entries_allocated_size,
1142 (st->nb_index_entries + 1) *
1143 sizeof(AVIndexEntry));
1144 if(!entries)
1145 return -1;
1146
1147 st->index_entries= entries;
1148
1149 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1150
1151 if(index<0){
1152 index= st->nb_index_entries++;
1153 ie= &entries[index];
1154 assert(index==0 || ie[-1].timestamp < timestamp);
1155 }else{
1156 ie= &entries[index];
1157 if(ie->timestamp != timestamp){
1158 if(ie->timestamp <= timestamp)
1159 return -1;
1160 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1161 st->nb_index_entries++;
1162         }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1163 distance= ie->min_distance;
1164 }
1165
1166 ie->pos = pos;
1167 ie->timestamp = timestamp;
1168 ie->min_distance= distance;
1169 ie->flags = flags;
1170
1171 return index;
1172 }
1173
1174 /**
1175 * build an index for raw streams using a parser.
1176 */
1177 static void av_build_index_raw(AVFormatContext *s)
1178 {
1179 AVPacket pkt1, *pkt = &pkt1;
1180 int ret;
1181 AVStream *st;
1182
1183 st = s->streams[0];
1184 av_read_frame_flush(s);
1185 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1186
1187 for(;;) {
1188 ret = av_read_frame(s, pkt);
1189 if (ret < 0)
1190 break;
1191 if (pkt->stream_index == 0 && st->parser &&
1192 (pkt->flags & PKT_FLAG_KEY)) {
1193 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1194 0, AVINDEX_KEYFRAME);
1195 }
1196 av_free_packet(pkt);
1197 }
1198 }
1199
1200 /**
1201 * Returns TRUE if we deal with a raw stream.
1202 *
1203 * Raw codec data and parsing needed.
1204 */
1205 static int is_raw_stream(AVFormatContext *s)
1206 {
1207 AVStream *st;
1208
1209 if (s->nb_streams != 1)
1210 return 0;
1211 st = s->streams[0];
1212 if (!st->need_parsing)
1213 return 0;
1214 return 1;
1215 }
1216
1217 /**
1218 * Gets the index for a specific timestamp.
1219 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
1220 * the timestamp which is <= the requested one, if backward is 0
1221 * then it will be >=
1222 * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
1223 * @return < 0 if no such timestamp could be found
1224 */
1225 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1226 int flags)
1227 {
1228 AVIndexEntry *entries= st->index_entries;
1229 int nb_entries= st->nb_index_entries;
1230 int a, b, m;
1231 int64_t timestamp;
1232
1233 a = - 1;
1234 b = nb_entries;
1235
1236 while (b - a > 1) {
1237 m = (a + b) >> 1;
1238 timestamp = entries[m].timestamp;
1239 if(timestamp >= wanted_timestamp)
1240 b = m;
1241 if(timestamp <= wanted_timestamp)
1242 a = m;
1243 }
1244 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1245
1246 if(!(flags & AVSEEK_FLAG_ANY)){
1247 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1248 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1249 }
1250 }
1251
1252 if(m == nb_entries)
1253 return -1;
1254 return m;
1255 }
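/*
 * Index lookup sketch (illustrative): find the last keyframe at or before a
 * wanted timestamp, expressed in the stream's time_base ('st' and 'wanted_ts'
 * are hypothetical caller variables):
 *
 *     int idx = av_index_search_timestamp(st, wanted_ts, AVSEEK_FLAG_BACKWARD);
 *     if (idx >= 0) {
 *         AVIndexEntry *ie = &st->index_entries[idx];
 *         // ie->pos is the byte offset, ie->timestamp the keyframe timestamp
 *     }
 */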
1256
1257 #define DEBUG_SEEK
1258
1259 /**
1260  * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
1261  * This isn't supposed to be called directly by a user application, but by demuxers.
1262 * @param target_ts target timestamp in the time base of the given stream
1263 * @param stream_index stream number
1264 */
1265 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1266 AVInputFormat *avif= s->iformat;
1267 int64_t pos_min, pos_max, pos, pos_limit;
1268 int64_t ts_min, ts_max, ts;
1269 int64_t start_pos, filesize;
1270 int index, no_change;
1271 AVStream *st;
1272
1273 if (stream_index < 0)
1274 return -1;
1275
1276 #ifdef DEBUG_SEEK
1277 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1278 #endif
1279
1280 ts_max=
1281 ts_min= AV_NOPTS_VALUE;
1282     pos_limit= -1; //gcc falsely says it may be uninitialized
1283
1284 st= s->streams[stream_index];
1285 if(st->index_entries){
1286 AVIndexEntry *e;
1287
1288 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1289 index= FFMAX(index, 0);
1290 e= &st->index_entries[index];
1291
1292 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1293 pos_min= e->pos;
1294 ts_min= e->timestamp;
1295 #ifdef DEBUG_SEEK
1296 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1297 pos_min,ts_min);
1298 #endif
1299 }else{
1300 assert(index==0);
1301 }
1302
1303 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1304 assert(index < st->nb_index_entries);
1305 if(index >= 0){
1306 e= &st->index_entries[index];
1307 assert(e->timestamp >= target_ts);
1308 pos_max= e->pos;
1309 ts_max= e->timestamp;
1310 pos_limit= pos_max - e->min_distance;
1311 #ifdef DEBUG_SEEK
1312 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1313 pos_max,pos_limit, ts_max);
1314 #endif
1315 }
1316 }
1317
1318 if(ts_min == AV_NOPTS_VALUE){
1319 pos_min = s->data_offset;
1320 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1321 if (ts_min == AV_NOPTS_VALUE)
1322 return -1;
1323 }
1324
1325 if(ts_max == AV_NOPTS_VALUE){
1326 int step= 1024;
1327 filesize = url_fsize(&s->pb);
1328 pos_max = filesize - 1;
1329 do{
1330 pos_max -= step;
1331 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1332 step += step;
1333 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1334 if (ts_max == AV_NOPTS_VALUE)
1335 return -1;
1336
1337 for(;;){
1338 int64_t tmp_pos= pos_max + 1;
1339 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1340 if(tmp_ts == AV_NOPTS_VALUE)
1341 break;
1342 ts_max= tmp_ts;
1343 pos_max= tmp_pos;
1344 if(tmp_pos >= filesize)
1345 break;
1346 }
1347 pos_limit= pos_max;
1348 }
1349
1350 no_change=0;
1351 while (pos_min < pos_limit) {
1352 #ifdef DEBUG_SEEK
1353 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1354 pos_min, pos_max,
1355 ts_min, ts_max);
1356 #endif
1357 assert(pos_limit <= pos_max);
1358
1359 if(no_change==0){
1360 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1361 // interpolate position (better than dichotomy)
1362 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1363 + pos_min - approximate_keyframe_distance;
1364 }else if(no_change==1){
1365 // bisection, if interpolation failed to change min or max pos last time
1366 pos = (pos_min + pos_limit)>>1;
1367 }else{
1368             // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1369 pos=pos_min;
1370 }
1371 if(pos <= pos_min)
1372 pos= pos_min + 1;
1373 else if(pos > pos_limit)
1374 pos= pos_limit;
1375 start_pos= pos;
1376
1377 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1378 if(pos == pos_max)
1379 no_change++;
1380 else
1381 no_change=0;
1382 #ifdef DEBUG_SEEK
1383 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1384 #endif
1385 assert(ts != AV_NOPTS_VALUE);
1386 if (target_ts <= ts) {
1387 pos_limit = start_pos - 1;
1388 pos_max = pos;
1389 ts_max = ts;
1390 }
1391 if (target_ts >= ts) {
1392 pos_min = pos;
1393 ts_min = ts;
1394 }
1395 }
1396
1397 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1398 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1399 #ifdef DEBUG_SEEK
1400 pos_min = pos;
1401 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1402 pos_min++;
1403 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1404 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1405 pos, ts_min, target_ts, ts_max);
1406 #endif
1407 /* do the seek */
1408 url_fseek(&s->pb, pos, SEEK_SET);
1409
1410 av_update_cur_dts(s, st, ts);
1411
1412 return 0;
1413 }
1414
1415 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1416 int64_t pos_min, pos_max;
1417 #if 0
1418 AVStream *st;
1419
1420 if (stream_index < 0)
1421 return -1;
1422
1423 st= s->streams[stream_index];
1424 #endif
1425
1426 pos_min = s->data_offset;
1427 pos_max = url_fsize(&s->pb) - 1;
1428
1429 if (pos < pos_min) pos= pos_min;
1430 else if(pos > pos_max) pos= pos_max;
1431
1432 url_fseek(&s->pb, pos, SEEK_SET);
1433
1434 #if 0
1435 av_update_cur_dts(s, st, ts);
1436 #endif
1437 return 0;
1438 }
1439
1440 static int av_seek_frame_generic(AVFormatContext *s,
1441 int stream_index, int64_t timestamp, int flags)
1442 {
1443 int index;
1444 AVStream *st;
1445 AVIndexEntry *ie;
1446
1447 if (!s->index_built) {
1448 if (is_raw_stream(s)) {
1449 av_build_index_raw(s);
1450 } else {
1451 return -1;
1452 }
1453 s->index_built = 1;
1454 }
1455
1456 st = s->streams[stream_index];
1457 index = av_index_search_timestamp(st, timestamp, flags);
1458 if (index < 0)
1459 return -1;
1460
1461 /* now we have found the index, we can seek */
1462 ie = &st->index_entries[index];
1463 av_read_frame_flush(s);
1464 url_fseek(&s->pb, ie->pos, SEEK_SET);
1465
1466 av_update_cur_dts(s, st, ie->timestamp);
1467
1468 return 0;
1469 }
1470
1471 /**
1472 * Seek to the key frame at timestamp.
1473 * 'timestamp' in 'stream_index'.
1474 * @param stream_index If stream_index is (-1), a default
1475 * stream is selected, and timestamp is automatically converted
1476 * from AV_TIME_BASE units to the stream specific time_base.
1477 * @param timestamp timestamp in AVStream.time_base units
1478 * or if there is no stream specified then in AV_TIME_BASE units
1479 * @param flags flags which select direction and seeking mode
1480 * @return >= 0 on success
1481 */
1482 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1483 {
1484 int ret;
1485 AVStream *st;
1486
1487 av_read_frame_flush(s);
1488
1489 if(flags & AVSEEK_FLAG_BYTE)
1490 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1491
1492 if(stream_index < 0){
1493 stream_index= av_find_default_stream_index(s);
1494 if(stream_index < 0)
1495 return -1;
1496
1497 st= s->streams[stream_index];
1498 /* timestamp for default must be expressed in AV_TIME_BASE units */
1499 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1500 }
1501 st= s->streams[stream_index];
1502
1503 /* first, we try the format specific seek */
1504 if (s->iformat->read_seek)
1505 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1506 else
1507 ret = -1;
1508 if (ret >= 0) {
1509 return 0;
1510 }
1511
1512 if(s->iformat->read_timestamp)
1513 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1514 else
1515 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1516 }
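/*
 * Seeking sketch (illustrative): seek to an absolute position given in
 * seconds by passing stream_index = -1, so the timestamp is interpreted in
 * AV_TIME_BASE units and converted internally ('ic' and 'seconds' are
 * hypothetical caller variables):
 *
 *     int64_t ts = (int64_t)seconds * AV_TIME_BASE;
 *     if (av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD) < 0)
 *         ...  // seek failed
 */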
1517
1518 /*******************************************************/
1519
1520 /**
1521  * Returns TRUE if the file has accurate timings for at least one stream.
1522 *
1523 * @return TRUE if the stream has accurate timings for at least one component.
1524 */
1525 static int av_has_timings(AVFormatContext *ic)
1526 {
1527 int i;
1528 AVStream *st;
1529
1530 for(i = 0;i < ic->nb_streams; i++) {
1531 st = ic->streams[i];
1532 if (st->start_time != AV_NOPTS_VALUE &&
1533 st->duration != AV_NOPTS_VALUE)
1534 return 1;
1535 }
1536 return 0;
1537 }
1538
1539 /**
1540  * Estimate the stream timings from those of the individual stream components.
1541 *
1542 * Also computes the global bitrate if possible.
1543 */
1544 static void av_update_stream_timings(AVFormatContext *ic)
1545 {
1546 int64_t start_time, start_time1, end_time, end_time1;
1547 int i;
1548 AVStream *st;
1549
1550 start_time = MAXINT64;
1551 end_time = MININT64;
1552 for(i = 0;i < ic->nb_streams; i++) {
1553 st = ic->streams[i];
1554 if (st->start_time != AV_NOPTS_VALUE) {
1555 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1556 if (start_time1 < start_time)
1557 start_time = start_time1;
1558 if (st->duration != AV_NOPTS_VALUE) {
1559 end_time1 = start_time1
1560 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1561 if (end_time1 > end_time)
1562 end_time = end_time1;
1563 }
1564 }
1565 }
1566 if (start_time != MAXINT64) {
1567 ic->start_time = start_time;
1568 if (end_time != MININT64) {
1569 ic->duration = end_time - start_time;
1570 if (ic->file_size > 0) {
1571 /* compute the bit rate */
1572 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1573 (double)ic->duration;
1574 }
1575 }
1576 }
1577
1578 }
1579
1580 static void fill_all_stream_timings(AVFormatContext *ic)
1581 {
1582 int i;
1583 AVStream *st;
1584
1585 av_update_stream_timings(ic);
1586 for(i = 0;i < ic->nb_streams; i++) {
1587 st = ic->streams[i];
1588 if (st->start_time == AV_NOPTS_VALUE) {
1589 if(ic->start_time != AV_NOPTS_VALUE)
1590 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1591 if(ic->duration != AV_NOPTS_VALUE)
1592 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1593 }
1594 }
1595 }
1596
1597 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1598 {
1599 int64_t filesize, duration;
1600 int bit_rate, i;
1601 AVStream *st;
1602
1603 /* if bit_rate is already set, we believe it */
1604 if (ic->bit_rate == 0) {
1605 bit_rate = 0;
1606 for(i=0;i<ic->nb_streams;i++) {
1607 st = ic->streams[i];
1608 bit_rate += st->codec->bit_rate;
1609 }
1610 ic->bit_rate = bit_rate;
1611 }
1612
1613 /* if duration is already set, we believe it */
1614 if (ic->duration == AV_NOPTS_VALUE &&
1615 ic->bit_rate != 0 &&
1616 ic->file_size != 0) {
1617 filesize = ic->file_size;
1618 if (filesize > 0) {
1619 for(i = 0; i < ic->nb_streams; i++) {
1620 st = ic->streams[i];
1621 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1622 if (st->start_time == AV_NOPTS_VALUE ||
1623 st->duration == AV_NOPTS_VALUE) {
1624 st->start_time = 0;
1625 st->duration = duration;
1626 }
1627 }
1628 }
1629 }
1630 }
1631
1632 #define DURATION_MAX_READ_SIZE 250000
1633
1634 /* only usable for MPEG-PS streams */
1635 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1636 {
1637 AVPacket pkt1, *pkt = &pkt1;
1638 AVStream *st;
1639 int read_size, i, ret;
1640 int64_t end_time;
1641 int64_t filesize, offset, duration;
1642
1643 /* free previous packet */
1644 if (ic->cur_st && ic->cur_st->parser)
1645 av_free_packet(&ic->cur_pkt);
1646 ic->cur_st = NULL;
1647
1648 /* flush packet queue */
1649 flush_packet_queue(ic);
1650
1651 for(i=0;i<ic->nb_streams;i++) {
1652 st = ic->streams[i];
1653 if (st->parser) {
1654 av_parser_close(st->parser);
1655 st->parser= NULL;
1656 }
1657 }
1658
1659 /* we read the first packets to get the first PTS (not fully
1660 accurate, but it is enough now) */
1661 url_fseek(&ic->pb, 0, SEEK_SET);
1662 read_size = 0;
1663 for(;;) {
1664 if (read_size >= DURATION_MAX_READ_SIZE)
1665 break;
1666 /* if all info is available, we can stop */
1667 for(i = 0;i < ic->nb_streams; i++) {
1668 st = ic->streams[i];
1669 if (st->start_time == AV_NOPTS_VALUE)
1670 break;
1671 }
1672 if (i == ic->nb_streams)
1673 break;
1674
1675 ret = av_read_packet(ic, pkt);
1676 if (ret != 0)
1677 break;
1678 read_size += pkt->size;
1679 st = ic->streams[pkt->stream_index];
1680 if (pkt->pts != AV_NOPTS_VALUE) {
1681 if (st->start_time == AV_NOPTS_VALUE)
1682 st->start_time = pkt->pts;
1683 }
1684 av_free_packet(pkt);
1685 }
1686
1687 /* estimate the end time (duration) */
1688 /* XXX: may need to support wrapping */
1689 filesize = ic->file_size;
1690 offset = filesize - DURATION_MAX_READ_SIZE;
1691 if (offset < 0)
1692 offset = 0;
1693
1694 url_fseek(&ic->pb, offset, SEEK_SET);
1695 read_size = 0;
1696 for(;;) {
1697 if (read_size >= DURATION_MAX_READ_SIZE)
1698 break;
1699 /* if all info is available, we can stop */
1700 for(i = 0;i < ic->nb_streams; i++) {
1701 st = ic->streams[i];
1702 if (st->duration == AV_NOPTS_VALUE)
1703 break;
1704 }
1705 if (i == ic->nb_streams)
1706 break;
1707
1708 ret = av_read_packet(ic, pkt);
1709 if (ret != 0)
1710 break;
1711 read_size += pkt->size;
1712 st = ic->streams[pkt->stream_index];
1713 if (pkt->pts != AV_NOPTS_VALUE) {
1714 end_time = pkt->pts;
1715 duration = end_time - st->start_time;
1716 if (duration > 0) {
1717 if (st->duration == AV_NOPTS_VALUE ||
1718 st->duration < duration)
1719 st->duration = duration;
1720 }
1721 }
1722 av_free_packet(pkt);
1723 }
1724
1725 fill_all_stream_timings(ic);
1726
1727 url_fseek(&ic->pb, 0, SEEK_SET);
1728 }
1729
1730 static void av_estimate_timings(AVFormatContext *ic)
1731 {
1732 int64_t file_size;
1733
1734 /* get the file size, if possible */
1735 if (ic->iformat->flags & AVFMT_NOFILE) {
1736 file_size = 0;
1737 } else {
1738 file_size = url_fsize(&ic->pb);
1739 if (file_size < 0)
1740 file_size = 0;
1741 }
1742 ic->file_size = file_size;
1743
1744 if ((ic->iformat == &mpegps_demux || ic->iformat == &mpegts_demux) && file_size && !ic->pb.is_streamed) {
1745 /* get accurate estimate from the PTSes */
1746 av_estimate_timings_from_pts(ic);
1747 } else if (av_has_timings(ic)) {
1748         /* at least one component has timings - we use them for all
1749 the components */
1750 fill_all_stream_timings(ic);
1751 } else {
1752 /* less precise: use bit rate info */
1753 av_estimate_timings_from_bit_rate(ic);
1754 }
1755 av_update_stream_timings(ic);
1756
1757 #if 0
1758 {
1759 int i;
1760 AVStream *st;
1761 for(i = 0;i < ic->nb_streams; i++) {
1762 st = ic->streams[i];
1763 printf("%d: start_time: %0.3f duration: %0.3f\n",
1764 i, (double)st->start_time / AV_TIME_BASE,
1765 (double)st->duration / AV_TIME_BASE);
1766 }
1767 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1768 (double)ic->start_time / AV_TIME_BASE,
1769 (double)ic->duration / AV_TIME_BASE,
1770 ic->bit_rate / 1000);
1771 }
1772 #endif
1773 }
1774
1775 static int has_codec_parameters(AVCodecContext *enc)
1776 {
1777 int val;
1778 switch(enc->codec_type) {
1779 case CODEC_TYPE_AUDIO:
1780 val = enc->sample_rate;
1781 break;
1782 case CODEC_TYPE_VIDEO:
1783 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1784 break;
1785 default:
1786 val = 1;
1787 break;
1788 }
1789 return (val != 0);
1790 }
1791
1792 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1793 {
1794 int16_t *samples;
1795 AVCodec *codec;
1796 int got_picture, ret=0;
1797 AVFrame picture;
1798
1799 if(!st->codec->codec){
1800 codec = avcodec_find_decoder(st->codec->codec_id);
1801 if (!codec)
1802 return -1;
1803 ret = avcodec_open(st->codec, codec);
1804 if (ret < 0)
1805 return ret;
1806 }
1807
1808 if(!has_codec_parameters(st->codec)){
1809 switch(st->codec->codec_type) {
1810 case CODEC_TYPE_VIDEO:
1811 ret = avcodec_decode_video(st->codec, &picture,
1812 &got_picture, (uint8_t *)data, size);
1813 break;
1814 case CODEC_TYPE_AUDIO:
1815 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1816 if (!samples)
1817 goto fail;
1818 ret = avcodec_decode_audio(st->codec, samples,
1819 &got_picture, (uint8_t *)data, size);
1820 av_free(samples);
1821 break;
1822 default:
1823 break;
1824 }
1825 }
1826 fail:
1827 return ret;
1828 }
1829
1830 /* absolute maximum size we read until we abort */
1831 #define MAX_READ_SIZE 5000000
1832
1833 /* maximum duration until we stop analysing the stream */
1834 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 2.0))
1835
1836 /**
1837 * Read the beginning of a media file to get stream information. This
1838 * is useful for file formats with no headers such as MPEG. This
1839  * function also computes the real frame rate in case of MPEG-2 repeat
1840 * frame mode.
1841 *
1842 * @param ic media file handle
1843 * @return >=0 if OK. AVERROR_xxx if error.
1844  * @todo let user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
1845 */
1846 int av_find_stream_info(AVFormatContext *ic)
1847 {
1848 int i, count, ret, read_size;
1849 AVStream *st;
1850 AVPacket pkt1, *pkt;
1851 AVPacketList *pktl=NULL, **ppktl;
1852 int64_t last_dts[MAX_STREAMS];
1853 int64_t duration_sum[MAX_STREAMS];
1854 int duration_count[MAX_STREAMS]={0};
1855
1856 for(i=0;i<ic->nb_streams;i++) {
1857 st = ic->streams[i];
1858 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1859 /* if(!st->time_base.num)
1860 st->time_base= */
1861 if(!st->codec->time_base.num)
1862 st->codec->time_base= st->time_base;
1863 }
1864 //only for the split stuff
1865 if (!st->parser) {
1866 st->parser = av_parser_init(st->codec->codec_id);
1867 if(st->need_parsing == 2 && st->parser){
1868 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1869 }
1870 }
1871 }
1872
1873 for(i=0;i<MAX_STREAMS;i++){
1874 last_dts[i]= AV_NOPTS_VALUE;
1875 duration_sum[i]= INT64_MAX;
1876 }
1877
1878 count = 0;
1879 read_size = 0;
1880 ppktl = &ic->packet_buffer;
1881 for(;;) {
1882 /* check if one codec still needs to be handled */
1883 for(i=0;i<ic->nb_streams;i++) {
1884 st = ic->streams[i];
1885 if (!has_codec_parameters(st->codec))
1886 break;
1887 /* variable fps and no guess at the real fps */
1888 if( st->codec->time_base.den >= 1000LL*st->codec->time_base.num
1889 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1890 break;
1891 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1892 break;
1893 }
1894 if (i == ic->nb_streams) {
1895 /* NOTE: if the format has no header, then we need to read
1896 some packets to get most of the streams, so we cannot
1897 stop here */
1898 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1899 /* if we found the info for all the codecs, we can stop */
1900 ret = count;
1901 break;
1902 }
1903 } else {
1904 /* we did not get all the codec info, but we read too much data */
1905 if (read_size >= MAX_READ_SIZE) {
1906 ret = count;
1907 break;
1908 }
1909 }
1910
1911         /* NOTE: a new stream can be added here if the file has no header
1912 (AVFMTCTX_NOHEADER) */
1913 ret = av_read_frame_internal(ic, &pkt1);
1914 if (ret < 0) {
1915 /* EOF or error */
1916 ret = -1; /* we could not have all the codec parameters before EOF */
1917 for(i=0;i<ic->nb_streams;i++) {
1918 st = ic->streams[i];
1919 if (!has_codec_parameters(st->codec))
1920 break;
1921 }
1922 if (i == ic->nb_streams)
1923 ret = 0;
1924 break;
1925 }
1926
1927 pktl = av_mallocz(sizeof(AVPacketList));
1928 if (!pktl) {
1929 ret = AVERROR_NOMEM;
1930 break;
1931 }
1932
1933 /* add the packet in the buffered packet list */
1934 *ppktl = pktl;
1935 ppktl = &pktl->next;
1936
1937 pkt = &pktl->pkt;
1938 *pkt = pkt1;
1939
1940 /* duplicate the packet */
1941 if (av_dup_packet(pkt) < 0) {
1942 ret = AVERROR_NOMEM;
1943 break;
1944 }
1945
1946 read_size += pkt->size;
1947
1948 st = ic->streams[pkt->stream_index];
1949 st->codec_info_duration += pkt->duration;
1950 if (pkt->duration != 0)
1951 st->codec_info_nb_frames++;
1952
1953 {
1954 int index= pkt->stream_index;
1955 int64_t last= last_dts[index];
1956 int64_t duration= pkt->dts - last;
1957
1958 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1959 if(duration*duration_count[index]*10/9 < duration_sum[index]){
1960 duration_sum[index]= duration;
1961 duration_count[index]=1;
1962 }else{
1963 int factor= av_rescale(duration, duration_count[index], duration_sum[index]);
1964 duration_sum[index] += duration;
1965 duration_count[index]+= factor;
1966 }
1967 if(st->codec_info_nb_frames == 0 && 0)
1968 st->codec_info_duration += duration;
1969 }
1970 last_dts[pkt->stream_index]= pkt->dts;
1971 }
1972 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1973 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1974 if(i){
1975 st->codec->extradata_size= i;
1976 st->codec->extradata= av_malloc(st->codec->extradata_size);
1977 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1978 }
1979 }
1980
1981 /* if still no information, we try to open the codec and to
1982 decompress the frame. We try to avoid that in most cases as
1983 it takes longer and uses more memory. For MPEG4, we need to
1984 decompress for Quicktime. */
1985 if (!has_codec_parameters(st->codec) /*&&
1986 (st->codec->codec_id == CODEC_ID_FLV1 ||
1987 st->codec->codec_id == CODEC_ID_H264 ||
1988 st->codec->codec_id == CODEC_ID_H263 ||
1989 st->codec->codec_id == CODEC_ID_H261 ||
1990 st->codec->codec_id == CODEC_ID_VORBIS ||
1991 st->codec->codec_id == CODEC_ID_MJPEG ||
1992 st->codec->codec_id == CODEC_ID_PNG ||
1993 st->codec->codec_id == CODEC_ID_PAM ||
1994 st->codec->codec_id == CODEC_ID_PGM ||
1995 st->codec->codec_id == CODEC_ID_PGMYUV ||
1996 st->codec->codec_id == CODEC_ID_PBM ||
1997 st->codec->codec_id == CODEC_ID_PPM ||
1998 st->codec->codec_id == CODEC_ID_SHORTEN ||
1999 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2000 try_decode_frame(st, pkt->data, pkt->size);
2001
2002 if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
2003 break;
2004 }
2005 count++;
2006 }
2007
2008     // close codecs which were opened in try_decode_frame()
2009 for(i=0;i<ic->nb_streams;i++) {
2010 st = ic->streams[i];
2011 if(st->codec->codec)
2012 avcodec_close(st->codec);
2013 }
2014 for(i=0;i<ic->nb_streams;i++) {
2015 st = ic->streams[i];
2016 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2017 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2018 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2019
2020 if(duration_count[i] && st->codec->time_base.num*1000LL <= st->codec->time_base.den &&
2021 st->time_base.num*duration_sum[i]/duration_count[i]*1000LL > st->time_base.den){
2022 AVRational fps1;
2023 int64_t num, den;
2024
2025 num= st->time_base.den*duration_count[i];
2026 den= st->time_base.num*duration_sum[i];
2027
2028 av_reduce(&fps1.num, &fps1.den, num*1001, den*1000, FFMAX(st->time_base.den, st->time_base.num)/4);
2029 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, den, FFMAX(st->time_base.den, st->time_base.num)/4);
2030             if(fps1.num < st->r_frame_rate.num && fps1.den == 1 && (fps1.num==24 || fps1.num==30)){ //FIXME better decision
2031 st->r_frame_rate.num= fps1.num*1000;
2032 st->r_frame_rate.den= fps1.den*1001;
2033 }
2034 }
2035
2036 /* set real frame rate info */
2037 /* compute the real frame rate for telecine */
2038 if ((st->codec->codec_id == CODEC_ID_MPEG1VIDEO ||
2039 st->codec->codec_id == CODEC_ID_MPEG2VIDEO) &&
2040 st->codec->sub_id == 2) {
2041 if (st->codec_info_nb_frames >= 20) {
2042 float coded_frame_rate, est_frame_rate;
2043 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
2044 (double)st->codec_info_duration ;
2045 coded_frame_rate = 1.0/av_q2d(st->codec->time_base);
2046 #if 0
2047 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
2048 coded_frame_rate, est_frame_rate);
2049 #endif
2050 /* if we detect that it could be telecine, we
2051 signal it. It would be better to do this at a
2052 higher level, as it can change within a film */
2053 if (coded_frame_rate >= 24.97 &&
2054 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
2055 st->r_frame_rate = (AVRational){24000, 1001};
2056 }
2057 }
2058 }
2059 /* if no real frame rate, use the codec one */
2060 if (!st->r_frame_rate.num){
2061 st->r_frame_rate.num = st->codec->time_base.den;
2062 st->r_frame_rate.den = st->codec->time_base.num;
2063 }
2064 }
2065 }
2066
2067 av_estimate_timings(ic);
2068 #if 0
2069 /* correct DTS for b frame streams with no timestamps */
2070 for(i=0;i<ic->nb_streams;i++) {
2071 st = ic->streams[i];
2072 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2073 if(b-frames){
2074 ppktl = &ic->packet_buffer;
2075 while(ppkt1){
2076 if(ppkt1->stream_index != i)
2077 continue;
2078 if(ppkt1->pkt->dts < 0)
2079 break;
2080 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2081 break;
2082 ppkt1->pkt->dts -= delta;
2083 ppkt1= ppkt1->next;
2084 }
2085 if(ppkt1)
2086 continue;
2087 st->cur_dts -= delta;
2088 }
2089 }
2090 }
2091 #endif
2092 return ret;
2093 }
2094
2095 /*******************************************************/
2096
2097 /**
2098 * Start playing a network-based stream (e.g. an RTSP stream) at the
2099 * current position.
2100 */
2101 int av_read_play(AVFormatContext *s)
2102 {
2103 if (!s->iformat->read_play)
2104 return AVERROR_NOTSUPP;
2105 return s->iformat->read_play(s);
2106 }
2107
2108 /**
2109 * Pause a network-based stream (e.g. an RTSP stream).
2110 *
2111 * Use av_read_play() to resume it.
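 *
 * A small sketch (assuming 'ic' is an AVFormatContext opened on a
 * network stream such as RTSP):
 * @code
 * if (av_read_pause(ic) < 0) {
 *     // the demuxer does not support pausing (AVERROR_NOTSUPP)
 * }
 * // ... later, resume reading from the current position:
 * av_read_play(ic);
 * @endcode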
2112 */
2113 int av_read_pause(AVFormatContext *s)
2114 {
2115 if (!s->iformat->read_pause)
2116 return AVERROR_NOTSUPP;
2117 return s->iformat->read_pause(s);
2118 }
2119
2120 /**
2121 * Close a media file (but not its codecs).
2122 *
2123 * @param s media file handle
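 *
 * A minimal demuxing sketch (assuming 'ic' was opened with
 * av_open_input_file() and probed with av_find_stream_info()):
 * @code
 * AVPacket pkt;
 * while (av_read_frame(ic, &pkt) >= 0) {
 *     // ... use pkt.data / pkt.size ...
 *     av_free_packet(&pkt);
 * }
 * av_close_input_file(ic);
 * @endcode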
2124 */
2125 void av_close_input_file(AVFormatContext *s)
2126 {
2127 int i, must_open_file;
2128 AVStream *st;
2129
2130 /* free previous packet */
2131 if (s->cur_st && s->cur_st->parser)
2132 av_free_packet(&s->cur_pkt);
2133
2134 if (s->iformat->read_close)
2135 s->iformat->read_close(s);
2136 for(i=0;i<s->nb_streams;i++) {
2137 /* free all data in a stream component */
2138 st = s->streams[i];
2139 if (st->parser) {
2140 av_parser_close(st->parser);
2141 }
2142 av_free(st->index_entries);
2143 av_free(st->codec);
2144 av_free(st);
2145 }
2146 flush_packet_queue(s);
2147 must_open_file = 1;
2148 if (s->iformat->flags & AVFMT_NOFILE) {
2149 must_open_file = 0;
2150 }
2151 if (must_open_file) {
2152 url_fclose(&s->pb);
2153 }
2154 av_freep(&s->priv_data);
2155 av_free(s);
2156 }
2157
2158 /**
2159 * Add a new stream to a media file.
2160 *
2161 * Can only be called in the read_header() function. If the flag
2162 * AVFMTCTX_NOHEADER is set in the format context, then new streams
2163 * can be added in read_packet() too.
2164 *
2165 * @param s media file handle
2166 * @param id file format dependent stream id
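 *
 * A minimal sketch of typical use inside a demuxer's read_header()
 * (the codec values are illustrative only):
 * @code
 * AVStream *st = av_new_stream(s, 0);
 * if (!st)
 *     return AVERROR_NOMEM;
 * st->codec->codec_type  = CODEC_TYPE_AUDIO;
 * st->codec->codec_id    = CODEC_ID_PCM_S16LE;
 * st->codec->sample_rate = 44100;
 * st->codec->channels    = 2;
 * @endcode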
2167 */
2168 AVStream *av_new_stream(AVFormatContext *s, int id)
2169 {
2170 AVStream *st;
2171
2172 if (s->nb_streams >= MAX_STREAMS)
2173 return NULL;
2174
2175 st = av_mallocz(sizeof(AVStream));
2176 if (!st)
2177 return NULL;
2178
2179 st->codec= avcodec_alloc_context();
2180 if (s->iformat) {
2181 /* no default bitrate if decoding */
2182 st->codec->bit_rate = 0;
2183 }
2184 st->index = s->nb_streams;
2185 st->id = id;
2186 st->start_time = AV_NOPTS_VALUE;
2187 st->duration = AV_NOPTS_VALUE;
2188 st->cur_dts = AV_NOPTS_VALUE;
2189
2190 /* default pts setting is MPEG-like */
2191 av_set_pts_info(st, 33, 1, 90000);
2192 st->last_IP_pts = AV_NOPTS_VALUE;
2193
2194 s->streams[s->nb_streams++] = st;
2195 return st;
2196 }
2197
2198 /************************************************************/
2199 /* output media file */
2200
2201 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2202 {
2203 int ret;
2204
2205 if (s->oformat->priv_data_size > 0) {
2206 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2207 if (!s->priv_data)
2208 return AVERROR_NOMEM;
2209 } else
2210 s->priv_data = NULL;
2211
2212 if (s->oformat->set_parameters) {
2213 ret = s->oformat->set_parameters(s, ap);
2214 if (ret < 0)
2215 return ret;
2216 }
2217 return 0;
2218 }
2219
2220 /**
2221 * Allocate the stream private data and write the stream header to an
2222 * output media file
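 *
 * A minimal muxing sketch (assuming 'oc' already has its oformat and
 * streams set up and av_set_parameters() was called; error handling
 * omitted):
 * @code
 * if (av_write_header(oc) < 0)
 *     return -1;
 * // ... write packets with av_write_frame() or
 * // av_interleaved_write_frame() ...
 * av_write_trailer(oc);
 * @endcode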
2223 *
2224 * @param s media file handle
2225 * @return 0 if OK. AVERROR_xxx if error.
2226 */
2227 int av_write_header(AVFormatContext *s)
2228 {
2229 int ret, i;
2230 AVStream *st;
2231
2232 // some sanity checks
2233 for(i=0;i<s->nb_streams;i++) {
2234 st = s->streams[i];
2235
2236 switch (st->codec->codec_type) {
2237 case CODEC_TYPE_AUDIO:
2238 if(st->codec->sample_rate<=0){
2239 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2240 return -1;
2241 }
2242 break;
2243 case CODEC_TYPE_VIDEO:
2244 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2245 av_log(s, AV_LOG_ERROR, "time base not set\n");
2246 return -1;
2247 }
2248 if(st->codec->width<=0 || st->codec->height<=0){
2249 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2250 return -1;
2251 }
2252 break;
2253 }
2254 }
2255
2256 if(s->oformat->write_header){
2257 ret = s->oformat->write_header(s);
2258 if (ret < 0)
2259 return ret;
2260 }
2261
2262 /* init PTS generation */
2263 for(i=0;i<s->nb_streams;i++) {
2264 int64_t den = AV_NOPTS_VALUE;
2265 st = s->streams[i];
2266
2267 switch (st->codec->codec_type) {
2268 case CODEC_TYPE_AUDIO:
2269 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2270 break;
2271 case CODEC_TYPE_VIDEO:
2272 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2273 break;
2274 default:
2275 break;
2276 }
2277 if (den != AV_NOPTS_VALUE) {
2278 if (den <= 0)
2279 return AVERROR_INVALIDDATA;
2280 av_frac_init(&st->pts, 0, 0, den);
2281 }
2282 }
2283 return 0;
2284 }
2285
2286 //FIXME merge with compute_pkt_fields
2287 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2288 int b_frames = FFMAX(st->codec->has_b_frames, st->codec->max_b_frames);
2289 int num, den, frame_size;
2290
2291 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
2292
2293 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2294 return -1;*/
2295
2296 /* duration field */
2297 if (pkt->duration == 0) {
2298 compute_frame_duration(&num, &den, st, NULL, pkt);
2299 if (den && num) {
2300 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2301 }
2302 }
2303
2304 //XXX/FIXME this is a temporary hack until all encoders output pts
2305 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
2306 pkt->dts=
2307 // pkt->pts= st->cur_dts;
2308 pkt->pts= st->pts.val;
2309 }
2310
2311 //calculate dts from pts
2312 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2313 if(b_frames){
2314 if(st->last_IP_pts == AV_NOPTS_VALUE){
2315 st->last_IP_pts= -pkt->duration;
2316 }
2317 if(st->last_IP_pts < pkt->pts){
2318 pkt->dts= st->last_IP_pts;
2319 st->last_IP_pts= pkt->pts;
2320 }else
2321 pkt->dts= pkt->pts;
2322 }else
2323 pkt->dts= pkt->pts;
2324 }
2325
2326 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2327 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2328 return -1;
2329 }
2330 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2331 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2332 return -1;
2333 }
2334
2335 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
2336 st->cur_dts= pkt->dts;
2337 st->pts.val= pkt->dts;
2338
2339 /* update pts */
2340 switch (st->codec->codec_type) {
2341 case CODEC_TYPE_AUDIO:
2342 frame_size = get_audio_frame_size(st->codec, pkt->size);
2343
2344 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2345 but it would be better if we had the real timestamps from the encoder */
2346 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2347 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2348 }
2349 break;
2350 case CODEC_TYPE_VIDEO:
2351 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2352 break;
2353 default:
2354 break;
2355 }
2356 return 0;
2357 }
2358
2359 static void truncate_ts(AVStream *st, AVPacket *pkt){
2360 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2361
2362 // if(pkt->dts < 0)
2363 // pkt->dts= 0; //this happens for low_delay=0 and B-frames; FIXME, needs further investigation about what we should do here
2364
2365 pkt->pts &= pts_mask;
2366 pkt->dts &= pts_mask;
2367 }
2368
2369 /**
2370 * Write a packet to an output media file.
2371 *
2372 * The packet shall contain one audio or video frame.
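 *
 * A small sketch (assuming 'outbuf'/'out_size' hold one encoded frame and
 * the timestamp is already in the stream's time base; names are illustrative):
 * @code
 * AVPacket pkt;
 * av_init_packet(&pkt);
 * pkt.stream_index = 0;       // target stream
 * pkt.data         = outbuf;
 * pkt.size         = out_size;
 * pkt.pts          = pts;
 * if (av_write_frame(oc, &pkt) < 0)
 *     return -1;
 * @endcode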
2373 *
2374 * @param s media file handle
2375 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2376 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2377 */
2378 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2379 {
2380 int ret;
2381
2382 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2383 if(ret<0)
2384 return ret;
2385
2386 truncate_ts(s->streams[pkt->stream_index], pkt);
2387
2388 ret= s->oformat->write_packet(s, pkt);
2389 if(!ret)
2390 ret= url_ferror(&s->pb);
2391 return ret;
2392 }
2393
2394 /**
2395 * interleave_packet implementation which will interleave per DTS.
2396 * Packets with pkt->destruct == av_destruct_packet will be freed inside this function,
2397 * so they cannot be used afterwards; note that calling av_free_packet() on them is still safe.
2398 */
2399 static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2400 AVPacketList *pktl, **next_point, *this_pktl;
2401 int stream_count=0;
2402 int streams[MAX_STREAMS];
2403
2404 if(pkt){
2405 AVStream *st= s->streams[ pkt->stream_index];
2406
2407 // assert(pkt->destruct != av_destruct_packet); //FIXME
2408
2409 this_pktl = av_mallocz(sizeof(AVPacketList));
2410 this_pktl->pkt= *pkt;
2411 if(pkt->destruct == av_destruct_packet)
2412 pkt->destruct= NULL; // non shared -> must keep original from being freed
2413 else
2414 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2415
2416 next_point = &s->packet_buffer;
2417 while(*next_point){
2418 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2419 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2420 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2421 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2422 break;
2423 next_point= &(*next_point)->next;
2424 }
2425 this_pktl->next= *next_point;
2426 *next_point= this_pktl;
2427 }
2428
2429 memset(streams, 0, sizeof(streams));
2430 pktl= s->packet_buffer;
2431 while(pktl){
2432 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2433 if(streams[ pktl->pkt.stream_index ] == 0)
2434 stream_count++;
2435 streams[ pktl->pkt.stream_index ]++;
2436 pktl= pktl->next;
2437 }
2438
2439 if(s->nb_streams == stream_count || (flush && stream_count)){
2440 pktl= s->packet_buffer;
2441 *out= pktl->pkt;
2442
2443 s->packet_buffer= pktl->next;
2444 av_freep(&pktl);
2445 return 1;
2446 }else{
2447 av_init_packet(out);
2448 return 0;
2449 }
2450 }
2451
2452 /**
2453 * Interleaves an AVPacket correctly so it can be muxed.
2454 * @param out the interleaved packet will be output here
2455 * @param in the input packet
2456 * @param flush 1 if no further packets are available as input and all
2457 * remaining packets should be output
2458 * @return 1 if a packet was output, 0 if no packet could be output,
2459 * < 0 if an error occurred
2460 */
2461 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2462 if(s->oformat->interleave_packet)
2463 return s->oformat->interleave_packet(s, out, in, flush);
2464 else
2465 return av_interleave_packet_per_dts(s, out, in, flush);
2466 }
2467
2468 /**
2469 * Writes a packet to an output media file ensuring correct interleaving.
2470 *
2471 * The packet must contain one audio or video frame.
2472 * If the packets are already correctly interleaved, the application should
2473 * call av_write_frame() instead, as it is slightly faster. It is also important
2474 * to keep in mind that completely non-interleaved input will need huge amounts
2475 * of memory to interleave with this, so it is preferable to interleave at the
2476 * demuxer level.
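 *
 * A minimal remuxing sketch (assuming 'ic' is an opened input context,
 * 'oc' an output context with matching streams and time bases, and the
 * header was already written):
 * @code
 * AVPacket pkt;
 * while (av_read_frame(ic, &pkt) >= 0) {
 *     if (av_interleaved_write_frame(oc, &pkt) < 0)  // consumes the packet
 *         break;
 * }
 * @endcode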
2477 *
2478 * @param s media file handle
2479 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2480 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2481 */
2482 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2483 AVStream *st= s->streams[ pkt->stream_index];
2484
2485 //FIXME/XXX/HACK drop zero sized packets
2486 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2487 return 0;
2488
2489 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts);
2490 if(compute_pkt_fields2(st, pkt) < 0)
2491 return -1;
2492
2493 if(pkt->dts == AV_NOPTS_VALUE)
2494 return -1;
2495
2496 for(;;){
2497 AVPacket opkt;
2498 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2499 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2500 return ret;
2501
2502 truncate_ts(s->streams[opkt.stream_index], &opkt);
2503 ret= s->oformat->write_packet(s, &opkt);
2504
2505 av_free_packet(&opkt);
2506 pkt= NULL;
2507
2508 if(ret<0)
2509 return ret;
2510 if(url_ferror(&s->pb))
2511 return url_ferror(&s->pb);
2512 }
2513 }
2514
2515 /**
2516 * @brief Write the stream trailer to an output media file and
2517 * free the file private data.
2518 *
2519 * @param s media file handle
2520 * @return 0 if OK. AVERROR_xxx if error.
2521 */
2522 int av_write_trailer(AVFormatContext *s)
2523 {
2524 int ret, i;
2525
2526 for(;;){
2527 AVPacket pkt;
2528 ret= av_interleave_packet(s, &pkt, NULL, 1);
2529 if(ret<0) //FIXME cleanup needed for ret<0 ?
2530 goto fail;
2531 if(!ret)
2532 break;
2533
2534 truncate_ts(s->streams[pkt.stream_index], &pkt);
2535 ret= s->oformat->write_packet(s, &pkt);
2536
2537 av_free_packet(&pkt);
2538
2539 if(ret<0)
2540 goto fail;
2541 if(url_ferror(&s->pb))
2542 goto fail;
2543 }
2544
2545 if(s->oformat->write_trailer)
2546 ret = s->oformat->write_trailer(s);
2547 fail:
2548 if(ret == 0)
2549 ret=url_ferror(&s->pb);
2550 for(i=0;i<s->nb_streams;i++)
2551 av_freep(&s->streams[i]->priv_data);
2552 av_freep(&s->priv_data);
2553 return ret;
2554 }
2555
2556 /* "user interface" functions */
2557
2558 void dump_format(AVFormatContext *ic,
2559 int index,
2560 const char *url,
2561 int is_output)
2562 {
2563 int i, flags;
2564 char buf[256];
2565
2566 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2567 is_output ? "Output" : "Input",
2568 index,
2569 is_output ? ic->oformat->name : ic->iformat->name,
2570 is_output ? "to" : "from", url);
2571 if (!is_output) {
2572 av_log(NULL, AV_LOG_INFO, " Duration: ");
2573 if (ic->duration != AV_NOPTS_VALUE) {
2574 int hours, mins, secs, us;
2575 secs = ic->duration / AV_TIME_BASE;
2576 us = ic->duration % AV_TIME_BASE;
2577 mins = secs / 60;
2578 secs %= 60;
2579 hours = mins / 60;
2580 mins %= 60;
2581 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2582 (10 * us) / AV_TIME_BASE);
2583 } else {
2584 av_log(NULL, AV_LOG_INFO, "N/A");
2585 }
2586 if (ic->start_time != AV_NOPTS_VALUE) {
2587 int secs, us;
2588 av_log(NULL, AV_LOG_INFO, ", start: ");
2589 secs = ic->start_time / AV_TIME_BASE;
2590 us = ic->start_time % AV_TIME_BASE;
2591 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2592 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2593 }
2594 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2595 if (ic->bit_rate) {
2596 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2597 } else {
2598 av_log(NULL, AV_LOG_INFO, "N/A");
2599 }
2600 av_log(NULL, AV_LOG_INFO, "\n");
2601 }
2602 for(i=0;i<ic->nb_streams;i++) {
2603 AVStream *st = ic->streams[i];
2604 int g= ff_gcd(st->time_base.num, st->time_base.den);
2605 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2606 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2607 /* the PID is important information, so we display it */
2608 /* XXX: add a generic system */
2609 if (is_output)
2610 flags = ic->oformat->flags;
2611 else
2612 flags = ic->iformat->flags;
2613 if (flags & AVFMT_SHOW_IDS) {
2614 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2615 }
2616 if (strlen(st->language) > 0) {
2617 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2618 }
2619 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2620 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2621 av_log(NULL, AV_LOG_INFO, ", %5.2f fps", av_q2d(st->r_frame_rate));
2622 }
2623 av_log(NULL, AV_LOG_INFO, ": %s\n", buf);
2624 }
2625 }
2626
2627 typedef struct {
2628 const char *abv;
2629 int width, height;
2630 int frame_rate, frame_rate_base;
2631 } AbvEntry;
2632
2633 static AbvEntry frame_abvs[] = {
2634 { "ntsc", 720, 480, 30000, 1001 },
2635 { "pal", 720, 576, 25, 1 },
2636 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2637 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2638 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2639 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2640 { "film", 352, 240, 24, 1 },
2641 { "ntsc-film", 352, 240, 24000, 1001 },
2642 { "sqcif", 128, 96, 0, 0 },
2643 { "qcif", 176, 144, 0, 0 },
2644 { "cif", 352, 288, 0, 0 },
2645 { "4cif", 704, 576, 0, 0 },
2646 };
2647
2648 /**
2649 * Parses width and height out of the string str.
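 *
 * Accepts either an abbreviation from frame_abvs[] or an explicit
 * "<width>x<height>" string, for example:
 * @code
 * int w, h;
 * parse_image_size(&w, &h, "qcif");    // w = 176, h = 144
 * parse_image_size(&w, &h, "640x480"); // w = 640, h = 480
 * @endcode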
2650 */
2651 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2652 {
2653 int i;
2654 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2655 const char *p;
2656 int frame_width = 0, frame_height = 0;
2657
2658 for(i=0;i<n;i++) {
2659 if (!strcmp(frame_abvs[i].abv, str)) {
2660 frame_width = frame_abvs[i].width;
2661 frame_height = frame_abvs[i].height;
2662 break;
2663 }
2664 }
2665 if (i == n) {
2666 p = str;
2667 frame_width = strtol(p, (char **)&p, 10);
2668 if (*p)
2669 p++;
2670 frame_height = strtol(p, (char **)&p, 10);
2671 }
2672 if (frame_width <= 0 || frame_height <= 0)
2673 return -1;
2674 *width_ptr = frame_width;
2675 *height_ptr = frame_height;
2676 return 0;
2677 }
2678
2679 /**
2680 * Converts a frame rate from a string to a fraction.
2681 *
2682 * First we try to get an exact integer or fractional frame rate.
2683 * If this fails, we convert the frame rate to a double and return
2684 * an approximate fraction using DEFAULT_FRAME_RATE_BASE.
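 *
 * For example:
 * @code
 * int num, den;
 * parse_frame_rate(&num, &den, "30000/1001"); // num = 30000, den = 1001
 * parse_frame_rate(&num, &den, "ntsc-film");  // num = 24000, den = 1001
 * parse_frame_rate(&num, &den, "23.976");     // approximated with DEFAULT_FRAME_RATE_BASE
 * @endcode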
2685 */
2686 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2687 {
2688 int i;
2689 char* cp;
2690
2691 /* First, we check our abbreviation table */
2692 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2693 if (!strcmp(frame_abvs[i].abv, arg)) {
2694 *frame_rate = frame_abvs[i].frame_rate;
2695 *frame_rate_base = frame_abvs[i].frame_rate_base;
2696 return 0;
2697 }
2698
2699 /* Then, we try to parse it as fraction */
2700 cp = strchr(arg, '/');
2701 if (!cp)
2702 cp = strchr(arg, ':');
2703 if (cp) {
2704 char* cpp;
2705 *frame_rate = strtol(arg, &cpp, 10);
2706 if (cpp != arg || cpp == cp)
2707 *frame_rate_base = strtol(cp+1, &cpp, 10);
2708 else
2709 *frame_rate = 0;
2710 }
2711 else {
2712 /* Finally we give up and parse it as double */
2713 *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q()
2714 *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5);
2715 }
2716 if (!*frame_rate || !*frame_rate_base)
2717 return -1;
2718 else
2719 return 0;
2720 }
2721
2722 /**
2723 * Converts a date string to the number of microseconds since Jan 1st, 1970.
2724 *
2725 * @code
2726 * Syntax:
2727 * - If not a duration:
2728 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2729 * Time is local time unless Z is appended to the end, in which case it is GMT.
2730 * Returns the date in microseconds since 1970.
2731 *
2732 * - If a duration:
2733 * HH[:MM[:SS[.m...]]]
2734 * S+[.m...]
2735 * @endcode
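 *
 * For example (the absolute form depends on the local time zone unless
 * 'Z' is appended):
 * @code
 * int64_t when = parse_date("2003-04-05T12:30:00Z", 0); // absolute date, in microseconds
 * int64_t len  = parse_date("00:01:30.500", 1);         // duration: 90500000 microseconds
 * @endcode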
2736 */
2737 int64_t parse_date(const char *datestr, int duration)
2738 {
2739 const char *p;
2740 int64_t t;
2741 struct tm dt;
2742 int i;
2743 static const char *date_fmt[] = {
2744 "%Y-%m-%d",
2745 "%Y%m%d",
2746 };
2747 static const char *time_fmt[] = {
2748 "%H:%M:%S",
2749 "%H%M%S",
2750 };
2751 const char *q;
2752 int is_utc, len;
2753 char lastch;
2754 int negative = 0;
2755
2756 #undef time
2757 time_t now = time(0);
2758
2759 len = strlen(datestr);
2760 if (len > 0)
2761 lastch = datestr[len - 1];
2762 else
2763 lastch = '\0';
2764 is_utc = (lastch == 'z' || lastch == 'Z');
2765
2766 memset(&dt, 0, sizeof(dt));
2767
2768 p = datestr;
2769 q = NULL;
2770 if (!duration) {
2771 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2772 q = small_strptime(p, date_fmt[i], &dt);
2773 if (q) {
2774 break;
2775 }
2776 }
2777
2778 if (!q) {
2779 if (is_utc) {
2780 dt = *gmtime(&now);
2781 } else {
2782 dt = *localtime(&now);
2783 }
2784 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2785 } else {
2786 p = q;
2787 }
2788
2789 if (*p == 'T' || *p == 't' || *p == ' ')
2790 p++;
2791
2792 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2793 q = small_strptime(p, time_fmt[i], &dt);
2794 if (q) {
2795 break;
2796 }
2797 }
2798 } else {
2799 if (p[0] == '-') {
2800 negative = 1;
2801 ++p;
2802 }
2803 q = small_strptime(p, time_fmt[0], &dt);
2804 if (!q) {
2805 dt.tm_sec = strtol(p, (char **)&q, 10);
2806 dt.tm_min = 0;
2807 dt.tm_hour = 0;
2808 }
2809 }
2810
2811 /* Now we have all the fields that we can get */
2812 if (!q) {
2813 if (duration)
2814 return 0;
2815 else
2816 return now * int64_t_C(1000000);
2817 }
2818
2819 if (duration) {
2820 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2821 } else {
2822 dt.tm_isdst = -1; /* unknown */
2823 if (is_utc) {
2824 t = mktimegm(&dt);
2825 } else {
2826 t = mktime(&dt);
2827 }
2828 }
2829
2830 t *= 1000000;
2831
2832 if (*q == '.') {
2833 int val, n;
2834 q++;
2835 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2836 if (!isdigit(*q))
2837 break;
2838 val += n * (*q - '0');
2839 }
2840 t += val;
2841 }
2842 return negative ? -t : t;
2843 }
2844
2845 /**
2846 * Attempts to find a specific tag in a URL.
2847 *
2848 * syntax: '?tag1=val1&tag2=val2...'. Only minimal URL decoding is done ('+' becomes ' ').
2849 * Returns 1 if found, 0 otherwise.
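 *
 * For example:
 * @code
 * char buf[64];
 * if (find_info_tag(buf, sizeof(buf), "bitrate", "?bitrate=320&mode=fast")) {
 *     // buf now contains "320"
 * }
 * @endcode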
2850 */
2851 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2852 {
2853 const char *p;
2854 char tag[128], *q;
2855
2856 p = info;
2857 if (*p == '?')
2858 p++;
2859 for(;;) {
2860 q = tag;
2861 while (*p != '\0' && *p != '=' && *p != '&') {
2862 if ((q - tag) < sizeof(tag) - 1)
2863 *q++ = *p;
2864 p++;
2865 }
2866 *q = '\0';
2867 q = arg;
2868 if (*p == '=') {
2869 p++;
2870 while (*p != '&' && *p != '\0') {
2871 if ((q - arg) < arg_size - 1) {
2872 if (*p == '+')
2873 *q++ = ' ';
2874 else
2875 *q++ = *p;
2876 }
2877 p++;
2878 }
2879 *q = '\0';
2880 }
2881 if (!strcmp(tag, tag1))
2882 return 1;
2883 if (*p != '&')
2884 break;
2885 p++;
2886 }
2887 return 0;
2888 }
2889
2890 /**
2891 * Returns in 'buf' the path with '%d' replaced by a number.
2892 *
2893 * Also handles the '%0nd' format where 'n' is the total number
2894 * of digits, as well as '%%'. Returns 0 if OK, and -1 on format error.
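 *
 * For example:
 * @code
 * char name[64];
 * get_frame_filename(name, sizeof(name), "img%03d.jpg", 7);  // "img007.jpg"
 * get_frame_filename(name, sizeof(name), "frame%d.png", 12); // "frame12.png"
 * @endcode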
2895 */
2896 int get_frame_filename(char *buf, int buf_size,
2897 const char *path, int number)
2898 {
2899 const char *p;
2900 char *q, buf1[20], c;
2901 int nd, len, percentd_found;
2902
2903 q = buf;
2904 p = path;
2905 percentd_found = 0;
2906 for(;;) {
2907 c = *p++;
2908 if (c == '\0')
2909 break;
2910 if (c == '%') {
2911 do {
2912 nd = 0;
2913 while (isdigit(*p)) {
2914 nd = nd * 10 + *p++ - '0';
2915 }
2916 c = *p++;
2917 } while (isdigit(c));
2918
2919 switch(c) {
2920 case '%':
2921 goto addchar;
2922 case 'd':
2923 if (percentd_found)
2924 goto fail;
2925 percentd_found = 1;
2926 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2927 len = strlen(buf1);
2928 if ((q - buf + len) > buf_size - 1)
2929 goto fail;
2930 memcpy(q, buf1, len);
2931 q += len;
2932 break;
2933 default:
2934 goto fail;
2935 }
2936 } else {
2937 addchar:
2938 if ((q - buf) < buf_size - 1)
2939 *q++ = c;
2940 }
2941 }
2942 if (!percentd_found)
2943 goto fail;
2944 *q = '\0';
2945 return 0;
2946 fail:
2947 *q = '\0';
2948 return -1;
2949 }
2950
2951 /**
2952 * Print a nice hex dump of a buffer.
2953 * @param f stream for output
2954 * @param buf buffer
2955 * @param size buffer size
2956 */
2957 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2958 {
2959 int len, i, j, c;
2960
2961 for(i=0;i<size;i+=16) {
2962 len = size - i;
2963 if (len > 16)
2964 len = 16;
2965 fprintf(f, "%08x ", i);
2966 for(j=0;j<16;j++) {
2967 if (j < len)
2968 fprintf(f, " %02x", buf[i+j]);
2969 else
2970 fprintf(f, " ");
2971 }
2972 fprintf(f, " ");
2973 for(j=0;j<len;j++) {
2974 c = buf[i+j];
2975 if (c < ' ' || c > '~')
2976 c = '.';
2977 fprintf(f, "%c", c);
2978 }
2979 fprintf(f, "\n");
2980 }
2981 }
2982
2983 /**
2984 * Print a nice dump of a packet on 'f'.
2985 * @param f stream for output
2986 * @param pkt packet to dump
2987 * @param dump_payload true if the payload must be displayed too
2988 */
2989 //FIXME needs to know the time_base
2990 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2991 {
2992 fprintf(f, "stream #%d:\n", pkt->stream_index);
2993 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2994 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2995 /* DTS is _always_ valid after av_read_frame() */
2996 fprintf(f, " dts=");
2997 if (pkt->dts == AV_NOPTS_VALUE)
2998 fprintf(f, "N/A");
2999 else
3000 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
3001 /* PTS may be not known if B frames are present */
3002 fprintf(f, " pts=");
3003 if (pkt->pts == AV_NOPTS_VALUE)
3004 fprintf(f, "N/A");
3005 else
3006 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
3007 fprintf(f, "\n");
3008 fprintf(f, " size=%d\n", pkt->size);
3009 if (dump_payload)
3010 av_hex_dump(f, pkt->data, pkt->size);
3011 }
3012
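/**
 * Splits a URL string into its components.
 *
 * A sketch of what the function extracts (buffer sizes are illustrative):
 * @code
 * char proto[16], auth[128], host[128], path[256];
 * int port;
 * url_split(proto, sizeof(proto), auth, sizeof(auth),
 *           host, sizeof(host), &port, path, sizeof(path),
 *           "rtsp://user:pass@example.com:554/stream");
 * // proto = "rtsp", auth = "user:pass", host = "example.com",
 * // port = 554, path = "/stream"
 * @endcode
 */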
3013 void url_split(char *proto, int proto_size,
3014 char *authorization, int authorization_size,
3015 char *hostname, int hostname_size,
3016 int *port_ptr,
3017 char *path, int path_size,
3018 const char *url)
3019 {
3020 const char *p;
3021 char *q;
3022 int port;
3023
3024 port = -1;
3025
3026 p = url;
3027 q = proto;
3028 while (*p != ':' && *p != '\0') {
3029 if ((q - proto) < proto_size - 1)
3030 *q++ = *p;
3031 p++;
3032 }
3033 if (proto_size > 0)
3034 *q = '\0';
3035 if (authorization_size > 0)
3036 authorization[0] = '\0';
3037 if (*p == '\0') {
3038 if (proto_size > 0)
3039 proto[0] = '\0';
3040 if (hostname_size > 0)
3041 hostname[0] = '\0';
3042 p = url;
3043 } else {
3044 char *at,*slash; // PETR: position of '@' character and '/' character
3045
3046 p++;
3047 if (*p == '/')
3048 p++;
3049 if (*p == '/')
3050 p++;
3051 at = strchr(p,'@'); // PETR: get the position of '@'
3052 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
3053 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
3054
3055 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
3056
3057 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
3058 if (*p == '@') { // PETR: passed '@'
3059 if (authorization_size > 0)
3060 *q = '\0';
3061 q = hostname;
3062 at = NULL;
3063 } else if (!at) { // PETR: hostname
3064 if ((q - hostname) < hostname_size - 1)
3065 *q++ = *p;
3066 } else {
3067 if ((q - authorization) < authorization_size - 1)
3068 *q++ = *p;
3069 }
3070 p++;
3071 }
3072 if (hostname_size > 0)
3073 *q = '\0';
3074 if (*p == ':') {
3075 p++;
3076 port = strtoul(p, (char **)&p, 10);
3077 }
3078 }
3079 if (port_ptr)
3080 *port_ptr = port;
3081 pstrcpy(path, path_size, p);
3082 }
3083
3084 /**
3085 * Set the pts info (wrap bits and time base) for a given stream.
3086 *
3087 * @param s stream
3088 * @param pts_wrap_bits number of bits effectively used by the pts
3089 * (used for wrap control, 33 is the value for MPEG)
3090 * @param pts_num numerator to convert to seconds (MPEG: 1)
3091 * @param pts_den denominator to convert to seconds (MPEG: 90000)
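 *
 * For example, MPEG uses a 90 kHz clock and a 33-bit counter:
 * @code
 * av_set_pts_info(st, 33, 1, 90000);
 * @endcode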
3092 */
3093 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3094 int pts_num, int pts_den)
3095 {
3096 s->pts_wrap_bits = pts_wrap_bits;
3097 s->time_base.num = pts_num;
3098 s->time_base.den = pts_den;
3099 }
3100
3101 /* fraction handling */
3102
3103 /**
3104 * f = val + (num / den) + 0.5.
3105 *
3106 * 'num' is normalized so that 0 <= num < den.
3107 *
3108 * @param f fractional number
3109 * @param val integer value
3110 * @param num must be >= 0
3111 * @param den must be >= 1
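 *
 * A small sketch of how the muxer uses AVFrac to accumulate pts in
 * st->time_base units (cf. av_write_header() and compute_pkt_fields2()):
 * @code
 * av_frac_init(&st->pts, 0, 0, (int64_t)st->time_base.num * st->codec->sample_rate);
 * av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
 * // st->pts.val now holds the integer part, st->pts.num the remainder
 * @endcode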
3112 */
3113 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3114 {
3115 num += (den >> 1);
3116 if (num >= den) {
3117 val += num / den;
3118 num = num % den;
3119 }
3120 f->val = val;
3121 f->num = num;
3122 f->den = den;
3123 }
3124
3125 /**
3126 * Set f to (val + 0.5).
3127 */
3128 void av_frac_set(AVFrac *f, int64_t val)
3129 {
3130 f->val = val;
3131 f->num = f->den >> 1;
3132 }
3133
3134 /**
3135 * Fractional addition to f: f = f + (incr / f->den).
3136 *
3137 * @param f fractional number
3138 * @param incr increment, can be positive or negative
3139 */
3140 void av_frac_add(AVFrac *f, int64_t incr)
3141 {
3142 int64_t num, den;
3143
3144 num = f->num + incr;
3145 den = f->den;
3146 if (num < 0) {
3147 f->val += num / den;
3148 num = num % den;
3149 if (num < 0) {
3150 num += den;
3151 f->val--;
3152 }
3153 } else if (num >= den) {
3154 f->val += num / den;
3155 num = num % den;
3156 }
3157 f->num = num;
3158 }
3159
3160 /**
3161 * Register a new image format.
3162 * @param img_fmt Image format descriptor
3163 */
3164 void av_register_image_format(AVImageFormat *img_fmt)
3165 {
3166 AVImageFormat **p;
3167
3168 p = &first_image_format;
3169 while (*p != NULL) p = &(*p)->next;
3170 *p = img_fmt;
3171 img_fmt->next = NULL;
3172 }
3173
3174 /**
3175 * Guesses image format based on data in the image.
3176 */
3177 AVImageFormat *av_probe_image_format(AVProbeData *pd)
3178 {
3179 AVImageFormat *fmt1, *fmt;
3180 int score, score_max;
3181
3182 fmt = NULL;
3183 score_max = 0;
3184 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
3185 if (fmt1->img_probe) {
3186 score = fmt1->img_probe(pd);
3187 if (score > score_max) {
3188 score_max = score;
3189 fmt = fmt1;
3190 }
3191 }
3192 }
3193 return fmt;
3194 }
3195
3196 /**
3197 * Guesses image format based on file name extensions.
3198 */
3199 AVImageFormat *guess_image_format(const char *filename)
3200 {
3201 AVImageFormat *fmt1;
3202
3203 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
3204 if (fmt1->extensions && match_ext(filename, fmt1->extensions))
3205 return fmt1;
3206 }
3207 return NULL;
3208 }
3209
3210 /**
3211 * Read an image from a stream.
3212 * @param pb byte stream containing the image
3213 * @param fmt image format, NULL if probing is required
3214 */
3215 int av_read_image(ByteIOContext *pb, const char *filename,
3216 AVImageFormat *fmt,
3217 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
3218 {
3219 char buf[PROBE_BUF_SIZE];
3220 AVProbeData probe_data, *pd = &probe_data;
3221 offset_t pos;
3222 int ret;
3223
3224 if (!fmt) {
3225 pd->filename = filename;
3226 pd->buf = buf;
3227 pos = url_ftell(pb);
3228 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
3229 url_fseek(pb, pos, SEEK_SET);
3230 fmt = av_probe_image_format(pd);
3231 }
3232 if (!fmt)
3233 return AVERROR_NOFMT;
3234 ret = fmt->img_read(pb, alloc_cb, opaque);
3235 return ret;
3236 }
3237
3238 /**
3239 * Write an image to a stream.
3240 * @param pb byte stream for the image output
3241 * @param fmt image format
3242 * @param img image data and information
3243 */
3244 int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
3245 {
3246 return fmt->img_write(pb, img);
3247 }
3248