1 /*
2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #include "avformat.h"
20
21 #undef NDEBUG
22 #include <assert.h>
23
24 /**
25 * @file libavformat/utils.c
26 * Various utility functions for using ffmpeg library.
27 */
28
29 /** head of registered input format linked list. */
30 AVInputFormat *first_iformat = NULL;
31 /** head of registered output format linked list. */
32 AVOutputFormat *first_oformat = NULL;
33 /** head of registered image format linked list. */
34 AVImageFormat *first_image_format = NULL;
35
36 void av_register_input_format(AVInputFormat *format)
37 {
38 AVInputFormat **p;
39 p = &first_iformat;
40 while (*p != NULL) p = &(*p)->next;
41 *p = format;
42 format->next = NULL;
43 }
44
45 void av_register_output_format(AVOutputFormat *format)
46 {
47 AVOutputFormat **p;
48 p = &first_oformat;
49 while (*p != NULL) p = &(*p)->next;
50 *p = format;
51 format->next = NULL;
52 }
53
54 int match_ext(const char *filename, const char *extensions)
55 {
56 const char *ext, *p;
57 char ext1[32], *q;
58
59 if(!filename)
60 return 0;
61
62 ext = strrchr(filename, '.');
63 if (ext) {
64 ext++;
65 p = extensions;
66 for(;;) {
67 q = ext1;
68 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
69 *q++ = *p++;
70 *q = '\0';
71 if (!strcasecmp(ext1, ext))
72 return 1;
73 if (*p == '\0')
74 break;
75 p++;
76 }
77 }
78 return 0;
79 }
80
81 AVOutputFormat *guess_format(const char *short_name, const char *filename,
82 const char *mime_type)
83 {
84 AVOutputFormat *fmt, *fmt_found;
85 int score_max, score;
86
87 /* specific test for image sequences */
88 if (!short_name && filename &&
89 filename_number_test(filename) >= 0 &&
90 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
91 return guess_format("image2", NULL, NULL);
92 }
93 if (!short_name && filename &&
94 filename_number_test(filename) >= 0 &&
95 guess_image_format(filename)) {
96 return guess_format("image", NULL, NULL);
97 }
98
99 /* find the proper file type */
100 fmt_found = NULL;
101 score_max = 0;
102 fmt = first_oformat;
103 while (fmt != NULL) {
104 score = 0;
105 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
106 score += 100;
107 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
108 score += 10;
109 if (filename && fmt->extensions &&
110 match_ext(filename, fmt->extensions)) {
111 score += 5;
112 }
113 if (score > score_max) {
114 score_max = score;
115 fmt_found = fmt;
116 }
117 fmt = fmt->next;
118 }
119 return fmt_found;
120 }
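/*
 * Example (illustrative sketch, not part of the original file): selecting an
 * output format for muxing.  Passing only a filename lets the extension
 * decide; "output.avi" is a hypothetical name.
 *
 *   AVOutputFormat *ofmt = guess_format(NULL, "output.avi", NULL);
 *   if (!ofmt)
 *       ofmt = guess_format("mpeg", NULL, NULL);   // fall back to a known muxer
 */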
121
122 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
123 const char *mime_type)
124 {
125 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
126
127 if (fmt) {
128 AVOutputFormat *stream_fmt;
129 char stream_format_name[64];
130
131 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
132 stream_fmt = guess_format(stream_format_name, NULL, NULL);
133
134 if (stream_fmt)
135 fmt = stream_fmt;
136 }
137
138 return fmt;
139 }
140
141 /**
142 * Guesses the codec id based upon muxer and filename.
143 */
144 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
145 const char *filename, const char *mime_type, enum CodecType type){
146 if(type == CODEC_TYPE_VIDEO){
147 enum CodecID codec_id= CODEC_ID_NONE;
148
149 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
150 codec_id= av_guess_image2_codec(filename);
151 }
152 if(codec_id == CODEC_ID_NONE)
153 codec_id= fmt->video_codec;
154 return codec_id;
155 }else if(type == CODEC_TYPE_AUDIO)
156 return fmt->audio_codec;
157 else
158 return CODEC_ID_NONE;
159 }
160
161 /**
162 * Finds AVInputFormat based on the input format's short name.
163 */
164 AVInputFormat *av_find_input_format(const char *short_name)
165 {
166 AVInputFormat *fmt;
167 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
168 if (!strcmp(fmt->name, short_name))
169 return fmt;
170 }
171 return NULL;
172 }
173
174 /* memory handling */
175
176 /**
177 * Default packet destructor.
178 */
179 void av_destruct_packet(AVPacket *pkt)
180 {
181 av_free(pkt->data);
182 pkt->data = NULL; pkt->size = 0;
183 }
184
185 /**
186 * Allocate the payload of a packet and initialize its fields to default values.
187 *
188 * @param pkt packet
189 * @param size wanted payload size
190 * @return 0 if OK. AVERROR_xxx otherwise.
191 */
192 int av_new_packet(AVPacket *pkt, int size)
193 {
194 void *data;
195 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
196 return AVERROR_NOMEM;
197 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
198 if (!data)
199 return AVERROR_NOMEM;
200 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
201
202 av_init_packet(pkt);
203 pkt->data = data;
204 pkt->size = size;
205 pkt->destruct = av_destruct_packet;
206 return 0;
207 }
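/*
 * Example (illustrative sketch, not part of the original file): allocating a
 * packet, filling it and releasing it.  'payload' and 'payload_size' are
 * hypothetical variables supplied by the caller.
 *
 *   AVPacket pkt;
 *   if (av_new_packet(&pkt, payload_size) < 0)
 *       return AVERROR_NOMEM;
 *   memcpy(pkt.data, payload, payload_size);
 *   ...
 *   av_free_packet(&pkt);   // calls pkt.destruct (av_destruct_packet here)
 */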
208
209 /**
210 * Allocate and read the payload of a packet and initialize its fields to default values.
211 *
212 * @param pkt packet
213 * @param size wanted payload size
214 * @return >0 (read size) if OK. AVERROR_xxx otherwise.
215 */
216 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
217 {
218 int ret= av_new_packet(pkt, size);
219
220 if(ret<0)
221 return ret;
222
223 pkt->pos= url_ftell(s);
224
225 ret= get_buffer(s, pkt->data, size);
226 if(ret<=0)
227 av_free_packet(pkt);
228 else
229 pkt->size= ret;
230
231 return ret;
232 }
233
234 /* This is a hack - the packet memory allocation stuff is broken. The
235 packet data is duplicated here if it was not allocated by av_new_packet() */
236 int av_dup_packet(AVPacket *pkt)
237 {
238 if (pkt->destruct != av_destruct_packet) {
239 uint8_t *data;
240 /* we duplicate the packet and don't forget to put the padding
241 again */
242 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
243 return AVERROR_NOMEM;
244 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
245 if (!data) {
246 return AVERROR_NOMEM;
247 }
248 memcpy(data, pkt->data, pkt->size);
249 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
250 pkt->data = data;
251 pkt->destruct = av_destruct_packet;
252 }
253 return 0;
254 }
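/*
 * Example (illustrative sketch, not part of the original file): a packet
 * returned by av_read_frame() may reference parser-owned memory
 * (av_destruct_packet_nofree).  A caller that wants to keep it beyond the
 * next read can make it self-contained first:
 *
 *   if (av_read_frame(ic, &pkt) >= 0) {
 *       if (av_dup_packet(&pkt) < 0)   // copies data + padding if needed
 *           return AVERROR_NOMEM;
 *       queue_packet(&pkt);            // hypothetical consumer
 *   }
 */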
255
256 /* fifo handling */
257
258 int fifo_init(FifoBuffer *f, int size)
259 {
260 f->buffer = av_malloc(size);
261 if (!f->buffer)
262 return -1;
263 f->end = f->buffer + size;
264 f->wptr = f->rptr = f->buffer;
265 return 0;
266 }
267
268 void fifo_free(FifoBuffer *f)
269 {
270 av_free(f->buffer);
271 }
272
273 int fifo_size(FifoBuffer *f, uint8_t *rptr)
274 {
275 int size;
276
277 if(!rptr)
278 rptr= f->rptr;
279
280 if (f->wptr >= rptr) {
281 size = f->wptr - rptr;
282 } else {
283 size = (f->end - rptr) + (f->wptr - f->buffer);
284 }
285 return size;
286 }
287
288 /**
289 * Get data from the fifo (returns -1 if not enough data).
290 */
291 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
292 {
293 uint8_t *rptr;
294 int size, len;
295
296 if(!rptr_ptr)
297 rptr_ptr= &f->rptr;
298 rptr = *rptr_ptr;
299
300 if (f->wptr >= rptr) {
301 size = f->wptr - rptr;
302 } else {
303 size = (f->end - rptr) + (f->wptr - f->buffer);
304 }
305
306 if (size < buf_size)
307 return -1;
308 while (buf_size > 0) {
309 len = f->end - rptr;
310 if (len > buf_size)
311 len = buf_size;
312 memcpy(buf, rptr, len);
313 buf += len;
314 rptr += len;
315 if (rptr >= f->end)
316 rptr = f->buffer;
317 buf_size -= len;
318 }
319 *rptr_ptr = rptr;
320 return 0;
321 }
322
323 /**
324 * Resizes a FIFO.
325 */
326 void fifo_realloc(FifoBuffer *f, unsigned int new_size){
327 unsigned int old_size= f->end - f->buffer;
328
329 if(old_size < new_size){
330 uint8_t *old= f->buffer;
331
332 f->buffer= av_realloc(f->buffer, new_size);
333
334 f->rptr += f->buffer - old;
335 f->wptr += f->buffer - old;
336
337 if(f->wptr < f->rptr){
338 memmove(f->rptr + new_size - old_size, f->rptr, f->buffer + old_size - f->rptr);
339 f->rptr += new_size - old_size;
340 }
341 f->end= f->buffer + new_size;
342 }
343 }
344
345 void fifo_write(FifoBuffer *f, uint8_t *buf, int size, uint8_t **wptr_ptr)
346 {
347 int len;
348 uint8_t *wptr;
349
350 if(!wptr_ptr)
351 wptr_ptr= &f->wptr;
352 wptr = *wptr_ptr;
353
354 while (size > 0) {
355 len = f->end - wptr;
356 if (len > size)
357 len = size;
358 memcpy(wptr, buf, len);
359 wptr += len;
360 if (wptr >= f->end)
361 wptr = f->buffer;
362 buf += len;
363 size -= len;
364 }
365 *wptr_ptr = wptr;
366 }
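/*
 * Example (illustrative sketch, not part of the original file): basic FIFO
 * round trip using the default read/write pointers (NULL selects f->rptr /
 * f->wptr).  Note that a FIFO of size N can hold at most N-1 pending bytes.
 *
 *   FifoBuffer f;
 *   uint8_t in[16] = {0}, out[16];
 *   if (fifo_init(&f, 1024) < 0)
 *       return -1;
 *   fifo_write(&f, in, sizeof(in), NULL);
 *   if (fifo_read(&f, out, sizeof(out), NULL) < 0)
 *       return -1;                      // not enough data buffered
 *   fifo_free(&f);
 */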
367
368 /* write buf_size bytes from the fifo to the ByteIOContext (return -1 if not enough data) */
369 int put_fifo(ByteIOContext *pb, FifoBuffer *f, int buf_size, uint8_t **rptr_ptr)
370 {
371 uint8_t *rptr = *rptr_ptr;
372 int size, len;
373
374 if (f->wptr >= rptr) {
375 size = f->wptr - rptr;
376 } else {
377 size = (f->end - rptr) + (f->wptr - f->buffer);
378 }
379
380 if (size < buf_size)
381 return -1;
382 while (buf_size > 0) {
383 len = f->end - rptr;
384 if (len > buf_size)
385 len = buf_size;
386 put_buffer(pb, rptr, len);
387 rptr += len;
388 if (rptr >= f->end)
389 rptr = f->buffer;
390 buf_size -= len;
391 }
392 *rptr_ptr = rptr;
393 return 0;
394 }
395
396 int filename_number_test(const char *filename)
397 {
398 char buf[1024];
399 if(!filename)
400 return -1;
401 return get_frame_filename(buf, sizeof(buf), filename, 1);
402 }
403
404 /**
405 * Guess file format.
406 */
407 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
408 {
409 AVInputFormat *fmt1, *fmt;
410 int score, score_max;
411
412 fmt = NULL;
413 score_max = 0;
414 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
415 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
416 continue;
417 score = 0;
418 if (fmt1->read_probe) {
419 score = fmt1->read_probe(pd);
420 } else if (fmt1->extensions) {
421 if (match_ext(pd->filename, fmt1->extensions)) {
422 score = 50;
423 }
424 }
425 if (score > score_max) {
426 score_max = score;
427 fmt = fmt1;
428 }
429 }
430 return fmt;
431 }
432
433 /************************************************************/
434 /* input media file */
435
439 static const char* format_to_name(void* ptr)
440 {
441 AVFormatContext* fc = (AVFormatContext*) ptr;
442 if(fc->iformat) return fc->iformat->name;
443 else if(fc->oformat) return fc->oformat->name;
444 else return "NULL";
445 }
446
447 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
448
449 AVFormatContext *av_alloc_format_context(void)
450 {
451 AVFormatContext *ic;
452 ic = av_mallocz(sizeof(AVFormatContext));
453 if (!ic) return ic;
454 ic->av_class = &av_format_context_class;
455 return ic;
456 }
457
458 /**
459 * Allocates all the structures needed to read an input stream from an IO stream;
460 * 'fmt' must be specified. This does not open the needed codecs for decoding the stream[s].
461 */
462 int av_open_input_stream(AVFormatContext **ic_ptr,
463 ByteIOContext *pb, const char *filename,
464 AVInputFormat *fmt, AVFormatParameters *ap)
465 {
466 int err;
467 AVFormatContext *ic;
468
469 ic = av_alloc_format_context();
470 if (!ic) {
471 err = AVERROR_NOMEM;
472 goto fail;
473 }
474 ic->iformat = fmt;
475 if (pb)
476 ic->pb = *pb;
477 ic->duration = AV_NOPTS_VALUE;
478 ic->start_time = AV_NOPTS_VALUE;
479 pstrcpy(ic->filename, sizeof(ic->filename), filename);
480
481 /* allocate private data */
482 if (fmt->priv_data_size > 0) {
483 ic->priv_data = av_mallocz(fmt->priv_data_size);
484 if (!ic->priv_data) {
485 err = AVERROR_NOMEM;
486 goto fail;
487 }
488 } else {
489 ic->priv_data = NULL;
490 }
491
492 err = ic->iformat->read_header(ic, ap);
493 if (err < 0)
494 goto fail;
495
496 if (pb)
497 ic->data_offset = url_ftell(&ic->pb);
498
499 *ic_ptr = ic;
500 return 0;
501 fail:
502 if (ic) {
503 av_freep(&ic->priv_data);
504 }
505 av_free(ic);
506 *ic_ptr = NULL;
507 return err;
508 }
509
510 /** Size of probe buffer, for guessing file type from file contents. */
511 #define PROBE_BUF_SIZE 2048
512
513 /**
514 * Open a media file as input. The codecs are not opened. Only the file
515 * header (if present) is read.
516 *
517 * @param ic_ptr the opened media file handle is put here
518 * @param filename filename to open.
519 * @param fmt if non NULL, force the file format to use
520 * @param buf_size optional buffer size (zero if default is OK)
521 * @param ap additional parameters needed when opening the file (NULL if default)
522 * @return 0 if OK. AVERROR_xxx otherwise.
523 */
524 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
525 AVInputFormat *fmt,
526 int buf_size,
527 AVFormatParameters *ap)
528 {
529 int err, must_open_file, file_opened;
530 uint8_t buf[PROBE_BUF_SIZE];
531 AVProbeData probe_data, *pd = &probe_data;
532 ByteIOContext pb1, *pb = &pb1;
533
534 file_opened = 0;
535 pd->filename = "";
536 if (filename)
537 pd->filename = filename;
538 pd->buf = buf;
539 pd->buf_size = 0;
540
541 if (!fmt) {
542 /* first try to guess the format from the filename alone (no file opened yet) */
543 fmt = av_probe_input_format(pd, 0);
544 }
545
546 /* do not open file if the format does not need it. XXX: specific
547 hack needed to handle RTSP/TCP */
548 must_open_file = 1;
549 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
550 must_open_file = 0;
551 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it is uninitialized
552 }
553
554 if (!fmt || must_open_file) {
555 /* open the file if the format is still unknown or needs one */
556 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
557 err = AVERROR_IO;
558 goto fail;
559 }
560 file_opened = 1;
561 if (buf_size > 0) {
562 url_setbufsize(pb, buf_size);
563 }
564 if (!fmt) {
565 /* read probe data */
566 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
567 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
568 url_fclose(pb);
569 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
570 err = AVERROR_IO;
571 goto fail;
572 }
573 }
574 }
575 }
576
577 /* guess file format */
578 if (!fmt) {
579 fmt = av_probe_input_format(pd, 1);
580 }
581
582 /* if still no format found, error */
583 if (!fmt) {
584 err = AVERROR_NOFMT;
585 goto fail;
586 }
587
588 /* XXX: suppress this hack for redirectors */
589 #ifdef CONFIG_NETWORK
590 if (fmt == &redir_demux) {
591 err = redir_open(ic_ptr, pb);
592 url_fclose(pb);
593 return err;
594 }
595 #endif
596
597 /* check the filename in case an image number is expected */
598 if (fmt->flags & AVFMT_NEEDNUMBER) {
599 if (filename_number_test(filename) < 0) {
600 err = AVERROR_NUMEXPECTED;
601 goto fail;
602 }
603 }
604 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
605 if (err)
606 goto fail;
607 return 0;
608 fail:
609 if (file_opened)
610 url_fclose(pb);
611 *ic_ptr = NULL;
612 return err;
613
614 }
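/*
 * Example (illustrative sketch, not part of the original file): typical
 * demuxer setup.  "input.mpg" is a hypothetical path.
 *
 *   AVFormatContext *ic;
 *   if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
 *       return -1;                      // unknown format or I/O error
 *   if (av_find_stream_info(ic) < 0) {  // fill in codec parameters
 *       av_close_input_file(ic);
 *       return -1;
 *   }
 */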
615
616 /*******************************************************/
617
618 /**
619 * Read a transport packet from a media file.
620 *
621 * This function is obsolete and should never be used.
622 * Use av_read_frame() instead.
623 *
624 * @param s media file handle
625 * @param pkt is filled
626 * @return 0 if OK. AVERROR_xxx if error.
627 */
628 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
629 {
630 return s->iformat->read_packet(s, pkt);
631 }
632
633 /**********************************************************/
634
635 /**
636 * Get the number of samples of an audio frame. Return (-1) if error.
637 */
638 static int get_audio_frame_size(AVCodecContext *enc, int size)
639 {
640 int frame_size;
641
642 if (enc->frame_size <= 1) {
643 /* specific hack for pcm codecs because no frame size is
644 provided */
645 switch(enc->codec_id) {
646 case CODEC_ID_PCM_S16LE:
647 case CODEC_ID_PCM_S16BE:
648 case CODEC_ID_PCM_U16LE:
649 case CODEC_ID_PCM_U16BE:
650 if (enc->channels == 0)
651 return -1;
652 frame_size = size / (2 * enc->channels);
653 break;
654 case CODEC_ID_PCM_S8:
655 case CODEC_ID_PCM_U8:
656 case CODEC_ID_PCM_MULAW:
657 case CODEC_ID_PCM_ALAW:
658 if (enc->channels == 0)
659 return -1;
660 frame_size = size / (enc->channels);
661 break;
662 default:
663 /* used for example by ADPCM codecs */
664 if (enc->bit_rate == 0)
665 return -1;
666 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
667 break;
668 }
669 } else {
670 frame_size = enc->frame_size;
671 }
672 return frame_size;
673 }
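/*
 * Worked example (added for illustration): for stereo CODEC_ID_PCM_S16LE a
 * packet of 4096 bytes yields 4096 / (2 bytes * 2 channels) = 1024 samples,
 * while for an ADPCM codec with bit_rate 352800 and sample_rate 44100 a
 * 1024-byte packet gives (1024 * 8 * 44100) / 352800 = 1024 samples.
 */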
674
675
676 /**
677 * Compute the frame duration in seconds as the fraction *pnum / *pden; both are set to 0 if it is not available.
678 */
679 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
680 AVCodecParserContext *pc, AVPacket *pkt)
681 {
682 int frame_size;
683
684 *pnum = 0;
685 *pden = 0;
686 switch(st->codec->codec_type) {
687 case CODEC_TYPE_VIDEO:
688 if(st->time_base.num*1000LL > st->time_base.den){
689 *pnum = st->time_base.num;
690 *pden = st->time_base.den;
691 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
692 *pnum = st->codec->time_base.num;
693 *pden = st->codec->time_base.den;
694 if (pc && pc->repeat_pict) {
695 *pden *= 2;
696 *pnum = (*pnum) * (2 + pc->repeat_pict);
697 }
698 }
699 break;
700 case CODEC_TYPE_AUDIO:
701 frame_size = get_audio_frame_size(st->codec, pkt->size);
702 if (frame_size < 0)
703 break;
704 *pnum = frame_size;
705 *pden = st->codec->sample_rate;
706 break;
707 default:
708 break;
709 }
710 }
711
712 static int is_intra_only(AVCodecContext *enc){
713 if(enc->codec_type == CODEC_TYPE_AUDIO){
714 return 1;
715 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
716 switch(enc->codec_id){
717 case CODEC_ID_MJPEG:
718 case CODEC_ID_MJPEGB:
719 case CODEC_ID_LJPEG:
720 case CODEC_ID_RAWVIDEO:
721 case CODEC_ID_DVVIDEO:
722 case CODEC_ID_HUFFYUV:
723 case CODEC_ID_FFVHUFF:
724 case CODEC_ID_ASV1:
725 case CODEC_ID_ASV2:
726 case CODEC_ID_VCR1:
727 return 1;
728 default: break;
729 }
730 }
731 return 0;
732 }
733
734 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
735 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
736 int64_t delta= last_ts - mask/2;
737 return ((lsb - delta)&mask) + delta;
738 }
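/*
 * Worked example (added for illustration): with 33-bit timestamps the mask is
 * (1LL<<33)-1 = 8589934591.  If the last full timestamp was 8589934000 and the
 * wrapped 33-bit value read from the stream is 100, then
 * lsb2full(100, 8589934000, 33) == 8589934692 == (1LL<<33) + 100, i.e. the
 * value is placed in the 2^33 window centered on last_ts.
 */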
739
740 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
741 AVCodecParserContext *pc, AVPacket *pkt)
742 {
743 int num, den, presentation_delayed;
744 /* handle wrapping */
745 if(st->cur_dts != AV_NOPTS_VALUE){
746 if(pkt->pts != AV_NOPTS_VALUE)
747 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
748 if(pkt->dts != AV_NOPTS_VALUE)
749 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
750 }
751
752 if (pkt->duration == 0) {
753 compute_frame_duration(&num, &den, st, pc, pkt);
754 if (den && num) {
755 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
756 }
757 }
758
759 if(is_intra_only(st->codec))
760 pkt->flags |= PKT_FLAG_KEY;
761
762 /* do we have a video B frame ? */
763 presentation_delayed = 0;
764 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
765 /* XXX: need has_b_frame, but cannot get it if the codec is
766 not initialized */
767 if (( st->codec->codec_id == CODEC_ID_H264
768 || st->codec->has_b_frames) &&
769 pc && pc->pict_type != FF_B_TYPE)
770 presentation_delayed = 1;
771 /* this may be redundant, but it shouldn't hurt */
772 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
773 presentation_delayed = 1;
774 }
775
776 if(st->cur_dts == AV_NOPTS_VALUE){
777 if(presentation_delayed) st->cur_dts = -pkt->duration;
778 else st->cur_dts = 0;
779 }
780
781 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
782 /* interpolate PTS and DTS if they are not present */
783 if (presentation_delayed) {
784 /* DTS = decompression time stamp */
785 /* PTS = presentation time stamp */
786 if (pkt->dts == AV_NOPTS_VALUE) {
787 /* if we know the last pts, use it */
788 if(st->last_IP_pts != AV_NOPTS_VALUE)
789 st->cur_dts = pkt->dts = st->last_IP_pts;
790 else
791 pkt->dts = st->cur_dts;
792 } else {
793 st->cur_dts = pkt->dts;
794 }
795 /* this is tricky: the dts must be incremented by the duration
796 of the frame we are displaying, i.e. the last I or P frame */
797 if (st->last_IP_duration == 0)
798 st->cur_dts += pkt->duration;
799 else
800 st->cur_dts += st->last_IP_duration;
801 st->last_IP_duration = pkt->duration;
802 st->last_IP_pts= pkt->pts;
803 /* cannot compute PTS if not present (we can compute it only
804 by knowing the future) */
805 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
806 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
807 int64_t old_diff= ABS(st->cur_dts - pkt->duration - pkt->pts);
808 int64_t new_diff= ABS(st->cur_dts - pkt->pts);
809 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
810 pkt->pts += pkt->duration;
811 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
812 }
813 }
814
815 /* presentation is not delayed : PTS and DTS are the same */
816 if (pkt->pts == AV_NOPTS_VALUE) {
817 if (pkt->dts == AV_NOPTS_VALUE) {
818 pkt->pts = st->cur_dts;
819 pkt->dts = st->cur_dts;
820 }
821 else {
822 st->cur_dts = pkt->dts;
823 pkt->pts = pkt->dts;
824 }
825 } else {
826 st->cur_dts = pkt->pts;
827 pkt->dts = pkt->pts;
828 }
829 st->cur_dts += pkt->duration;
830 }
831 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
832
833 /* update flags */
834 if (pc) {
835 pkt->flags = 0;
836 /* key frame computation */
837 switch(st->codec->codec_type) {
838 case CODEC_TYPE_VIDEO:
839 if (pc->pict_type == FF_I_TYPE)
840 pkt->flags |= PKT_FLAG_KEY;
841 break;
842 case CODEC_TYPE_AUDIO:
843 pkt->flags |= PKT_FLAG_KEY;
844 break;
845 default:
846 break;
847 }
848 }
849 }
850
851 void av_destruct_packet_nofree(AVPacket *pkt)
852 {
853 pkt->data = NULL; pkt->size = 0;
854 }
855
856 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
857 {
858 AVStream *st;
859 int len, ret, i;
860
861 for(;;) {
862 /* select current input stream component */
863 st = s->cur_st;
864 if (st) {
865 if (!st->need_parsing || !st->parser) {
866 /* no parsing needed: we just output the packet as is */
867 /* raw data support */
868 *pkt = s->cur_pkt;
869 compute_pkt_fields(s, st, NULL, pkt);
870 s->cur_st = NULL;
871 return 0;
872 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
873 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
874 s->cur_ptr, s->cur_len,
875 s->cur_pkt.pts, s->cur_pkt.dts);
876 s->cur_pkt.pts = AV_NOPTS_VALUE;
877 s->cur_pkt.dts = AV_NOPTS_VALUE;
878 /* increment read pointer */
879 s->cur_ptr += len;
880 s->cur_len -= len;
881
882 /* return packet if any */
883 if (pkt->size) {
884 got_packet:
885 pkt->duration = 0;
886 pkt->stream_index = st->index;
887 pkt->pts = st->parser->pts;
888 pkt->dts = st->parser->dts;
889 pkt->destruct = av_destruct_packet_nofree;
890 compute_pkt_fields(s, st, st->parser, pkt);
891 return 0;
892 }
893 } else {
894 /* free packet */
895 av_free_packet(&s->cur_pkt);
896 s->cur_st = NULL;
897 }
898 } else {
899 /* read next packet */
900 ret = av_read_packet(s, &s->cur_pkt);
901 if (ret < 0) {
902 if (ret == -EAGAIN)
903 return ret;
904 /* return the last frames, if any */
905 for(i = 0; i < s->nb_streams; i++) {
906 st = s->streams[i];
907 if (st->parser && st->need_parsing) {
908 av_parser_parse(st->parser, st->codec,
909 &pkt->data, &pkt->size,
910 NULL, 0,
911 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
912 if (pkt->size)
913 goto got_packet;
914 }
915 }
916 /* no more packets: really terminates parsing */
917 return ret;
918 }
919
920 st = s->streams[s->cur_pkt.stream_index];
921
922 s->cur_st = st;
923 s->cur_ptr = s->cur_pkt.data;
924 s->cur_len = s->cur_pkt.size;
925 if (st->need_parsing && !st->parser) {
926 st->parser = av_parser_init(st->codec->codec_id);
927 if (!st->parser) {
928 /* no parser available : just output the raw packets */
929 st->need_parsing = 0;
930 }
931 }
932 }
933 }
934 }
935
936 /**
937 * Return the next frame of a stream.
938 *
939 * The returned packet is valid
940 * until the next av_read_frame() or until av_close_input_file() and
941 * must be freed with av_free_packet. For video, the packet contains
942 * exactly one frame. For audio, it contains an integer number of
943 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
944 * data). If the audio frames have a variable size (e.g. MPEG audio),
945 * then it contains one frame.
946 *
947 * pkt->pts, pkt->dts and pkt->duration are always set to correct
948 * values in AV_TIME_BASE unit (and guessed if the format cannot
950 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
950 * has B frames, so it is better to rely on pkt->dts if you do not
951 * decompress the payload.
952 *
953 * @return 0 if OK, < 0 if error or end of file.
954 */
955 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
956 {
957 AVPacketList *pktl;
958
959 pktl = s->packet_buffer;
960 if (pktl) {
961 /* read packet from packet buffer, if there is data */
962 *pkt = pktl->pkt;
963 s->packet_buffer = pktl->next;
964 av_free(pktl);
965 return 0;
966 } else {
967 return av_read_frame_internal(s, pkt);
968 }
969 }
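/*
 * Example (illustrative sketch, not part of the original file): the usual
 * demuxing loop.  'ic' is an AVFormatContext opened with av_open_input_file().
 *
 *   AVPacket pkt;
 *   while (av_read_frame(ic, &pkt) >= 0) {
 *       if (pkt.stream_index == video_index)   // hypothetical stream index
 *           decode_video_packet(&pkt);         // hypothetical consumer
 *       av_free_packet(&pkt);
 *   }
 */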
970
971 /* XXX: suppress the packet queue */
972 static void flush_packet_queue(AVFormatContext *s)
973 {
974 AVPacketList *pktl;
975
976 for(;;) {
977 pktl = s->packet_buffer;
978 if (!pktl)
979 break;
980 s->packet_buffer = pktl->next;
981 av_free_packet(&pktl->pkt);
982 av_free(pktl);
983 }
984 }
985
986 /*******************************************************/
987 /* seek support */
988
989 int av_find_default_stream_index(AVFormatContext *s)
990 {
991 int i;
992 AVStream *st;
993
994 if (s->nb_streams <= 0)
995 return -1;
996 for(i = 0; i < s->nb_streams; i++) {
997 st = s->streams[i];
998 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
999 return i;
1000 }
1001 }
1002 return 0;
1003 }
1004
1005 /**
1006 * Flush the frame reader.
1007 */
1008 static void av_read_frame_flush(AVFormatContext *s)
1009 {
1010 AVStream *st;
1011 int i;
1012
1013 flush_packet_queue(s);
1014
1015 /* free previous packet */
1016 if (s->cur_st) {
1017 if (s->cur_st->parser)
1018 av_free_packet(&s->cur_pkt);
1019 s->cur_st = NULL;
1020 }
1021 /* fail safe */
1022 s->cur_ptr = NULL;
1023 s->cur_len = 0;
1024
1025 /* for each stream, reset read state */
1026 for(i = 0; i < s->nb_streams; i++) {
1027 st = s->streams[i];
1028
1029 if (st->parser) {
1030 av_parser_close(st->parser);
1031 st->parser = NULL;
1032 }
1033 st->last_IP_pts = AV_NOPTS_VALUE;
1034 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
1035 }
1036 }
1037
1038 /**
1039 * Updates cur_dts of all streams based on given timestamp and AVStream.
1040 *
1041 * Stream ref_st is left unchanged; all other streams get cur_dts set in their native time base.
1042 * Only needed for timestamp wrapping or if (dts not set and pts != dts).
1043 * @param timestamp new dts expressed in time_base of param ref_st
1044 * @param ref_st reference stream giving time_base of param timestamp
1045 */
1046 static void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1047 int i;
1048
1049 for(i = 0; i < s->nb_streams; i++) {
1050 AVStream *st = s->streams[i];
1051
1052 st->cur_dts = av_rescale(timestamp,
1053 st->time_base.den * (int64_t)ref_st->time_base.num,
1054 st->time_base.num * (int64_t)ref_st->time_base.den);
1055 }
1056 }
1057
1058 /**
1059 * Add an index entry into a sorted list, updating it if it is already there.
1060 *
1061 * @param timestamp timestamp in the timebase of the given stream
1062 */
1063 int av_add_index_entry(AVStream *st,
1064 int64_t pos, int64_t timestamp, int distance, int flags)
1065 {
1066 AVIndexEntry *entries, *ie;
1067 int index;
1068
1069 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1070 return -1;
1071
1072 entries = av_fast_realloc(st->index_entries,
1073 &st->index_entries_allocated_size,
1074 (st->nb_index_entries + 1) *
1075 sizeof(AVIndexEntry));
1076 if(!entries)
1077 return -1;
1078
1079 st->index_entries= entries;
1080
1081 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1082
1083 if(index<0){
1084 index= st->nb_index_entries++;
1085 ie= &entries[index];
1086 assert(index==0 || ie[-1].timestamp < timestamp);
1087 }else{
1088 ie= &entries[index];
1089 if(ie->timestamp != timestamp){
1090 if(ie->timestamp <= timestamp)
1091 return -1;
1092 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1093 st->nb_index_entries++;
1094 }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1095 distance= ie->min_distance;
1096 }
1097
1098 ie->pos = pos;
1099 ie->timestamp = timestamp;
1100 ie->min_distance= distance;
1101 ie->flags = flags;
1102
1103 return index;
1104 }
1105
1106 /**
1107 * build an index for raw streams using a parser.
1108 */
1109 static void av_build_index_raw(AVFormatContext *s)
1110 {
1111 AVPacket pkt1, *pkt = &pkt1;
1112 int ret;
1113 AVStream *st;
1114
1115 st = s->streams[0];
1116 av_read_frame_flush(s);
1117 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1118
1119 for(;;) {
1120 ret = av_read_frame(s, pkt);
1121 if (ret < 0)
1122 break;
1123 if (pkt->stream_index == 0 && st->parser &&
1124 (pkt->flags & PKT_FLAG_KEY)) {
1125 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1126 0, AVINDEX_KEYFRAME);
1127 }
1128 av_free_packet(pkt);
1129 }
1130 }
1131
1132 /**
1133 * Returns TRUE if we deal with a raw stream.
1134 *
1135 * Raw codec data and parsing needed.
1136 */
1137 static int is_raw_stream(AVFormatContext *s)
1138 {
1139 AVStream *st;
1140
1141 if (s->nb_streams != 1)
1142 return 0;
1143 st = s->streams[0];
1144 if (!st->need_parsing)
1145 return 0;
1146 return 1;
1147 }
1148
1149 /**
1150 * Gets the index for a specific timestamp.
1151 * @param flags if AVSEEK_FLAG_BACKWARD is set, the returned index corresponds to
1152 * the timestamp which is <= the requested one; otherwise it will be >=.
1153 * If AVSEEK_FLAG_ANY is set, any frame is accepted, otherwise only keyframes.
1155 * @return < 0 if no such timestamp could be found
1156 */
1157 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1158 int flags)
1159 {
1160 AVIndexEntry *entries= st->index_entries;
1161 int nb_entries= st->nb_index_entries;
1162 int a, b, m;
1163 int64_t timestamp;
1164
1165 a = - 1;
1166 b = nb_entries;
1167
1168 while (b - a > 1) {
1169 m = (a + b) >> 1;
1170 timestamp = entries[m].timestamp;
1171 if(timestamp >= wanted_timestamp)
1172 b = m;
1173 if(timestamp <= wanted_timestamp)
1174 a = m;
1175 }
1176 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1177
1178 if(!(flags & AVSEEK_FLAG_ANY)){
1179 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1180 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1181 }
1182 }
1183
1184 if(m == nb_entries)
1185 return -1;
1186 return m;
1187 }
1188
1189 #define DEBUG_SEEK
1190
1191 /**
1192 * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
1193 * This isn't supposed to be called directly by a user application, but by demuxers.
1194 * @param target_ts target timestamp in the time base of the given stream
1195 * @param stream_index stream number
1196 */
1197 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1198 AVInputFormat *avif= s->iformat;
1199 int64_t pos_min, pos_max, pos, pos_limit;
1200 int64_t ts_min, ts_max, ts;
1201 int64_t start_pos, filesize;
1202 int index, no_change;
1203 AVStream *st;
1204
1205 if (stream_index < 0)
1206 return -1;
1207
1208 #ifdef DEBUG_SEEK
1209 av_log(s, AV_LOG_DEBUG, "read_seek: %d %lld\n", stream_index, target_ts);
1210 #endif
1211
1212 ts_max=
1213 ts_min= AV_NOPTS_VALUE;
1214 pos_limit= -1; //gcc falsely says it may be uninitialized
1215
1216 st= s->streams[stream_index];
1217 if(st->index_entries){
1218 AVIndexEntry *e;
1219
1220 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1221 index= FFMAX(index, 0);
1222 e= &st->index_entries[index];
1223
1224 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1225 pos_min= e->pos;
1226 ts_min= e->timestamp;
1227 #ifdef DEBUG_SEEK
1228 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%llx dts_min=%lld\n",
1229 pos_min,ts_min);
1230 #endif
1231 }else{
1232 assert(index==0);
1233 }
1234
1235 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1236 assert(index < st->nb_index_entries);
1237 if(index >= 0){
1238 e= &st->index_entries[index];
1239 assert(e->timestamp >= target_ts);
1240 pos_max= e->pos;
1241 ts_max= e->timestamp;
1242 pos_limit= pos_max - e->min_distance;
1243 #ifdef DEBUG_SEEK
1244 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%llx pos_limit=0x%llx dts_max=%lld\n",
1245 pos_max,pos_limit, ts_max);
1246 #endif
1247 }
1248 }
1249
1250 if(ts_min == AV_NOPTS_VALUE){
1251 pos_min = s->data_offset;
1252 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1253 if (ts_min == AV_NOPTS_VALUE)
1254 return -1;
1255 }
1256
1257 if(ts_max == AV_NOPTS_VALUE){
1258 int step= 1024;
1259 filesize = url_fsize(&s->pb);
1260 pos_max = filesize - 1;
1261 do{
1262 pos_max -= step;
1263 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1264 step += step;
1265 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1266 if (ts_max == AV_NOPTS_VALUE)
1267 return -1;
1268
1269 for(;;){
1270 int64_t tmp_pos= pos_max + 1;
1271 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1272 if(tmp_ts == AV_NOPTS_VALUE)
1273 break;
1274 ts_max= tmp_ts;
1275 pos_max= tmp_pos;
1276 if(tmp_pos >= filesize)
1277 break;
1278 }
1279 pos_limit= pos_max;
1280 }
1281
1282 no_change=0;
1283 while (pos_min < pos_limit) {
1284 #ifdef DEBUG_SEEK
1285 av_log(s, AV_LOG_DEBUG, "pos_min=0x%llx pos_max=0x%llx dts_min=%lld dts_max=%lld\n",
1286 pos_min, pos_max,
1287 ts_min, ts_max);
1288 #endif
1289 assert(pos_limit <= pos_max);
1290
1291 if(no_change==0){
1292 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1293 // interpolate position (better than dichotomy)
1294 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1295 + pos_min - approximate_keyframe_distance;
1296 }else if(no_change==1){
1297 // bisection, if interpolation failed to change min or max pos last time
1298 pos = (pos_min + pos_limit)>>1;
1299 }else{
1300 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1301 pos=pos_min;
1302 }
1303 if(pos <= pos_min)
1304 pos= pos_min + 1;
1305 else if(pos > pos_limit)
1306 pos= pos_limit;
1307 start_pos= pos;
1308
1309 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1310 if(pos == pos_max)
1311 no_change++;
1312 else
1313 no_change=0;
1314 #ifdef DEBUG_SEEK
1315 av_log(s, AV_LOG_DEBUG, "%Ld %Ld %Ld / %Ld %Ld %Ld target:%Ld limit:%Ld start:%Ld noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1316 #endif
1317 assert(ts != AV_NOPTS_VALUE);
1318 if (target_ts <= ts) {
1319 pos_limit = start_pos - 1;
1320 pos_max = pos;
1321 ts_max = ts;
1322 }
1323 if (target_ts >= ts) {
1324 pos_min = pos;
1325 ts_min = ts;
1326 }
1327 }
1328
1329 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1330 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1331 #ifdef DEBUG_SEEK
1332 pos_min = pos;
1333 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1334 pos_min++;
1335 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1336 av_log(s, AV_LOG_DEBUG, "pos=0x%llx %lld<=%lld<=%lld\n",
1337 pos, ts_min, target_ts, ts_max);
1338 #endif
1339 /* do the seek */
1340 url_fseek(&s->pb, pos, SEEK_SET);
1341
1342 av_update_cur_dts(s, st, ts);
1343
1344 return 0;
1345 }
1346
1347 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1348 int64_t pos_min, pos_max;
1349 #if 0
1350 AVStream *st;
1351
1352 if (stream_index < 0)
1353 return -1;
1354
1355 st= s->streams[stream_index];
1356 #endif
1357
1358 pos_min = s->data_offset;
1359 pos_max = url_fsize(&s->pb) - 1;
1360
1361 if (pos < pos_min) pos= pos_min;
1362 else if(pos > pos_max) pos= pos_max;
1363
1364 url_fseek(&s->pb, pos, SEEK_SET);
1365
1366 #if 0
1367 av_update_cur_dts(s, st, ts);
1368 #endif
1369 return 0;
1370 }
1371
1372 static int av_seek_frame_generic(AVFormatContext *s,
1373 int stream_index, int64_t timestamp, int flags)
1374 {
1375 int index;
1376 AVStream *st;
1377 AVIndexEntry *ie;
1378
1379 if (!s->index_built) {
1380 if (is_raw_stream(s)) {
1381 av_build_index_raw(s);
1382 } else {
1383 return -1;
1384 }
1385 s->index_built = 1;
1386 }
1387
1388 st = s->streams[stream_index];
1389 index = av_index_search_timestamp(st, timestamp, flags);
1390 if (index < 0)
1391 return -1;
1392
1393 /* now we have found the index, we can seek */
1394 ie = &st->index_entries[index];
1395 av_read_frame_flush(s);
1396 url_fseek(&s->pb, ie->pos, SEEK_SET);
1397
1398 av_update_cur_dts(s, st, ie->timestamp);
1399
1400 return 0;
1401 }
1402
1403 /**
1404 * Seek to the key frame at timestamp.
1405 * 'timestamp' in 'stream_index'.
1406 * @param stream_index If stream_index is (-1), a default
1407 * stream is selected, and timestamp is automatically converted
1408 * from AV_TIME_BASE units to the stream specific time_base.
1409 * @param timestamp timestamp in AVStream.time_base units
1410 * or if there is no stream specified then in AV_TIME_BASE units
1411 * @param flags flags which select direction and seeking mode
1412 * @return >= 0 on success
1413 */
1414 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1415 {
1416 int ret;
1417 AVStream *st;
1418
1419 av_read_frame_flush(s);
1420
1421 if(flags & AVSEEK_FLAG_BYTE)
1422 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1423
1424 if(stream_index < 0){
1425 stream_index= av_find_default_stream_index(s);
1426 if(stream_index < 0)
1427 return -1;
1428
1429 st= s->streams[stream_index];
1430 /* timestamp for default must be expressed in AV_TIME_BASE units */
1431 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1432 }
1433 st= s->streams[stream_index];
1434
1435 /* first, we try the format specific seek */
1436 if (s->iformat->read_seek)
1437 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1438 else
1439 ret = -1;
1440 if (ret >= 0) {
1441 return 0;
1442 }
1443
1444 if(s->iformat->read_timestamp)
1445 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1446 else
1447 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1448 }
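/*
 * Example (illustrative sketch, not part of the original file): seeking to
 * 10 seconds.  With stream_index == -1 the timestamp is expressed in
 * AV_TIME_BASE units, and AVSEEK_FLAG_BACKWARD picks the keyframe at or
 * before the target.
 *
 *   int64_t ts = 10 * (int64_t)AV_TIME_BASE;
 *   if (av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD) < 0)
 *       return -1;   // seeking not supported or target not found
 */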
1449
1450 /*******************************************************/
1451
1452 /**
1453 * Returns TRUE if the file has accurate timings for at least one component.
1454 *
1455 * @return TRUE if the stream has accurate timings for at least one component.
1456 */
1457 static int av_has_timings(AVFormatContext *ic)
1458 {
1459 int i;
1460 AVStream *st;
1461
1462 for(i = 0;i < ic->nb_streams; i++) {
1463 st = ic->streams[i];
1464 if (st->start_time != AV_NOPTS_VALUE &&
1465 st->duration != AV_NOPTS_VALUE)
1466 return 1;
1467 }
1468 return 0;
1469 }
1470
1471 /**
1472 * Estimate the stream timings from those of the individual components.
1473 *
1474 * Also computes the global bitrate if possible.
1475 */
1476 static void av_update_stream_timings(AVFormatContext *ic)
1477 {
1478 int64_t start_time, start_time1, end_time, end_time1;
1479 int i;
1480 AVStream *st;
1481
1482 start_time = MAXINT64;
1483 end_time = MININT64;
1484 for(i = 0;i < ic->nb_streams; i++) {
1485 st = ic->streams[i];
1486 if (st->start_time != AV_NOPTS_VALUE) {
1487 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1488 if (start_time1 < start_time)
1489 start_time = start_time1;
1490 if (st->duration != AV_NOPTS_VALUE) {
1491 end_time1 = start_time1
1492 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1493 if (end_time1 > end_time)
1494 end_time = end_time1;
1495 }
1496 }
1497 }
1498 if (start_time != MAXINT64) {
1499 ic->start_time = start_time;
1500 if (end_time != MININT64) {
1501 ic->duration = end_time - start_time;
1502 if (ic->file_size > 0) {
1503 /* compute the bit rate */
1504 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1505 (double)ic->duration;
1506 }
1507 }
1508 }
1509
1510 }
1511
1512 static void fill_all_stream_timings(AVFormatContext *ic)
1513 {
1514 int i;
1515 AVStream *st;
1516
1517 av_update_stream_timings(ic);
1518 for(i = 0;i < ic->nb_streams; i++) {
1519 st = ic->streams[i];
1520 if (st->start_time == AV_NOPTS_VALUE) {
1521 if(ic->start_time != AV_NOPTS_VALUE)
1522 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1523 if(ic->duration != AV_NOPTS_VALUE)
1524 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1525 }
1526 }
1527 }
1528
1529 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1530 {
1531 int64_t filesize, duration;
1532 int bit_rate, i;
1533 AVStream *st;
1534
1535 /* if bit_rate is already set, we believe it */
1536 if (ic->bit_rate == 0) {
1537 bit_rate = 0;
1538 for(i=0;i<ic->nb_streams;i++) {
1539 st = ic->streams[i];
1540 bit_rate += st->codec->bit_rate;
1541 }
1542 ic->bit_rate = bit_rate;
1543 }
1544
1545 /* if duration is already set, we believe it */
1546 if (ic->duration == AV_NOPTS_VALUE &&
1547 ic->bit_rate != 0 &&
1548 ic->file_size != 0) {
1549 filesize = ic->file_size;
1550 if (filesize > 0) {
1551 for(i = 0; i < ic->nb_streams; i++) {
1552 st = ic->streams[i];
1553 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1554 if (st->start_time == AV_NOPTS_VALUE ||
1555 st->duration == AV_NOPTS_VALUE) {
1556 st->start_time = 0;
1557 st->duration = duration;
1558 }
1559 }
1560 }
1561 }
1562 }
1563
1564 #define DURATION_MAX_READ_SIZE 250000
1565
1566 /* only usable for MPEG-PS streams */
1567 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1568 {
1569 AVPacket pkt1, *pkt = &pkt1;
1570 AVStream *st;
1571 int read_size, i, ret;
1572 int64_t start_time, end_time, end_time1;
1573 int64_t filesize, offset, duration;
1574
1575 /* free previous packet */
1576 if (ic->cur_st && ic->cur_st->parser)
1577 av_free_packet(&ic->cur_pkt);
1578 ic->cur_st = NULL;
1579
1580 /* flush packet queue */
1581 flush_packet_queue(ic);
1582
1583 for(i=0;i<ic->nb_streams;i++) {
1584 st = ic->streams[i];
1585 if (st->parser) {
1586 av_parser_close(st->parser);
1587 st->parser= NULL;
1588 }
1589 }
1590
1591 /* we read the first packets to get the first PTS (not fully
1592 accurate, but it is enough now) */
1593 url_fseek(&ic->pb, 0, SEEK_SET);
1594 read_size = 0;
1595 for(;;) {
1596 if (read_size >= DURATION_MAX_READ_SIZE)
1597 break;
1598 /* if all info is available, we can stop */
1599 for(i = 0;i < ic->nb_streams; i++) {
1600 st = ic->streams[i];
1601 if (st->start_time == AV_NOPTS_VALUE)
1602 break;
1603 }
1604 if (i == ic->nb_streams)
1605 break;
1606
1607 ret = av_read_packet(ic, pkt);
1608 if (ret != 0)
1609 break;
1610 read_size += pkt->size;
1611 st = ic->streams[pkt->stream_index];
1612 if (pkt->pts != AV_NOPTS_VALUE) {
1613 if (st->start_time == AV_NOPTS_VALUE)
1614 st->start_time = pkt->pts;
1615 }
1616 av_free_packet(pkt);
1617 }
1618
1619 /* estimate the end time (duration) */
1620 /* XXX: may need to support wrapping */
1621 filesize = ic->file_size;
1622 offset = filesize - DURATION_MAX_READ_SIZE;
1623 if (offset < 0)
1624 offset = 0;
1625
1626 url_fseek(&ic->pb, offset, SEEK_SET);
1627 read_size = 0;
1628 for(;;) {
1629 if (read_size >= DURATION_MAX_READ_SIZE)
1630 break;
1631 /* if all info is available, we can stop */
1632 for(i = 0;i < ic->nb_streams; i++) {
1633 st = ic->streams[i];
1634 if (st->duration == AV_NOPTS_VALUE)
1635 break;
1636 }
1637 if (i == ic->nb_streams)
1638 break;
1639
1640 ret = av_read_packet(ic, pkt);
1641 if (ret != 0)
1642 break;
1643 read_size += pkt->size;
1644 st = ic->streams[pkt->stream_index];
1645 if (pkt->pts != AV_NOPTS_VALUE) {
1646 end_time = pkt->pts;
1647 duration = end_time - st->start_time;
1648 if (duration > 0) {
1649 if (st->duration == AV_NOPTS_VALUE ||
1650 st->duration < duration)
1651 st->duration = duration;
1652 }
1653 }
1654 av_free_packet(pkt);
1655 }
1656
1657 fill_all_stream_timings(ic);
1658
1659 url_fseek(&ic->pb, 0, SEEK_SET);
1660 }
1661
1662 static void av_estimate_timings(AVFormatContext *ic)
1663 {
1664 int64_t file_size;
1665
1666 /* get the file size, if possible */
1667 if (ic->iformat->flags & AVFMT_NOFILE) {
1668 file_size = 0;
1669 } else {
1670 file_size = url_fsize(&ic->pb);
1671 if (file_size < 0)
1672 file_size = 0;
1673 }
1674 ic->file_size = file_size;
1675
1676 if ((ic->iformat == &mpegps_demux || ic->iformat == &mpegts_demux) && file_size && !ic->pb.is_streamed) {
1677 /* get accurate estimate from the PTSes */
1678 av_estimate_timings_from_pts(ic);
1679 } else if (av_has_timings(ic)) {
1680 /* at least one components has timings - we use them for all
1681 the components */
1682 fill_all_stream_timings(ic);
1683 } else {
1684 /* less precise: use bit rate info */
1685 av_estimate_timings_from_bit_rate(ic);
1686 }
1687 av_update_stream_timings(ic);
1688
1689 #if 0
1690 {
1691 int i;
1692 AVStream *st;
1693 for(i = 0;i < ic->nb_streams; i++) {
1694 st = ic->streams[i];
1695 printf("%d: start_time: %0.3f duration: %0.3f\n",
1696 i, (double)st->start_time / AV_TIME_BASE,
1697 (double)st->duration / AV_TIME_BASE);
1698 }
1699 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1700 (double)ic->start_time / AV_TIME_BASE,
1701 (double)ic->duration / AV_TIME_BASE,
1702 ic->bit_rate / 1000);
1703 }
1704 #endif
1705 }
1706
1707 static int has_codec_parameters(AVCodecContext *enc)
1708 {
1709 int val;
1710 switch(enc->codec_type) {
1711 case CODEC_TYPE_AUDIO:
1712 val = enc->sample_rate;
1713 break;
1714 case CODEC_TYPE_VIDEO:
1715 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1716 break;
1717 default:
1718 val = 1;
1719 break;
1720 }
1721 return (val != 0);
1722 }
1723
1724 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1725 {
1726 int16_t *samples;
1727 AVCodec *codec;
1728 int got_picture, ret=0;
1729 AVFrame picture;
1730
1731 if(!st->codec->codec){
1732 codec = avcodec_find_decoder(st->codec->codec_id);
1733 if (!codec)
1734 return -1;
1735 ret = avcodec_open(st->codec, codec);
1736 if (ret < 0)
1737 return ret;
1738 }
1739
1740 if(!has_codec_parameters(st->codec)){
1741 switch(st->codec->codec_type) {
1742 case CODEC_TYPE_VIDEO:
1743 ret = avcodec_decode_video(st->codec, &picture,
1744 &got_picture, (uint8_t *)data, size);
1745 break;
1746 case CODEC_TYPE_AUDIO:
1747 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1748 if (!samples)
1749 goto fail;
1750 ret = avcodec_decode_audio(st->codec, samples,
1751 &got_picture, (uint8_t *)data, size);
1752 av_free(samples);
1753 break;
1754 default:
1755 break;
1756 }
1757 }
1758 fail:
1759 return ret;
1760 }
1761
1762 /* absolute maximum size we read until we abort */
1763 #define MAX_READ_SIZE 5000000
1764
1765 /* maximum duration until we stop analysing the stream */
1766 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 2.0))
1767
1768 /**
1769 * Read the beginning of a media file to get stream information. This
1770 * is useful for file formats with no headers such as MPEG. This
1771 * function also computes the real frame rate in case of MPEG-2 repeat
1772 * frame mode.
1773 *
1774 * @param ic media file handle
1775 * @return >=0 if OK. AVERROR_xxx if error.
1776 * @todo let the user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
1777 */
1778 int av_find_stream_info(AVFormatContext *ic)
1779 {
1780 int i, count, ret, read_size;
1781 AVStream *st;
1782 AVPacket pkt1, *pkt;
1783 AVPacketList *pktl=NULL, **ppktl;
1784 int64_t last_dts[MAX_STREAMS];
1785 int64_t duration_sum[MAX_STREAMS];
1786 int duration_count[MAX_STREAMS]={0};
1787
1788 for(i=0;i<ic->nb_streams;i++) {
1789 st = ic->streams[i];
1790 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1791 /* if(!st->time_base.num)
1792 st->time_base= */
1793 if(!st->codec->time_base.num)
1794 st->codec->time_base= st->time_base;
1795 }
1796 //only for the split stuff
1797 if (!st->parser) {
1798 st->parser = av_parser_init(st->codec->codec_id);
1799 }
1800 }
1801
1802 for(i=0;i<MAX_STREAMS;i++){
1803 last_dts[i]= AV_NOPTS_VALUE;
1804 duration_sum[i]= INT64_MAX;
1805 }
1806
1807 count = 0;
1808 read_size = 0;
1809 ppktl = &ic->packet_buffer;
1810 for(;;) {
1811 /* check if one codec still needs to be handled */
1812 for(i=0;i<ic->nb_streams;i++) {
1813 st = ic->streams[i];
1814 if (!has_codec_parameters(st->codec))
1815 break;
1816 /* variable fps and no guess at the real fps */
1817 if( st->codec->time_base.den >= 1000LL*st->codec->time_base.num
1818 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1819 break;
1820 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1821 break;
1822 }
1823 if (i == ic->nb_streams) {
1824 /* NOTE: if the format has no header, then we need to read
1825 some packets to get most of the streams, so we cannot
1826 stop here */
1827 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1828 /* if we found the info for all the codecs, we can stop */
1829 ret = count;
1830 break;
1831 }
1832 } else {
1833 /* we did not get all the codec info, but we read too much data */
1834 if (read_size >= MAX_READ_SIZE) {
1835 ret = count;
1836 break;
1837 }
1838 }
1839
1840 /* NOTE: a new stream can be added there if no header in file
1841 (AVFMTCTX_NOHEADER) */
1842 ret = av_read_frame_internal(ic, &pkt1);
1843 if (ret < 0) {
1844 /* EOF or error */
1845 ret = -1; /* we could not have all the codec parameters before EOF */
1846 for(i=0;i<ic->nb_streams;i++) {
1847 st = ic->streams[i];
1848 if (!has_codec_parameters(st->codec))
1849 break;
1850 }
1851 if (i == ic->nb_streams)
1852 ret = 0;
1853 break;
1854 }
1855
1856 pktl = av_mallocz(sizeof(AVPacketList));
1857 if (!pktl) {
1858 ret = AVERROR_NOMEM;
1859 break;
1860 }
1861
1862 /* add the packet in the buffered packet list */
1863 *ppktl = pktl;
1864 ppktl = &pktl->next;
1865
1866 pkt = &pktl->pkt;
1867 *pkt = pkt1;
1868
1869 /* duplicate the packet */
1870 if (av_dup_packet(pkt) < 0) {
1871 ret = AVERROR_NOMEM;
1872 break;
1873 }
1874
1875 read_size += pkt->size;
1876
1877 st = ic->streams[pkt->stream_index];
1878 st->codec_info_duration += pkt->duration;
1879 if (pkt->duration != 0)
1880 st->codec_info_nb_frames++;
1881
1882 {
1883 int index= pkt->stream_index;
1884 int64_t last= last_dts[index];
1885 int64_t duration= pkt->dts - last;
1886
1887 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1888 if(duration*duration_count[index]*10/9 < duration_sum[index]){
1889 duration_sum[index]= duration;
1890 duration_count[index]=1;
1891 }else{
1892 int factor= av_rescale(duration, duration_count[index], duration_sum[index]);
1893 duration_sum[index] += duration;
1894 duration_count[index]+= factor;
1895 }
1896 if(st->codec_info_nb_frames == 0 && 0)
1897 st->codec_info_duration += duration;
1898 }
1899 last_dts[pkt->stream_index]= pkt->dts;
1900 }
1901 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1902 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1903 if(i){
1904 st->codec->extradata_size= i;
1905 st->codec->extradata= av_malloc(st->codec->extradata_size);
1906 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1907 }
1908 }
1909
1910 /* if still no information, we try to open the codec and to
1911 decompress the frame. We try to avoid that in most cases as
1912 it takes longer and uses more memory. For MPEG4, we need to
1913 decompress for Quicktime. */
1914 if (!has_codec_parameters(st->codec) /*&&
1915 (st->codec->codec_id == CODEC_ID_FLV1 ||
1916 st->codec->codec_id == CODEC_ID_H264 ||
1917 st->codec->codec_id == CODEC_ID_H263 ||
1918 st->codec->codec_id == CODEC_ID_H261 ||
1919 st->codec->codec_id == CODEC_ID_VORBIS ||
1920 st->codec->codec_id == CODEC_ID_MJPEG ||
1921 st->codec->codec_id == CODEC_ID_PNG ||
1922 st->codec->codec_id == CODEC_ID_PAM ||
1923 st->codec->codec_id == CODEC_ID_PGM ||
1924 st->codec->codec_id == CODEC_ID_PGMYUV ||
1925 st->codec->codec_id == CODEC_ID_PBM ||
1926 st->codec->codec_id == CODEC_ID_PPM ||
1927 st->codec->codec_id == CODEC_ID_SHORTEN ||
1928 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1929 try_decode_frame(st, pkt->data, pkt->size);
1930
1931 if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
1932 break;
1933 }
1934 count++;
1935 }
1936
1937 // close codecs which were opened in try_decode_frame()
1938 for(i=0;i<ic->nb_streams;i++) {
1939 st = ic->streams[i];
1940 if(st->codec->codec)
1941 avcodec_close(st->codec);
1942 }
1943 for(i=0;i<ic->nb_streams;i++) {
1944 st = ic->streams[i];
1945 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1946 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
1947 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
1948
1949 if(duration_count[i] && st->codec->time_base.num*1000LL <= st->codec->time_base.den &&
1950 st->time_base.num*duration_sum[i]/duration_count[i]*1000LL > st->time_base.den){
1951 AVRational fps1;
1952 int64_t num, den;
1953
1954 num= st->time_base.den*duration_count[i];
1955 den= st->time_base.num*duration_sum[i];
1956
1957 av_reduce(&fps1.num, &fps1.den, num*1001, den*1000, FFMAX(st->time_base.den, st->time_base.num)/4);
1958 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, den, FFMAX(st->time_base.den, st->time_base.num)/4);
1959 if(fps1.num < st->r_frame_rate.num && fps1.den == 1 && (fps1.num==24 || fps1.num==30)){ //FIXME better decision
1960 st->r_frame_rate.num= fps1.num*1000;
1961 st->r_frame_rate.den= fps1.den*1001;
1962 }
1963 }
1964
1965 /* set real frame rate info */
1966 /* compute the real frame rate for telecine */
1967 if ((st->codec->codec_id == CODEC_ID_MPEG1VIDEO ||
1968 st->codec->codec_id == CODEC_ID_MPEG2VIDEO) &&
1969 st->codec->sub_id == 2) {
1970 if (st->codec_info_nb_frames >= 20) {
1971 float coded_frame_rate, est_frame_rate;
1972 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
1973 (double)st->codec_info_duration ;
1974 coded_frame_rate = 1.0/av_q2d(st->codec->time_base);
1975 #if 0
1976 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
1977 coded_frame_rate, est_frame_rate);
1978 #endif
1979 /* if we detect that it could be a telecine, we
1980 signal it. It would be better to do it at a
1981 higher level as it can change in a film */
1982 if (coded_frame_rate >= 24.97 &&
1983 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
1984 st->r_frame_rate = (AVRational){24000, 1001};
1985 }
1986 }
1987 }
1988 /* if no real frame rate, use the codec one */
1989 if (!st->r_frame_rate.num){
1990 st->r_frame_rate.num = st->codec->time_base.den;
1991 st->r_frame_rate.den = st->codec->time_base.num;
1992 }
1993 }
1994 }
1995
1996 av_estimate_timings(ic);
1997 #if 0
1998 /* correct DTS for b frame streams with no timestamps */
1999 for(i=0;i<ic->nb_streams;i++) {
2000 st = ic->streams[i];
2001 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2002 if(b-frames){
2003 ppktl = &ic->packet_buffer;
2004 while(ppkt1){
2005 if(ppkt1->stream_index != i)
2006 continue;
2007 if(ppkt1->pkt->dts < 0)
2008 break;
2009 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2010 break;
2011 ppkt1->pkt->dts -= delta;
2012 ppkt1= ppkt1->next;
2013 }
2014 if(ppkt1)
2015 continue;
2016 st->cur_dts -= delta;
2017 }
2018 }
2019 }
2020 #endif
2021 return ret;
2022 }
2023
2024 /*******************************************************/
2025
2026 /**
2027 * Start playing a network-based stream (e.g. an RTSP stream) at the
2028 * current position.
2029 */
2030 int av_read_play(AVFormatContext *s)
2031 {
2032 if (!s->iformat->read_play)
2033 return AVERROR_NOTSUPP;
2034 return s->iformat->read_play(s);
2035 }
2036
2037 /**
2038  * Pause a network-based stream (e.g. an RTSP stream).
2039 *
2040 * Use av_read_play() to resume it.
2041 */
2042 int av_read_pause(AVFormatContext *s)
2043 {
2044 if (!s->iformat->read_pause)
2045 return AVERROR_NOTSUPP;
2046 return s->iformat->read_pause(s);
2047 }
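
/* A minimal usage sketch for the play/pause calls above: toggling playback on a
 * network stream. The AVFormatContext "ic" is assumed to come from an earlier
 * av_open_input_file() call (defined elsewhere in libavformat); formats without
 * read_play/read_pause simply return AVERROR_NOTSUPP. Illustrative only, hence
 * kept inside #if 0. */
#if 0
static void toggle_pause(AVFormatContext *ic, int paused)
{
    int err = paused ? av_read_pause(ic) : av_read_play(ic);
    if (err == AVERROR_NOTSUPP)
        av_log(NULL, AV_LOG_INFO, "pause/play not supported by this format\n");
    else if (err < 0)
        av_log(NULL, AV_LOG_ERROR, "pause/play request failed\n");
}
#endif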
2048
2049 /**
2050 * Close a media file (but not its codecs).
2051 *
2052 * @param s media file handle
2053 */
2054 void av_close_input_file(AVFormatContext *s)
2055 {
2056 int i, must_open_file;
2057 AVStream *st;
2058
2059 /* free previous packet */
2060 if (s->cur_st && s->cur_st->parser)
2061 av_free_packet(&s->cur_pkt);
2062
2063 if (s->iformat->read_close)
2064 s->iformat->read_close(s);
2065 for(i=0;i<s->nb_streams;i++) {
2066 /* free all data in a stream component */
2067 st = s->streams[i];
2068 if (st->parser) {
2069 av_parser_close(st->parser);
2070 }
2071 av_free(st->index_entries);
2072 av_free(st->codec);
2073 av_free(st);
2074 }
2075 flush_packet_queue(s);
2076 must_open_file = 1;
2077 if (s->iformat->flags & AVFMT_NOFILE) {
2078 must_open_file = 0;
2079 }
2080 if (must_open_file) {
2081 url_fclose(&s->pb);
2082 }
2083 av_freep(&s->priv_data);
2084 av_free(s);
2085 }
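
/* Illustrative demuxing loop showing where av_close_input_file() fits in; a
 * sketch only, assuming av_open_input_file(), av_find_stream_info() and
 * av_read_frame() from elsewhere in libavformat behave as usual. */
#if 0
static int dump_packets(const char *filename)
{
    AVFormatContext *ic;
    AVPacket pkt;

    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return -1;
    if (av_find_stream_info(ic) < 0) {
        av_close_input_file(ic);
        return -1;
    }
    while (av_read_frame(ic, &pkt) >= 0) {
        av_pkt_dump(stdout, &pkt, 0);
        av_free_packet(&pkt);
    }
    av_close_input_file(ic); /* frees the streams, their codec contexts and the context itself */
    return 0;
}
#endif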
2086
2087 /**
2088 * Add a new stream to a media file.
2089 *
2090  * Can only be called in the read_header() function. If the flag
2091  * AVFMTCTX_NOHEADER is set in the format context, then new streams
2092  * can also be added in read_packet().
2093 *
2094 * @param s media file handle
2095 * @param id file format dependent stream id
2096 */
2097 AVStream *av_new_stream(AVFormatContext *s, int id)
2098 {
2099 AVStream *st;
2100
2101 if (s->nb_streams >= MAX_STREAMS)
2102 return NULL;
2103
2104 st = av_mallocz(sizeof(AVStream));
2105 if (!st)
2106 return NULL;
2107
2108 st->codec= avcodec_alloc_context();
2109 if (s->iformat) {
2110 /* no default bitrate if decoding */
2111 st->codec->bit_rate = 0;
2112 }
2113 st->index = s->nb_streams;
2114 st->id = id;
2115 st->start_time = AV_NOPTS_VALUE;
2116 st->duration = AV_NOPTS_VALUE;
2117 st->cur_dts = AV_NOPTS_VALUE;
2118
2119     /* default pts settings are MPEG-like */
2120 av_set_pts_info(st, 33, 1, 90000);
2121 st->last_IP_pts = AV_NOPTS_VALUE;
2122
2123 s->streams[s->nb_streams++] = st;
2124 return st;
2125 }
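
/* Sketch of how a demuxer's read_header() callback would typically use
 * av_new_stream(); the codec parameters filled in here are placeholders,
 * not taken from any particular format. */
#if 0
static int example_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVStream *st = av_new_stream(s, 0);
    if (!st)
        return AVERROR_NOMEM;
    st->codec->codec_type  = CODEC_TYPE_AUDIO;
    st->codec->codec_id    = CODEC_ID_PCM_S16LE; /* placeholder codec */
    st->codec->sample_rate = 44100;
    st->codec->channels    = 2;
    av_set_pts_info(st, 64, 1, st->codec->sample_rate); /* pts in sample units */
    return 0;
}
#endif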
2126
2127 /************************************************************/
2128 /* output media file */
2129
2130 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2131 {
2132 int ret;
2133
2134 if (s->oformat->priv_data_size > 0) {
2135 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2136 if (!s->priv_data)
2137 return AVERROR_NOMEM;
2138 } else
2139 s->priv_data = NULL;
2140
2141 if (s->oformat->set_parameters) {
2142 ret = s->oformat->set_parameters(s, ap);
2143 if (ret < 0)
2144 return ret;
2145 }
2146 return 0;
2147 }
2148
2149 /**
2150 * allocate the stream private data and write the stream header to an
2151 * output media file
2152 *
2153 * @param s media file handle
2154 * @return 0 if OK. AVERROR_xxx if error.
2155 */
2156 int av_write_header(AVFormatContext *s)
2157 {
2158 int ret, i;
2159 AVStream *st;
2160
2161 ret = s->oformat->write_header(s);
2162 if (ret < 0)
2163 return ret;
2164
2165 /* init PTS generation */
2166 for(i=0;i<s->nb_streams;i++) {
2167 st = s->streams[i];
2168
2169 switch (st->codec->codec_type) {
2170 case CODEC_TYPE_AUDIO:
2171 av_frac_init(&st->pts, 0, 0,
2172 (int64_t)st->time_base.num * st->codec->sample_rate);
2173 break;
2174 case CODEC_TYPE_VIDEO:
2175 av_frac_init(&st->pts, 0, 0,
2176 (int64_t)st->time_base.num * st->codec->time_base.den);
2177 break;
2178 default:
2179 break;
2180 }
2181 }
2182 return 0;
2183 }
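
/* Minimal muxing preamble sketch: the required call order is av_set_parameters()
 * before av_write_header(). The output context "oc" is assumed to have been
 * allocated and given an oformat and an open ByteIOContext already (how that is
 * done is application specific and not shown here). */
#if 0
static int start_muxing(AVFormatContext *oc)
{
    if (av_set_parameters(oc, NULL) < 0)  /* NULL: no format specific parameters */
        return -1;
    return av_write_header(oc);           /* writes the header and inits PTS generation */
}
#endif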
2184
2185 //FIXME merge with compute_pkt_fields
2186 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2187 int b_frames = FFMAX(st->codec->has_b_frames, st->codec->max_b_frames);
2188 int num, den, frame_size;
2189
2190 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
2191
2192 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2193 return -1;*/
2194
2195 /* duration field */
2196 if (pkt->duration == 0) {
2197 compute_frame_duration(&num, &den, st, NULL, pkt);
2198 if (den && num) {
2199 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2200 }
2201 }
2202
2203 //XXX/FIXME this is a temporary hack until all encoders output pts
2204 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
2205 pkt->dts=
2206 // pkt->pts= st->cur_dts;
2207 pkt->pts= st->pts.val;
2208 }
2209
2210 //calculate dts from pts
2211 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2212 if(b_frames){
2213 if(st->last_IP_pts == AV_NOPTS_VALUE){
2214 st->last_IP_pts= -pkt->duration;
2215 }
2216 if(st->last_IP_pts < pkt->pts){
2217 pkt->dts= st->last_IP_pts;
2218 st->last_IP_pts= pkt->pts;
2219 }else
2220 pkt->dts= pkt->pts;
2221 }else
2222 pkt->dts= pkt->pts;
2223 }
2224
2225 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2226 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %Ld >= %Ld\n", st->cur_dts, pkt->dts);
2227 return -1;
2228 }
2229 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2230 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2231 return -1;
2232 }
2233
2234 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
2235 st->cur_dts= pkt->dts;
2236 st->pts.val= pkt->dts;
2237
2238 /* update pts */
2239 switch (st->codec->codec_type) {
2240 case CODEC_TYPE_AUDIO:
2241 frame_size = get_audio_frame_size(st->codec, pkt->size);
2242
2243 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2244 but it would be better if we had the real timestamps from the encoder */
2245 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2246 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2247 }
2248 break;
2249 case CODEC_TYPE_VIDEO:
2250 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2251 break;
2252 default:
2253 break;
2254 }
2255 return 0;
2256 }
2257
2258 static void truncate_ts(AVStream *st, AVPacket *pkt){
2259 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2260
2261 // if(pkt->dts < 0)
2262 //    pkt->dts= 0;  //this happens for low_delay=0 and b frames, FIXME, needs further investigation about what we should do here
2263
2264 pkt->pts &= pts_mask;
2265 pkt->dts &= pts_mask;
2266 }
2267
2268 /**
2269 * Write a packet to an output media file.
2270 *
2271 * The packet shall contain one audio or video frame.
2272 *
2273 * @param s media file handle
2274 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2275 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2276 */
2277 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2278 {
2279 int ret;
2280
2281 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2282 if(ret<0)
2283 return ret;
2284
2285 truncate_ts(s->streams[pkt->stream_index], pkt);
2286
2287 ret= s->oformat->write_packet(s, pkt);
2288 if(!ret)
2289 ret= url_ferror(&s->pb);
2290 return ret;
2291 }
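
/* Sketch of feeding one encoded frame to av_write_frame(). The encoded buffer,
 * its size and the stream index are assumed to come from the caller's encoder
 * loop; timestamps left at AV_NOPTS_VALUE are filled in by compute_pkt_fields2()
 * above where possible. */
#if 0
static int write_encoded(AVFormatContext *oc, int stream_index,
                         uint8_t *buf, int size)
{
    AVPacket pkt;

    av_init_packet(&pkt);
    pkt.stream_index = stream_index;
    pkt.data = buf;
    pkt.size = size;
    return av_write_frame(oc, &pkt); /* <0 error, 0 OK, 1 end of stream wanted */
}
#endif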
2292
2293 /**
2294  * interleave_packet implementation which interleaves per DTS.
2295  * Packets with pkt->destruct == av_destruct_packet are freed inside this function,
2296  * so they cannot be used afterwards; note that calling av_free_packet() on them is still safe.
2297 */
2298 static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2299 AVPacketList *pktl, **next_point, *this_pktl;
2300 int stream_count=0;
2301 int streams[MAX_STREAMS];
2302
2303 if(pkt){
2304 AVStream *st= s->streams[ pkt->stream_index];
2305
2306 // assert(pkt->destruct != av_destruct_packet); //FIXME
2307
2308 this_pktl = av_mallocz(sizeof(AVPacketList));
2309 this_pktl->pkt= *pkt;
2310 if(pkt->destruct == av_destruct_packet)
2311 pkt->destruct= NULL; // non shared -> must keep original from being freed
2312 else
2313 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2314
2315 next_point = &s->packet_buffer;
2316 while(*next_point){
2317 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2318 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2319 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2320 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2321 break;
2322 next_point= &(*next_point)->next;
2323 }
2324 this_pktl->next= *next_point;
2325 *next_point= this_pktl;
2326 }
2327
2328 memset(streams, 0, sizeof(streams));
2329 pktl= s->packet_buffer;
2330 while(pktl){
2331 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2332 if(streams[ pktl->pkt.stream_index ] == 0)
2333 stream_count++;
2334 streams[ pktl->pkt.stream_index ]++;
2335 pktl= pktl->next;
2336 }
2337
2338 if(s->nb_streams == stream_count || (flush && stream_count)){
2339 pktl= s->packet_buffer;
2340 *out= pktl->pkt;
2341
2342 s->packet_buffer= pktl->next;
2343 av_freep(&pktl);
2344 return 1;
2345 }else{
2346 av_init_packet(out);
2347 return 0;
2348 }
2349 }
2350
2351 /**
2352  * Interleaves an AVPacket correctly so that it can be muxed.
2353 * @param out the interleaved packet will be output here
2354 * @param in the input packet
2355 * @param flush 1 if no further packets are available as input and all
2356 * remaining packets should be output
2357 * @return 1 if a packet was output, 0 if no packet could be output,
2358  *         < 0 if an error occurred
2359 */
2360 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2361 if(s->oformat->interleave_packet)
2362 return s->oformat->interleave_packet(s, out, in, flush);
2363 else
2364 return av_interleave_packet_per_dts(s, out, in, flush);
2365 }
2366
2367 /**
2368 * Writes a packet to an output media file ensuring correct interleaving.
2369 *
2370 * The packet must contain one audio or video frame.
2371  * If the packets are already correctly interleaved, the application should
2372  * call av_write_frame() instead, as it is slightly faster. It is also important
2373  * to keep in mind that completely non-interleaved input will need huge amounts
2374  * of memory to interleave with this, so it is preferable to interleave at the
2375  * demuxer level.
2376 *
2377 * @param s media file handle
2378 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2379 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2380 */
2381 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2382 AVStream *st= s->streams[ pkt->stream_index];
2383
2384 //FIXME/XXX/HACK drop zero sized packets
2385 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2386 return 0;
2387
2388 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts);
2389 if(compute_pkt_fields2(st, pkt) < 0)
2390 return -1;
2391
2392 if(pkt->dts == AV_NOPTS_VALUE)
2393 return -1;
2394
2395 for(;;){
2396 AVPacket opkt;
2397 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2398 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2399 return ret;
2400
2401 truncate_ts(s->streams[opkt.stream_index], &opkt);
2402 ret= s->oformat->write_packet(s, &opkt);
2403
2404 av_free_packet(&opkt);
2405 pkt= NULL;
2406
2407 if(ret<0)
2408 return ret;
2409 if(url_ferror(&s->pb))
2410 return url_ferror(&s->pb);
2411 }
2412 }
2413
2414 /**
2415 * @brief Write the stream trailer to an output media file and
2416 * free the file private data.
2417 *
2418 * @param s media file handle
2419 * @return 0 if OK. AVERROR_xxx if error.
2420 */
2421 int av_write_trailer(AVFormatContext *s)
2422 {
2423 int ret, i;
2424
2425 for(;;){
2426 AVPacket pkt;
2427 ret= av_interleave_packet(s, &pkt, NULL, 1);
2428 if(ret<0) //FIXME cleanup needed for ret<0 ?
2429 goto fail;
2430 if(!ret)
2431 break;
2432
2433 truncate_ts(s->streams[pkt.stream_index], &pkt);
2434 ret= s->oformat->write_packet(s, &pkt);
2435
2436 av_free_packet(&pkt);
2437
2438 if(ret<0)
2439 goto fail;
2440 if(url_ferror(&s->pb))
2441 goto fail;
2442 }
2443
2444 ret = s->oformat->write_trailer(s);
2445 fail:
2446 if(ret == 0)
2447 ret=url_ferror(&s->pb);
2448 for(i=0;i<s->nb_streams;i++)
2449 av_freep(&s->streams[i]->priv_data);
2450 av_freep(&s->priv_data);
2451 return ret;
2452 }
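
/* End-to-end muxing order as implied by the functions above: header, interleaved
 * packets, trailer. The get_next_packet() helper is purely hypothetical and stands
 * for whatever produces encoded AVPackets in the application; sketch only. */
#if 0
static int mux_all(AVFormatContext *oc)
{
    AVPacket pkt;

    if (av_set_parameters(oc, NULL) < 0 || av_write_header(oc) < 0)
        return -1;
    while (get_next_packet(&pkt) == 0) {          /* hypothetical packet source */
        if (av_interleaved_write_frame(oc, &pkt) < 0)
            return -1;
    }
    return av_write_trailer(oc);                  /* flushes buffered packets, frees priv_data */
}
#endif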
2453
2454 /* "user interface" functions */
2455
2456 void dump_format(AVFormatContext *ic,
2457 int index,
2458 const char *url,
2459 int is_output)
2460 {
2461 int i, flags;
2462 char buf[256];
2463
2464 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2465 is_output ? "Output" : "Input",
2466 index,
2467 is_output ? ic->oformat->name : ic->iformat->name,
2468 is_output ? "to" : "from", url);
2469 if (!is_output) {
2470 av_log(NULL, AV_LOG_INFO, " Duration: ");
2471 if (ic->duration != AV_NOPTS_VALUE) {
2472 int hours, mins, secs, us;
2473 secs = ic->duration / AV_TIME_BASE;
2474 us = ic->duration % AV_TIME_BASE;
2475 mins = secs / 60;
2476 secs %= 60;
2477 hours = mins / 60;
2478 mins %= 60;
2479 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2480 (10 * us) / AV_TIME_BASE);
2481 } else {
2482 av_log(NULL, AV_LOG_INFO, "N/A");
2483 }
2484 if (ic->start_time != AV_NOPTS_VALUE) {
2485 int secs, us;
2486 av_log(NULL, AV_LOG_INFO, ", start: ");
2487 secs = ic->start_time / AV_TIME_BASE;
2488 us = ic->start_time % AV_TIME_BASE;
2489 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2490 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2491 }
2492 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2493 if (ic->bit_rate) {
2494 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2495 } else {
2496 av_log(NULL, AV_LOG_INFO, "N/A");
2497 }
2498 av_log(NULL, AV_LOG_INFO, "\n");
2499 }
2500 for(i=0;i<ic->nb_streams;i++) {
2501 AVStream *st = ic->streams[i];
2502 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2503 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2504         /* the pid is important information, so we display it */
2505 /* XXX: add a generic system */
2506 if (is_output)
2507 flags = ic->oformat->flags;
2508 else
2509 flags = ic->iformat->flags;
2510 if (flags & AVFMT_SHOW_IDS) {
2511 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2512 }
2513 if (strlen(st->language) > 0) {
2514 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2515 }
2516 av_log(NULL, AV_LOG_INFO, ": %s\n", buf);
2517 }
2518 }
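
/* Typical call right after an input has been probed; a sketch assuming "ic" came
 * from av_open_input_file()/av_find_stream_info(). */
#if 0
static void show_input_info(AVFormatContext *ic, const char *filename)
{
    /* is_output = 0: prints duration, start time, bitrate and one line per stream */
    dump_format(ic, 0, filename, 0);
}
#endif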
2519
2520 typedef struct {
2521 const char *abv;
2522 int width, height;
2523 int frame_rate, frame_rate_base;
2524 } AbvEntry;
2525
2526 static AbvEntry frame_abvs[] = {
2527 { "ntsc", 720, 480, 30000, 1001 },
2528 { "pal", 720, 576, 25, 1 },
2529 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2530 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2531 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2532 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2533 { "film", 352, 240, 24, 1 },
2534 { "ntsc-film", 352, 240, 24000, 1001 },
2535 { "sqcif", 128, 96, 0, 0 },
2536 { "qcif", 176, 144, 0, 0 },
2537 { "cif", 352, 288, 0, 0 },
2538 { "4cif", 704, 576, 0, 0 },
2539 };
2540
2541 /**
2542 * parses width and height out of string str.
2543 */
2544 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2545 {
2546 int i;
2547 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2548 const char *p;
2549 int frame_width = 0, frame_height = 0;
2550
2551 for(i=0;i<n;i++) {
2552 if (!strcmp(frame_abvs[i].abv, str)) {
2553 frame_width = frame_abvs[i].width;
2554 frame_height = frame_abvs[i].height;
2555 break;
2556 }
2557 }
2558 if (i == n) {
2559 p = str;
2560 frame_width = strtol(p, (char **)&p, 10);
2561 if (*p)
2562 p++;
2563 frame_height = strtol(p, (char **)&p, 10);
2564 }
2565 if (frame_width <= 0 || frame_height <= 0)
2566 return -1;
2567 *width_ptr = frame_width;
2568 *height_ptr = frame_height;
2569 return 0;
2570 }
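
/* Usage sketch: both abbreviations from frame_abvs[] and explicit "WxH" strings
 * are accepted by parse_image_size(). */
#if 0
static void size_examples(void)
{
    int w, h;

    parse_image_size(&w, &h, "qcif");     /* -> 176x144 via the abbreviation table */
    parse_image_size(&w, &h, "1280x720"); /* -> parsed as two integers */
}
#endif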
2571
2572 /**
2573 * Converts frame rate from string to a fraction.
2574 *
2575 * First we try to get an exact integer or fractional frame rate.
2576 * If this fails we convert the frame rate to a double and return
2577 * an approximate fraction using the DEFAULT_FRAME_RATE_BASE.
2578 */
2579 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2580 {
2581 int i;
2582 char* cp;
2583
2584 /* First, we check our abbreviation table */
2585 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2586 if (!strcmp(frame_abvs[i].abv, arg)) {
2587 *frame_rate = frame_abvs[i].frame_rate;
2588 *frame_rate_base = frame_abvs[i].frame_rate_base;
2589 return 0;
2590 }
2591
2592 /* Then, we try to parse it as fraction */
2593 cp = strchr(arg, '/');
2594 if (!cp)
2595 cp = strchr(arg, ':');
2596 if (cp) {
2597 char* cpp;
2598 *frame_rate = strtol(arg, &cpp, 10);
2599 if (cpp != arg || cpp == cp)
2600 *frame_rate_base = strtol(cp+1, &cpp, 10);
2601 else
2602 *frame_rate = 0;
2603 }
2604 else {
2605 /* Finally we give up and parse it as double */
2606 *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q()
2607 *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5);
2608 }
2609 if (!*frame_rate || !*frame_rate_base)
2610 return -1;
2611 else
2612 return 0;
2613 }
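
/* Usage sketch showing the three accepted forms: abbreviation, fraction and
 * decimal (the last one is approximated against DEFAULT_FRAME_RATE_BASE). */
#if 0
static void rate_examples(void)
{
    int num, den;

    parse_frame_rate(&num, &den, "ntsc-film");  /* -> 24000/1001 from frame_abvs[] */
    parse_frame_rate(&num, &den, "30000/1001"); /* -> exact fraction */
    parse_frame_rate(&num, &den, "23.976");     /* -> rounded using DEFAULT_FRAME_RATE_BASE */
}
#endif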
2614
2615 /**
2616  * Converts a date string to the number of microseconds since Jan 1st, 1970.
2617 *
2618 * @code
2619 * Syntax:
2620 * - If not a duration:
2621 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2622  *  Time is local time unless Z is suffixed, in which case it is GMT/UTC.
2623  *  Returns the date in microseconds since 1970.
2624 *
2625 * - If a duration:
2626 * HH[:MM[:SS[.m...]]]
2627 * S+[.m...]
2628 * @endcode
2629 */
2630 int64_t parse_date(const char *datestr, int duration)
2631 {
2632 const char *p;
2633 int64_t t;
2634 struct tm dt;
2635 int i;
2636 static const char *date_fmt[] = {
2637 "%Y-%m-%d",
2638 "%Y%m%d",
2639 };
2640 static const char *time_fmt[] = {
2641 "%H:%M:%S",
2642 "%H%M%S",
2643 };
2644 const char *q;
2645 int is_utc, len;
2646 char lastch;
2647 int negative = 0;
2648
2649 #undef time
2650 time_t now = time(0);
2651
2652 len = strlen(datestr);
2653 if (len > 0)
2654 lastch = datestr[len - 1];
2655 else
2656 lastch = '\0';
2657 is_utc = (lastch == 'z' || lastch == 'Z');
2658
2659 memset(&dt, 0, sizeof(dt));
2660
2661 p = datestr;
2662 q = NULL;
2663 if (!duration) {
2664 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2665 q = small_strptime(p, date_fmt[i], &dt);
2666 if (q) {
2667 break;
2668 }
2669 }
2670
2671 if (!q) {
2672 if (is_utc) {
2673 dt = *gmtime(&now);
2674 } else {
2675 dt = *localtime(&now);
2676 }
2677 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2678 } else {
2679 p = q;
2680 }
2681
2682 if (*p == 'T' || *p == 't' || *p == ' ')
2683 p++;
2684
2685 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2686 q = small_strptime(p, time_fmt[i], &dt);
2687 if (q) {
2688 break;
2689 }
2690 }
2691 } else {
2692 if (p[0] == '-') {
2693 negative = 1;
2694 ++p;
2695 }
2696 q = small_strptime(p, time_fmt[0], &dt);
2697 if (!q) {
2698 dt.tm_sec = strtol(p, (char **)&q, 10);
2699 dt.tm_min = 0;
2700 dt.tm_hour = 0;
2701 }
2702 }
2703
2704 /* Now we have all the fields that we can get */
2705 if (!q) {
2706 if (duration)
2707 return 0;
2708 else
2709 return now * int64_t_C(1000000);
2710 }
2711
2712 if (duration) {
2713 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2714 } else {
2715 dt.tm_isdst = -1; /* unknown */
2716 if (is_utc) {
2717 t = mktimegm(&dt);
2718 } else {
2719 t = mktime(&dt);
2720 }
2721 }
2722
2723 t *= 1000000;
2724
2725 if (*q == '.') {
2726 int val, n;
2727 q++;
2728 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2729 if (!isdigit(*q))
2730 break;
2731 val += n * (*q - '0');
2732 }
2733 t += val;
2734 }
2735 return negative ? -t : t;
2736 }
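
/* Usage sketch: duration=0 parses an absolute date, duration=1 parses a length
 * of time; both return microseconds (negative for a leading '-' in durations). */
#if 0
static void date_examples(void)
{
    int64_t abs_us = parse_date("2005-07-01T12:00:00Z", 0); /* absolute date, UTC */
    int64_t dur_us = parse_date("00:01:30.5", 1);           /* 90.5 seconds */
}
#endif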
2737
2738 /**
2739 * Attempts to find a specific tag in a URL.
2740 *
2741  * Syntax: '?tag1=val1&tag2=val2...'. Only minimal URL decoding is done
2742  * ('+' is converted to a space). Returns 1 if found, 0 otherwise.
2743 */
2744 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2745 {
2746 const char *p;
2747 char tag[128], *q;
2748
2749 p = info;
2750 if (*p == '?')
2751 p++;
2752 for(;;) {
2753 q = tag;
2754 while (*p != '\0' && *p != '=' && *p != '&') {
2755 if ((q - tag) < sizeof(tag) - 1)
2756 *q++ = *p;
2757 p++;
2758 }
2759 *q = '\0';
2760 q = arg;
2761 if (*p == '=') {
2762 p++;
2763 while (*p != '&' && *p != '\0') {
2764 if ((q - arg) < arg_size - 1) {
2765 if (*p == '+')
2766 *q++ = ' ';
2767 else
2768 *q++ = *p;
2769 }
2770 p++;
2771 }
2772 *q = '\0';
2773 }
2774 if (!strcmp(tag, tag1))
2775 return 1;
2776 if (*p != '&')
2777 break;
2778 p++;
2779 }
2780 return 0;
2781 }
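
/* Usage sketch: extracting one value from an RTSP/HTTP style option string. */
#if 0
static void info_tag_example(void)
{
    char value[64];

    /* fills value with "10" and returns 1; '+' in values becomes a space */
    if (find_info_tag(value, sizeof(value), "ttl", "?multicast=1&ttl=10"))
        av_log(NULL, AV_LOG_DEBUG, "ttl=%s\n", value);
}
#endif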
2782
2783 /**
2784 * Returns in 'buf' the path with '%d' replaced by number.
2785 *
2786  * Also handles the '%0nd' format, where 'n' is the total number
2787  * of digits, as well as '%%'. Returns 0 if OK, and -1 on format error.
2788 */
2789 int get_frame_filename(char *buf, int buf_size,
2790 const char *path, int number)
2791 {
2792 const char *p;
2793 char *q, buf1[20], c;
2794 int nd, len, percentd_found;
2795
2796 q = buf;
2797 p = path;
2798 percentd_found = 0;
2799 for(;;) {
2800 c = *p++;
2801 if (c == '\0')
2802 break;
2803 if (c == '%') {
2804 do {
2805 nd = 0;
2806 while (isdigit(*p)) {
2807 nd = nd * 10 + *p++ - '0';
2808 }
2809 c = *p++;
2810 } while (isdigit(c));
2811
2812 switch(c) {
2813 case '%':
2814 goto addchar;
2815 case 'd':
2816 if (percentd_found)
2817 goto fail;
2818 percentd_found = 1;
2819 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2820 len = strlen(buf1);
2821 if ((q - buf + len) > buf_size - 1)
2822 goto fail;
2823 memcpy(q, buf1, len);
2824 q += len;
2825 break;
2826 default:
2827 goto fail;
2828 }
2829 } else {
2830 addchar:
2831 if ((q - buf) < buf_size - 1)
2832 *q++ = c;
2833 }
2834 }
2835 if (!percentd_found)
2836 goto fail;
2837 *q = '\0';
2838 return 0;
2839 fail:
2840 *q = '\0';
2841 return -1;
2842 }
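
/* Usage sketch for the '%d' / '%0Nd' substitution performed above. */
#if 0
static void frame_filename_example(void)
{
    char name[64];

    get_frame_filename(name, sizeof(name), "img%03d.ppm", 7); /* -> "img007.ppm" */
    get_frame_filename(name, sizeof(name), "plain.ppm", 7);   /* -> -1, no '%d' present */
}
#endif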
2843
2844 /**
2845  * Print a nice hex dump of a buffer
2846 * @param f stream for output
2847 * @param buf buffer
2848 * @param size buffer size
2849 */
2850 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2851 {
2852 int len, i, j, c;
2853
2854 for(i=0;i<size;i+=16) {
2855 len = size - i;
2856 if (len > 16)
2857 len = 16;
2858 fprintf(f, "%08x ", i);
2859 for(j=0;j<16;j++) {
2860 if (j < len)
2861 fprintf(f, " %02x", buf[i+j]);
2862 else
2863 fprintf(f, " ");
2864 }
2865 fprintf(f, " ");
2866 for(j=0;j<len;j++) {
2867 c = buf[i+j];
2868 if (c < ' ' || c > '~')
2869 c = '.';
2870 fprintf(f, "%c", c);
2871 }
2872 fprintf(f, "\n");
2873 }
2874 }
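
/* Usage sketch: dump the first bytes of a packet payload for debugging. */
#if 0
static void hex_dump_example(AVPacket *pkt)
{
    int n = pkt->size < 64 ? pkt->size : 64; /* limit the output to 64 bytes */
    av_hex_dump(stderr, pkt->data, n);
}
#endif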
2875
2876 /**
2877 * Print on 'f' a nice dump of a packet
2878 * @param f stream for output
2879 * @param pkt packet to dump
2880 * @param dump_payload true if the payload must be displayed too
2881 */
2882 //FIXME needs to know the time_base
2883 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2884 {
2885 fprintf(f, "stream #%d:\n", pkt->stream_index);
2886 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2887 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2888 /* DTS is _always_ valid after av_read_frame() */
2889 fprintf(f, " dts=");
2890 if (pkt->dts == AV_NOPTS_VALUE)
2891 fprintf(f, "N/A");
2892 else
2893 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
2894 /* PTS may be not known if B frames are present */
2895 fprintf(f, " pts=");
2896 if (pkt->pts == AV_NOPTS_VALUE)
2897 fprintf(f, "N/A");
2898 else
2899 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
2900 fprintf(f, "\n");
2901 fprintf(f, " size=%d\n", pkt->size);
2902 if (dump_payload)
2903 av_hex_dump(f, pkt->data, pkt->size);
2904 }
2905
2906 void url_split(char *proto, int proto_size,
2907 char *authorization, int authorization_size,
2908 char *hostname, int hostname_size,
2909 int *port_ptr,
2910 char *path, int path_size,
2911 const char *url)
2912 {
2913 const char *p;
2914 char *q;
2915 int port;
2916
2917 port = -1;
2918
2919 p = url;
2920 q = proto;
2921 while (*p != ':' && *p != '\0') {
2922 if ((q - proto) < proto_size - 1)
2923 *q++ = *p;
2924 p++;
2925 }
2926 if (proto_size > 0)
2927 *q = '\0';
2928 if (authorization_size > 0)
2929 authorization[0] = '\0';
2930 if (*p == '\0') {
2931 if (proto_size > 0)
2932 proto[0] = '\0';
2933 if (hostname_size > 0)
2934 hostname[0] = '\0';
2935 p = url;
2936 } else {
2937 char *at,*slash; // PETR: position of '@' character and '/' character
2938
2939 p++;
2940 if (*p == '/')
2941 p++;
2942 if (*p == '/')
2943 p++;
2944 at = strchr(p,'@'); // PETR: get the position of '@'
2945 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
2946 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
2947
2948 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
2949
2950 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
2951 if (*p == '@') { // PETR: passed '@'
2952 if (authorization_size > 0)
2953 *q = '\0';
2954 q = hostname;
2955 at = NULL;
2956 } else if (!at) { // PETR: hostname
2957 if ((q - hostname) < hostname_size - 1)
2958 *q++ = *p;
2959 } else {
2960 if ((q - authorization) < authorization_size - 1)
2961 *q++ = *p;
2962 }
2963 p++;
2964 }
2965 if (hostname_size > 0)
2966 *q = '\0';
2967 if (*p == ':') {
2968 p++;
2969 port = strtoul(p, (char **)&p, 10);
2970 }
2971 }
2972 if (port_ptr)
2973 *port_ptr = port;
2974 pstrcpy(path, path_size, p);
2975 }
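
/* Usage sketch: splitting an RTSP style URL into its components; the URL used
 * here is just an example value. */
#if 0
static void url_split_example(void)
{
    char proto[16], auth[128], host[128], path[256];
    int port;

    url_split(proto, sizeof(proto), auth, sizeof(auth), host, sizeof(host),
              &port, path, sizeof(path), "rtsp://user:pass@example.com:554/stream");
    /* proto="rtsp", auth="user:pass", host="example.com", port=554, path="/stream" */
}
#endif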
2976
2977 /**
2978 * Set the pts for a given stream.
2979 *
2980 * @param s stream
2981 * @param pts_wrap_bits number of bits effectively used by the pts
2982 * (used for wrap control, 33 is the value for MPEG)
2983 * @param pts_num numerator to convert to seconds (MPEG: 1)
2984 * @param pts_den denominator to convert to seconds (MPEG: 90000)
2985 */
2986 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2987 int pts_num, int pts_den)
2988 {
2989 s->pts_wrap_bits = pts_wrap_bits;
2990 s->time_base.num = pts_num;
2991 s->time_base.den = pts_den;
2992 }
2993
2994 /* fraction handling */
2995
2996 /**
2997 * f = val + (num / den) + 0.5.
2998 *
2999  * 'num' is normalized so that 0 <= num < den.
3000 *
3001 * @param f fractional number
3002 * @param val integer value
3003 * @param num must be >= 0
3004 * @param den must be >= 1
3005 */
3006 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3007 {
3008 num += (den >> 1);
3009 if (num >= den) {
3010 val += num / den;
3011 num = num % den;
3012 }
3013 f->val = val;
3014 f->num = num;
3015 f->den = den;
3016 }
3017
3018 /**
3019 * Set f to (val + 0.5).
3020 */
3021 void av_frac_set(AVFrac *f, int64_t val)
3022 {
3023 f->val = val;
3024 f->num = f->den >> 1;
3025 }
3026
3027 /**
3028  * Fractional addition to f: f = f + (incr / f->den).
3029 *
3030 * @param f fractional number
3031 * @param incr increment, can be positive or negative
3032 */
3033 void av_frac_add(AVFrac *f, int64_t incr)
3034 {
3035 int64_t num, den;
3036
3037 num = f->num + incr;
3038 den = f->den;
3039 if (num < 0) {
3040 f->val += num / den;
3041 num = num % den;
3042 if (num < 0) {
3043 num += den;
3044 f->val--;
3045 }
3046 } else if (num >= den) {
3047 f->val += num / den;
3048 num = num % den;
3049 }
3050 f->num = num;
3051 }
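
/* Worked example of the AVFrac mechanism used for PTS generation in
 * av_write_header()/compute_pkt_fields2(): with a 1/44100 time base and audio
 * frames of 1152 samples, the fractional part carries any rounding error so
 * that no drift accumulates over many frames. Sketch only. */
#if 0
static void frac_example(void)
{
    AVFrac pts;

    /* den = time_base.num * sample_rate, as done in av_write_header() */
    av_frac_init(&pts, 0, 0, (int64_t)1 * 44100);
    /* one 1152-sample frame: incr = time_base.den * frame_size */
    av_frac_add(&pts, (int64_t)44100 * 1152);
    /* pts.val is now 1152 in 1/44100 units; with e.g. a 1/90000 time base the
       remainder of the division would stay in pts.num instead of being lost */
}
#endif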
3052
3053 /**
3054 * register a new image format
3055 * @param img_fmt Image format descriptor
3056 */
3057 void av_register_image_format(AVImageFormat *img_fmt)
3058 {
3059 AVImageFormat **p;
3060
3061 p = &first_image_format;
3062 while (*p != NULL) p = &(*p)->next;
3063 *p = img_fmt;
3064 img_fmt->next = NULL;
3065 }
3066
3067 /**
3068 * Guesses image format based on data in the image.
3069 */
3070 AVImageFormat *av_probe_image_format(AVProbeData *pd)
3071 {
3072 AVImageFormat *fmt1, *fmt;
3073 int score, score_max;
3074
3075 fmt = NULL;
3076 score_max = 0;
3077 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
3078 if (fmt1->img_probe) {
3079 score = fmt1->img_probe(pd);
3080 if (score > score_max) {
3081 score_max = score;
3082 fmt = fmt1;
3083 }
3084 }
3085 }
3086 return fmt;
3087 }
3088
3089 /**
3090 * Guesses image format based on file name extensions.
3091 */
3092 AVImageFormat *guess_image_format(const char *filename)
3093 {
3094 AVImageFormat *fmt1;
3095
3096 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
3097 if (fmt1->extensions && match_ext(filename, fmt1->extensions))
3098 return fmt1;
3099 }
3100 return NULL;
3101 }
3102
3103 /**
3104 * Read an image from a stream.
3105  * @param pb byte stream containing the image
3106 * @param fmt image format, NULL if probing is required
3107 */
3108 int av_read_image(ByteIOContext *pb, const char *filename,
3109 AVImageFormat *fmt,
3110 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
3111 {
3112 char buf[PROBE_BUF_SIZE];
3113 AVProbeData probe_data, *pd = &probe_data;
3114 offset_t pos;
3115 int ret;
3116
3117 if (!fmt) {
3118 pd->filename = filename;
3119 pd->buf = buf;
3120 pos = url_ftell(pb);
3121 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
3122 url_fseek(pb, pos, SEEK_SET);
3123 fmt = av_probe_image_format(pd);
3124 }
3125 if (!fmt)
3126 return AVERROR_NOFMT;
3127 ret = fmt->img_read(pb, alloc_cb, opaque);
3128 return ret;
3129 }
3130
3131 /**
3132 * Write an image to a stream.
3133 * @param pb byte stream for the image output
3134 * @param fmt image format
3135  * @param img image data and information
3136 */
3137 int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
3138 {
3139 return fmt->img_write(pb, img);
3140 }
3141