[libav.git] / libavformat / utils.c
1 /*
2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #include "avformat.h"
20
21 #undef NDEBUG
22 #include <assert.h>
23
24 AVInputFormat *first_iformat;
25 AVOutputFormat *first_oformat;
26 AVImageFormat *first_image_format;
27
28 void av_register_input_format(AVInputFormat *format)
29 {
30 AVInputFormat **p;
31 p = &first_iformat;
32 while (*p != NULL) p = &(*p)->next;
33 *p = format;
34 format->next = NULL;
35 }
36
37 void av_register_output_format(AVOutputFormat *format)
38 {
39 AVOutputFormat **p;
40 p = &first_oformat;
41 while (*p != NULL) p = &(*p)->next;
42 *p = format;
43 format->next = NULL;
44 }
45
46 int match_ext(const char *filename, const char *extensions)
47 {
48 const char *ext, *p;
49 char ext1[32], *q;
50
51 if(!filename)
52 return 0;
53
54 ext = strrchr(filename, '.');
55 if (ext) {
56 ext++;
57 p = extensions;
58 for(;;) {
59 q = ext1;
60 while (*p != '\0' && *p != ',')
61 *q++ = *p++;
62 *q = '\0';
63 if (!strcasecmp(ext1, ext))
64 return 1;
65 if (*p == '\0')
66 break;
67 p++;
68 }
69 }
70 return 0;
71 }
72
73 AVOutputFormat *guess_format(const char *short_name, const char *filename,
74 const char *mime_type)
75 {
76 AVOutputFormat *fmt, *fmt_found;
77 int score_max, score;
78
79 /* specific test for image sequences */
80 if (!short_name && filename &&
81 filename_number_test(filename) >= 0 &&
82 guess_image_format(filename)) {
83 return guess_format("image", NULL, NULL);
84 }
85
86 /* find the proper file type */
87 fmt_found = NULL;
88 score_max = 0;
89 fmt = first_oformat;
90 while (fmt != NULL) {
91 score = 0;
92 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
93 score += 100;
94 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
95 score += 10;
96 if (filename && fmt->extensions &&
97 match_ext(filename, fmt->extensions)) {
98 score += 5;
99 }
100 if (score > score_max) {
101 score_max = score;
102 fmt_found = fmt;
103 }
104 fmt = fmt->next;
105 }
106 return fmt_found;
107 }
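/* Illustrative sketch (not compiled) of how an output format is normally
   selected: an explicit short name takes precedence, then the filename
   extension, then the mime type.  This only shows one way of calling
   guess_format(); it is not part of the library. */
#if 0
static AVOutputFormat *example_pick_output_format(const char *short_name,
                                                  const char *filename)
{
    AVOutputFormat *ofmt;

    ofmt = guess_format(short_name, filename, NULL);
    if (!ofmt) {
        /* fall back to a default container if nothing matched */
        ofmt = guess_format("mpeg", NULL, NULL);
    }
    return ofmt;
}
#endif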
108
109 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
110 const char *mime_type)
111 {
112 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
113
114 if (fmt) {
115 AVOutputFormat *stream_fmt;
116 char stream_format_name[64];
117
118 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
119 stream_fmt = guess_format(stream_format_name, NULL, NULL);
120
121 if (stream_fmt)
122 fmt = stream_fmt;
123 }
124
125 return fmt;
126 }
127
128 AVInputFormat *av_find_input_format(const char *short_name)
129 {
130 AVInputFormat *fmt;
131 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
132 if (!strcmp(fmt->name, short_name))
133 return fmt;
134 }
135 return NULL;
136 }
137
138 /* memory handling */
139
140 /**
141 * Default packet destructor
142 */
143 static void av_destruct_packet(AVPacket *pkt)
144 {
145 av_free(pkt->data);
146 pkt->data = NULL; pkt->size = 0;
147 }
148
149 /**
150 * Allocate the payload of a packet and initialize its fields to default values.
151 *
152 * @param pkt packet
153 * @param size wanted payload size
154 * @return 0 if OK. AVERROR_xxx otherwise.
155 */
156 int av_new_packet(AVPacket *pkt, int size)
157 {
158 void *data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
159 if (!data)
160 return AVERROR_NOMEM;
161 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
162
163 av_init_packet(pkt);
164 pkt->data = data;
165 pkt->size = size;
166 pkt->destruct = av_destruct_packet;
167 return 0;
168 }
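/* Illustrative sketch (not compiled) of the intended AVPacket ownership
   pattern: av_new_packet() allocates a padded payload (the
   FF_INPUT_BUFFER_PADDING_SIZE bytes after it are zeroed) and installs
   av_destruct_packet(); av_free_packet() releases it again. */
#if 0
static int example_packet_alloc(void)
{
    AVPacket pkt;

    if (av_new_packet(&pkt, 1024) < 0)
        return AVERROR_NOMEM;
    memset(pkt.data, 0, pkt.size); /* the payload is writable, the padding stays zeroed */
    av_free_packet(&pkt);          /* invokes pkt.destruct, i.e. av_destruct_packet */
    return 0;
}
#endif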
169
170 /* This is a hack - the packet memory allocation stuff is broken. The
171 packet payload is copied into newly allocated memory if the packet did not really own it */
172 int av_dup_packet(AVPacket *pkt)
173 {
174 if (pkt->destruct != av_destruct_packet) {
175 uint8_t *data;
176 /* we duplicate the packet and don't forget to put the padding
177 again */
178 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
179 if (!data) {
180 return AVERROR_NOMEM;
181 }
182 memcpy(data, pkt->data, pkt->size);
183 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
184 pkt->data = data;
185 pkt->destruct = av_destruct_packet;
186 }
187 return 0;
188 }
189
190 /* fifo handling */
191
192 int fifo_init(FifoBuffer *f, int size)
193 {
194 f->buffer = av_malloc(size);
195 if (!f->buffer)
196 return -1;
197 f->end = f->buffer + size;
198 f->wptr = f->rptr = f->buffer;
199 return 0;
200 }
201
202 void fifo_free(FifoBuffer *f)
203 {
204 av_free(f->buffer);
205 }
206
207 int fifo_size(FifoBuffer *f, uint8_t *rptr)
208 {
209 int size;
210
211 if (f->wptr >= rptr) {
212 size = f->wptr - rptr;
213 } else {
214 size = (f->end - rptr) + (f->wptr - f->buffer);
215 }
216 return size;
217 }
218
219 /* get data from the fifo (return -1 if not enough data) */
220 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
221 {
222 uint8_t *rptr = *rptr_ptr;
223 int size, len;
224
225 if (f->wptr >= rptr) {
226 size = f->wptr - rptr;
227 } else {
228 size = (f->end - rptr) + (f->wptr - f->buffer);
229 }
230
231 if (size < buf_size)
232 return -1;
233 while (buf_size > 0) {
234 len = f->end - rptr;
235 if (len > buf_size)
236 len = buf_size;
237 memcpy(buf, rptr, len);
238 buf += len;
239 rptr += len;
240 if (rptr >= f->end)
241 rptr = f->buffer;
242 buf_size -= len;
243 }
244 *rptr_ptr = rptr;
245 return 0;
246 }
247
248 void fifo_write(FifoBuffer *f, uint8_t *buf, int size, uint8_t **wptr_ptr)
249 {
250 int len;
251 uint8_t *wptr;
252 wptr = *wptr_ptr;
253 while (size > 0) {
254 len = f->end - wptr;
255 if (len > size)
256 len = size;
257 memcpy(wptr, buf, len);
258 wptr += len;
259 if (wptr >= f->end)
260 wptr = f->buffer;
261 buf += len;
262 size -= len;
263 }
264 *wptr_ptr = wptr;
265 }
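/* Illustrative sketch (not compiled) of the FifoBuffer calling convention:
   the read and write pointers are passed by address so the helpers can
   advance them across the wrap-around point; here the fifo's own
   rptr/wptr members are passed directly. */
#if 0
static int example_fifo_roundtrip(void)
{
    FifoBuffer f;
    uint8_t in[16] = {0}, out[16];
    int ret;

    if (fifo_init(&f, 64) < 0)
        return -1;
    fifo_write(&f, in, sizeof(in), &f.wptr);        /* append 16 bytes */
    ret = fifo_read(&f, out, sizeof(out), &f.rptr); /* -1 if not enough data */
    fifo_free(&f);
    return ret;
}
#endif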
266
267 int filename_number_test(const char *filename)
268 {
269 char buf[1024];
270 if(!filename)
271 return -1;
272 return get_frame_filename(buf, sizeof(buf), filename, 1);
273 }
274
275 /* guess file format */
276 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
277 {
278 AVInputFormat *fmt1, *fmt;
279 int score, score_max;
280
281 fmt = NULL;
282 score_max = 0;
283 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
284 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
285 continue;
286 score = 0;
287 if (fmt1->read_probe) {
288 score = fmt1->read_probe(pd);
289 } else if (fmt1->extensions) {
290 if (match_ext(pd->filename, fmt1->extensions)) {
291 score = 50;
292 }
293 }
294 if (score > score_max) {
295 score_max = score;
296 fmt = fmt1;
297 }
298 }
299 return fmt;
300 }
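/* Illustrative sketch (not compiled) of how the probe is driven: fill an
   AVProbeData with the filename and the first bytes of the file, then let
   every registered demuxer score it.  av_open_input_file() below is the
   real call site. */
#if 0
static AVInputFormat *example_probe(const char *filename,
                                    uint8_t *header, int header_size)
{
    AVProbeData pd;

    pd.filename = filename;
    pd.buf      = header;
    pd.buf_size = header_size;
    return av_probe_input_format(&pd, 1 /* file already opened: try all formats */);
}
#endif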
301
302 /************************************************************/
303 /* input media file */
304
305 /**
306 * Open a media file from an IO stream. 'fmt' must be specified.
307 */
308
309 static const char* format_to_name(void* ptr)
310 {
311 AVFormatContext* fc = (AVFormatContext*) ptr;
312 if(fc->iformat) return fc->iformat->name;
313 else if(fc->oformat) return fc->oformat->name;
314 else return "NULL";
315 }
316
317 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
318
319 AVFormatContext *av_alloc_format_context(void)
320 {
321 AVFormatContext *ic;
322 ic = av_mallocz(sizeof(AVFormatContext));
323 if (!ic) return ic;
324 ic->av_class = &av_format_context_class;
325 return ic;
326 }
327
328 int av_open_input_stream(AVFormatContext **ic_ptr,
329 ByteIOContext *pb, const char *filename,
330 AVInputFormat *fmt, AVFormatParameters *ap)
331 {
332 int err;
333 AVFormatContext *ic;
334
335 ic = av_alloc_format_context();
336 if (!ic) {
337 err = AVERROR_NOMEM;
338 goto fail;
339 }
340 ic->iformat = fmt;
341 if (pb)
342 ic->pb = *pb;
343 ic->duration = AV_NOPTS_VALUE;
344 ic->start_time = AV_NOPTS_VALUE;
345 pstrcpy(ic->filename, sizeof(ic->filename), filename);
346
347 /* allocate private data */
348 if (fmt->priv_data_size > 0) {
349 ic->priv_data = av_mallocz(fmt->priv_data_size);
350 if (!ic->priv_data) {
351 err = AVERROR_NOMEM;
352 goto fail;
353 }
354 } else {
355 ic->priv_data = NULL;
356 }
357
358 err = ic->iformat->read_header(ic, ap);
359 if (err < 0)
360 goto fail;
361
362 if (pb)
363 ic->data_offset = url_ftell(&ic->pb);
364
365 *ic_ptr = ic;
366 return 0;
367 fail:
368 if (ic) {
369 av_freep(&ic->priv_data);
370 }
371 av_free(ic);
372 *ic_ptr = NULL;
373 return err;
374 }
375
376 #define PROBE_BUF_SIZE 2048
377
378 /**
379 * Open a media file as input. The codecs are not opened. Only the file
380 * header (if present) is read.
381 *
382 * @param ic_ptr the opened media file handle is put here
383 * @param filename filename to open.
384 * @param fmt if non-NULL, force the file format to use
385 * @param buf_size optional buffer size (zero if default is OK)
386 * @param ap additional parameters needed when opening the file (NULL if default)
387 * @return 0 if OK. AVERROR_xxx otherwise.
388 */
389 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
390 AVInputFormat *fmt,
391 int buf_size,
392 AVFormatParameters *ap)
393 {
394 int err, must_open_file, file_opened;
395 uint8_t buf[PROBE_BUF_SIZE];
396 AVProbeData probe_data, *pd = &probe_data;
397 ByteIOContext pb1, *pb = &pb1;
398
399 file_opened = 0;
400 pd->filename = "";
401 if (filename)
402 pd->filename = filename;
403 pd->buf = buf;
404 pd->buf_size = 0;
405
406 if (!fmt) {
407 /* guess the format among the formats which do not need the file to be opened */
408 fmt = av_probe_input_format(pd, 0);
409 }
410
411 /* do not open file if the format does not need it. XXX: specific
412 hack needed to handle RTSP/TCP */
413 must_open_file = 1;
414 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
415 must_open_file = 0;
416 }
417
418 if (!fmt || must_open_file) {
419 /* the file must be opened, either because the format needs it or because we still have to probe it */
420 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
421 err = AVERROR_IO;
422 goto fail;
423 }
424 file_opened = 1;
425 if (buf_size > 0) {
426 url_setbufsize(pb, buf_size);
427 }
428 if (!fmt) {
429 /* read probe data */
430 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
431 url_fseek(pb, 0, SEEK_SET);
432 }
433 }
434
435 /* guess file format */
436 if (!fmt) {
437 fmt = av_probe_input_format(pd, 1);
438 }
439
440 /* if still no format found, error */
441 if (!fmt) {
442 err = AVERROR_NOFMT;
443 goto fail;
444 }
445
446 /* XXX: remove this hack for redirectors */
447 #ifdef CONFIG_NETWORK
448 if (fmt == &redir_demux) {
449 err = redir_open(ic_ptr, pb);
450 url_fclose(pb);
451 return err;
452 }
453 #endif
454
455 /* check filename in case an image number is expected */
456 if (fmt->flags & AVFMT_NEEDNUMBER) {
457 if (filename_number_test(filename) < 0) {
458 err = AVERROR_NUMEXPECTED;
459 goto fail;
460 }
461 }
462 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
463 if (err)
464 goto fail;
465 return 0;
466 fail:
467 if (file_opened)
468 url_fclose(pb);
469 *ic_ptr = NULL;
470 return err;
471
472 }
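/* Illustrative sketch (not compiled) of the minimal open/close cycle around
   av_open_input_file().  It assumes the demuxers and protocols have been
   registered beforehand (the usual av_register_all() entry point). */
#if 0
static int example_open_close(const char *filename)
{
    AVFormatContext *ic;
    int err;

    err = av_open_input_file(&ic, filename, NULL, 0, NULL);
    if (err < 0)
        return err;                 /* AVERROR_IO, AVERROR_NOFMT, ... */
    /* ... av_find_stream_info(), av_read_frame() loop, av_seek_frame() ... */
    av_close_input_file(ic);
    return 0;
}
#endif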
473
474 /*******************************************************/
475
476 /**
477 * Read a transport packet from a media file. This function is
478 * obsolete and should never be used. Use av_read_frame() instead.
479 *
480 * @param s media file handle
481 * @param pkt is filled
482 * @return 0 if OK. AVERROR_xxx if error.
483 */
484 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
485 {
486 return s->iformat->read_packet(s, pkt);
487 }
488
489 /**********************************************************/
490
491 /* get the number of samples of an audio frame. Return (-1) if error */
492 static int get_audio_frame_size(AVCodecContext *enc, int size)
493 {
494 int frame_size;
495
496 if (enc->frame_size <= 1) {
497 /* specific hack for pcm codecs because no frame size is
498 provided */
499 switch(enc->codec_id) {
500 case CODEC_ID_PCM_S16LE:
501 case CODEC_ID_PCM_S16BE:
502 case CODEC_ID_PCM_U16LE:
503 case CODEC_ID_PCM_U16BE:
504 if (enc->channels == 0)
505 return -1;
506 frame_size = size / (2 * enc->channels);
507 break;
508 case CODEC_ID_PCM_S8:
509 case CODEC_ID_PCM_U8:
510 case CODEC_ID_PCM_MULAW:
511 case CODEC_ID_PCM_ALAW:
512 if (enc->channels == 0)
513 return -1;
514 frame_size = size / (enc->channels);
515 break;
516 default:
517 /* used for example by ADPCM codecs */
518 if (enc->bit_rate == 0)
519 return -1;
520 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
521 break;
522 }
523 } else {
524 frame_size = enc->frame_size;
525 }
526 return frame_size;
527 }
528
529
530 /* compute the frame duration as a fraction of a second (*pnum / *pden); set both to 0 if not available */
531 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
532 AVCodecParserContext *pc, AVPacket *pkt)
533 {
534 int frame_size;
535
536 *pnum = 0;
537 *pden = 0;
538 switch(st->codec.codec_type) {
539 case CODEC_TYPE_VIDEO:
540 *pnum = st->codec.frame_rate_base;
541 *pden = st->codec.frame_rate;
542 if (pc && pc->repeat_pict) {
543 *pden *= 2;
544 *pnum = (*pnum) * (2 + pc->repeat_pict);
545 }
546 break;
547 case CODEC_TYPE_AUDIO:
548 frame_size = get_audio_frame_size(&st->codec, pkt->size);
549 if (frame_size < 0)
550 break;
551 *pnum = frame_size;
552 *pden = st->codec.sample_rate;
553 break;
554 default:
555 break;
556 }
557 }
558
559 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
560 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
561 int64_t delta= last_ts - mask/2;
562 return ((lsb - delta)&mask) + delta;
563 }
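/* Worked example for lsb2full() with pts_wrap_bits == 33 (the MPEG default
   set in av_new_stream): mask == 0x1FFFFFFFF.  If the last full timestamp
   was 0x1FFFFFFF0 and the next 33 bit value wraps around to 0x10, the
   result is 0x200000010, i.e. last_ts + 0x20 - the reconstructed timestamp
   keeps increasing monotonically across the wrap instead of jumping back
   to 0x10. */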
564
565 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
566 AVCodecParserContext *pc, AVPacket *pkt)
567 {
568 int num, den, presentation_delayed;
569
570 /* handle wrapping */
571 if(st->cur_dts != AV_NOPTS_VALUE){
572 if(pkt->pts != AV_NOPTS_VALUE)
573 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
574 if(pkt->dts != AV_NOPTS_VALUE)
575 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
576 }
577
578 if (pkt->duration == 0) {
579 compute_frame_duration(&num, &den, st, pc, pkt);
580 if (den && num) {
581 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
582 }
583 }
584
585 /* do we have a video B frame ? */
586 presentation_delayed = 0;
587 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
588 /* XXX: need has_b_frame, but cannot get it if the codec is
589 not initialized */
590 if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
591 st->codec.codec_id == CODEC_ID_MPEG2VIDEO ||
592 st->codec.codec_id == CODEC_ID_MPEG4 ||
593 st->codec.codec_id == CODEC_ID_H264) &&
594 pc && pc->pict_type != FF_B_TYPE)
595 presentation_delayed = 1;
596 /* this may be redundant, but it shouldn't hurt */
597 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
598 presentation_delayed = 1;
599 }
600
601 if(st->cur_dts == AV_NOPTS_VALUE){
602 if(presentation_delayed) st->cur_dts = -pkt->duration;
603 else st->cur_dts = 0;
604 }
605
606 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
607 /* interpolate PTS and DTS if they are not present */
608 if (presentation_delayed) {
609 /* DTS = decompression time stamp */
610 /* PTS = presentation time stamp */
611 if (pkt->dts == AV_NOPTS_VALUE) {
612 /* if we know the last pts, use it */
613 if(st->last_IP_pts != AV_NOPTS_VALUE)
614 st->cur_dts = pkt->dts = st->last_IP_pts;
615 else
616 pkt->dts = st->cur_dts;
617 } else {
618 st->cur_dts = pkt->dts;
619 }
620 /* this is tricky: the dts must be incremented by the duration
621 of the frame we are displaying, i.e. the last I or P frame */
622 if (st->last_IP_duration == 0)
623 st->cur_dts += pkt->duration;
624 else
625 st->cur_dts += st->last_IP_duration;
626 st->last_IP_duration = pkt->duration;
627 st->last_IP_pts= pkt->pts;
628 /* cannot compute PTS if not present (we can compute it only
629 by knowing the future) */
630 } else {
631 /* presentation is not delayed : PTS and DTS are the same */
632 if (pkt->pts == AV_NOPTS_VALUE) {
633 if (pkt->dts == AV_NOPTS_VALUE) {
634 pkt->pts = st->cur_dts;
635 pkt->dts = st->cur_dts;
636 }
637 else {
638 st->cur_dts = pkt->dts;
639 pkt->pts = pkt->dts;
640 }
641 } else {
642 st->cur_dts = pkt->pts;
643 pkt->dts = pkt->pts;
644 }
645 st->cur_dts += pkt->duration;
646 }
647 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
648
649 /* update flags */
650 if (pc) {
651 pkt->flags = 0;
652 /* key frame computation */
653 switch(st->codec.codec_type) {
654 case CODEC_TYPE_VIDEO:
655 if (pc->pict_type == FF_I_TYPE)
656 pkt->flags |= PKT_FLAG_KEY;
657 break;
658 case CODEC_TYPE_AUDIO:
659 pkt->flags |= PKT_FLAG_KEY;
660 break;
661 default:
662 break;
663 }
664 }
665
666 /* convert the packet time stamp units */
667 if(pkt->pts != AV_NOPTS_VALUE)
668 pkt->pts = av_rescale(pkt->pts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
669 if(pkt->dts != AV_NOPTS_VALUE)
670 pkt->dts = av_rescale(pkt->dts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
671
672 /* duration field */
673 pkt->duration = av_rescale(pkt->duration, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
674 }
675
676 static void av_destruct_packet_nofree(AVPacket *pkt)
677 {
678 pkt->data = NULL; pkt->size = 0;
679 }
680
681 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
682 {
683 AVStream *st;
684 int len, ret, i;
685
686 for(;;) {
687 /* select current input stream component */
688 st = s->cur_st;
689 if (st) {
690 if (!st->parser) {
691 /* no parsing needed: we just output the packet as is */
692 /* raw data support */
693 *pkt = s->cur_pkt;
694 compute_pkt_fields(s, st, NULL, pkt);
695 s->cur_st = NULL;
696 return 0;
697 } else if (s->cur_len > 0) {
698 len = av_parser_parse(st->parser, &st->codec, &pkt->data, &pkt->size,
699 s->cur_ptr, s->cur_len,
700 s->cur_pkt.pts, s->cur_pkt.dts);
701 s->cur_pkt.pts = AV_NOPTS_VALUE;
702 s->cur_pkt.dts = AV_NOPTS_VALUE;
703 /* increment read pointer */
704 s->cur_ptr += len;
705 s->cur_len -= len;
706
707 /* return packet if any */
708 if (pkt->size) {
709 got_packet:
710 pkt->duration = 0;
711 pkt->stream_index = st->index;
712 pkt->pts = st->parser->pts;
713 pkt->dts = st->parser->dts;
714 pkt->destruct = av_destruct_packet_nofree;
715 compute_pkt_fields(s, st, st->parser, pkt);
716 return 0;
717 }
718 } else {
719 /* free packet */
720 av_free_packet(&s->cur_pkt);
721 s->cur_st = NULL;
722 }
723 } else {
724 /* read next packet */
725 ret = av_read_packet(s, &s->cur_pkt);
726 if (ret < 0) {
727 if (ret == -EAGAIN)
728 return ret;
729 /* return the last frames, if any */
730 for(i = 0; i < s->nb_streams; i++) {
731 st = s->streams[i];
732 if (st->parser) {
733 av_parser_parse(st->parser, &st->codec,
734 &pkt->data, &pkt->size,
735 NULL, 0,
736 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
737 if (pkt->size)
738 goto got_packet;
739 }
740 }
741 /* no more packets: really terminates parsing */
742 return ret;
743 }
744
745 st = s->streams[s->cur_pkt.stream_index];
746
747 s->cur_st = st;
748 s->cur_ptr = s->cur_pkt.data;
749 s->cur_len = s->cur_pkt.size;
750 if (st->need_parsing && !st->parser) {
751 st->parser = av_parser_init(st->codec.codec_id);
752 if (!st->parser) {
753 /* no parser available : just output the raw packets */
754 st->need_parsing = 0;
755 }
756 }
757 }
758 }
759 }
760
761 /**
762 * Return the next frame of a stream. The returned packet is valid
763 * until the next av_read_frame() or until av_close_input_file() and
764 * must be freed with av_free_packet. For video, the packet contains
765 * exactly one frame. For audio, it contains an integer number of
766 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
767 * data). If the audio frames have a variable size (e.g. MPEG audio),
768 * then it contains one frame.
769 *
770 * pkt->pts, pkt->dts and pkt->duration are always set to correct
771 * values in AV_TIME_BASE units (and guessed if the format cannot
772 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
773 * has B frames, so it is better to rely on pkt->dts if you do not
774 * decompress the payload.
775 *
776 * Return 0 if OK, < 0 if error or end of file.
777 */
778 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
779 {
780 AVPacketList *pktl;
781
782 pktl = s->packet_buffer;
783 if (pktl) {
784 /* read packet from packet buffer, if there is data */
785 *pkt = pktl->pkt;
786 s->packet_buffer = pktl->next;
787 av_free(pktl);
788 return 0;
789 } else {
790 return av_read_frame_internal(s, pkt);
791 }
792 }
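/* Illustrative sketch (not compiled) of the demuxing loop built on
   av_read_frame(): every returned packet must be released with
   av_free_packet() once it has been consumed. */
#if 0
static void example_read_loop(AVFormatContext *ic)
{
    AVPacket pkt;

    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.stream_index selects the stream; pkt.pts, pkt.dts and
           pkt.duration are in AV_TIME_BASE units as described above */
        av_free_packet(&pkt);
    }
}
#endif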
793
794 /* XXX: suppress the packet queue */
795 static void flush_packet_queue(AVFormatContext *s)
796 {
797 AVPacketList *pktl;
798
799 for(;;) {
800 pktl = s->packet_buffer;
801 if (!pktl)
802 break;
803 s->packet_buffer = pktl->next;
804 av_free_packet(&pktl->pkt);
805 av_free(pktl);
806 }
807 }
808
809 /*******************************************************/
810 /* seek support */
811
812 int av_find_default_stream_index(AVFormatContext *s)
813 {
814 int i;
815 AVStream *st;
816
817 if (s->nb_streams <= 0)
818 return -1;
819 for(i = 0; i < s->nb_streams; i++) {
820 st = s->streams[i];
821 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
822 return i;
823 }
824 }
825 return 0;
826 }
827
828 /* flush the frame reader */
829 static void av_read_frame_flush(AVFormatContext *s)
830 {
831 AVStream *st;
832 int i;
833
834 flush_packet_queue(s);
835
836 /* free previous packet */
837 if (s->cur_st) {
838 if (s->cur_st->parser)
839 av_free_packet(&s->cur_pkt);
840 s->cur_st = NULL;
841 }
842 /* fail safe */
843 s->cur_ptr = NULL;
844 s->cur_len = 0;
845
846 /* for each stream, reset read state */
847 for(i = 0; i < s->nb_streams; i++) {
848 st = s->streams[i];
849
850 if (st->parser) {
851 av_parser_close(st->parser);
852 st->parser = NULL;
853 }
854 st->last_IP_pts = AV_NOPTS_VALUE;
855 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
856 }
857 }
858
859 /**
860 * Add an index entry into a sorted list, updating it if it is already there.
861 * @param timestamp timestamp in the timebase of the given stream
862 */
863 int av_add_index_entry(AVStream *st,
864 int64_t pos, int64_t timestamp, int distance, int flags)
865 {
866 AVIndexEntry *entries, *ie;
867 int index;
868
869 entries = av_fast_realloc(st->index_entries,
870 &st->index_entries_allocated_size,
871 (st->nb_index_entries + 1) *
872 sizeof(AVIndexEntry));
873 st->index_entries= entries;
874
875 if(st->nb_index_entries){
876 index= av_index_search_timestamp(st, timestamp);
877 ie= &entries[index];
878
879 if(ie->timestamp != timestamp){
880 if(ie->timestamp < timestamp){
881 index++; //index points to next instead of previous entry, maybe nonexistent
882 ie= &st->index_entries[index];
883 }else
884 assert(index==0);
885
886 if(index != st->nb_index_entries){
887 assert(index < st->nb_index_entries);
888 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
889 }
890 st->nb_index_entries++;
891 }else{
892 if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
893 distance= ie->min_distance;
894 }
895 }else{
896 index= st->nb_index_entries++;
897 ie= &entries[index];
898 }
899
900 ie->pos = pos;
901 ie->timestamp = timestamp;
902 ie->min_distance= distance;
903 ie->flags = flags;
904
905 return index;
906 }
907
908 /* build an index for raw streams using a parser */
909 static void av_build_index_raw(AVFormatContext *s)
910 {
911 AVPacket pkt1, *pkt = &pkt1;
912 int ret;
913 AVStream *st;
914
915 st = s->streams[0];
916 av_read_frame_flush(s);
917 url_fseek(&s->pb, s->data_offset, SEEK_SET);
918
919 for(;;) {
920 ret = av_read_frame(s, pkt);
921 if (ret < 0)
922 break;
923 if (pkt->stream_index == 0 && st->parser &&
924 (pkt->flags & PKT_FLAG_KEY)) {
925 int64_t dts= av_rescale(pkt->dts, st->time_base.den, AV_TIME_BASE*(int64_t)st->time_base.num);
926 av_add_index_entry(st, st->parser->frame_offset, dts,
927 0, AVINDEX_KEYFRAME);
928 }
929 av_free_packet(pkt);
930 }
931 }
932
933 /* return TRUE if we deal with a raw stream (raw codec data and
934 parsing needed) */
935 static int is_raw_stream(AVFormatContext *s)
936 {
937 AVStream *st;
938
939 if (s->nb_streams != 1)
940 return 0;
941 st = s->streams[0];
942 if (!st->need_parsing)
943 return 0;
944 return 1;
945 }
946
947 /* return the index of the largest entry whose timestamp is <=
948 wanted_timestamp */
949 int av_index_search_timestamp(AVStream *st, int wanted_timestamp)
950 {
951 AVIndexEntry *entries= st->index_entries;
952 int nb_entries= st->nb_index_entries;
953 int a, b, m;
954 int64_t timestamp;
955
956 if (nb_entries <= 0)
957 return -1;
958
959 a = 0;
960 b = nb_entries - 1;
961
962 while (a < b) {
963 m = (a + b + 1) >> 1;
964 timestamp = entries[m].timestamp;
965 if (timestamp > wanted_timestamp) {
966 b = m - 1;
967 } else {
968 a = m;
969 }
970 }
971 return a;
972 }
973
974 #define DEBUG_SEEK
975
976 /**
977 * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
978 * This isn't supposed to be called directly by a user application, but by demuxers.
979 * @param target_ts target timestamp in the time base of the given stream
980 * @param stream_index stream number
981 */
982 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts){
983 AVInputFormat *avif= s->iformat;
984 int64_t pos_min, pos_max, pos, pos_limit;
985 int64_t ts_min, ts_max, ts;
986 int64_t start_pos;
987 int index, no_change, i;
988 AVStream *st;
989
990 if (stream_index < 0)
991 return -1;
992
993 #ifdef DEBUG_SEEK
994 av_log(s, AV_LOG_DEBUG, "read_seek: %d %lld\n", stream_index, target_ts);
995 #endif
996
997 ts_max=
998 ts_min= AV_NOPTS_VALUE;
999 pos_limit= -1; //gcc falsely says it may be uninitialized
1000
1001 st= s->streams[stream_index];
1002 if(st->index_entries){
1003 AVIndexEntry *e;
1004
1005 index= av_index_search_timestamp(st, target_ts);
1006 e= &st->index_entries[index];
1007
1008 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1009 pos_min= e->pos;
1010 ts_min= e->timestamp;
1011 #ifdef DEBUG_SEEK
1012 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%llx dts_min=%lld\n",
1013 pos_min,ts_min);
1014 #endif
1015 }else{
1016 assert(index==0);
1017 }
1018 index++;
1019 if(index < st->nb_index_entries){
1020 e= &st->index_entries[index];
1021 assert(e->timestamp >= target_ts);
1022 pos_max= e->pos;
1023 ts_max= e->timestamp;
1024 pos_limit= pos_max - e->min_distance;
1025 #ifdef DEBUG_SEEK
1026 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%llx pos_limit=0x%llx dts_max=%lld\n",
1027 pos_max,pos_limit, ts_max);
1028 #endif
1029 }
1030 }
1031
1032 if(ts_min == AV_NOPTS_VALUE){
1033 pos_min = s->data_offset;
1034 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1035 if (ts_min == AV_NOPTS_VALUE)
1036 return -1;
1037 }
1038
1039 if(ts_max == AV_NOPTS_VALUE){
1040 int step= 1024;
1041 pos_max = url_filesize(url_fileno(&s->pb)) - 1;
1042 do{
1043 pos_max -= step;
1044 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1045 step += step;
1046 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1047 if (ts_max == AV_NOPTS_VALUE)
1048 return -1;
1049
1050 for(;;){
1051 int64_t tmp_pos= pos_max + 1;
1052 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1053 if(tmp_ts == AV_NOPTS_VALUE)
1054 break;
1055 ts_max= tmp_ts;
1056 pos_max= tmp_pos;
1057 }
1058 pos_limit= pos_max;
1059 }
1060
1061 no_change=0;
1062 while (pos_min < pos_limit) {
1063 #ifdef DEBUG_SEEK
1064 av_log(s, AV_LOG_DEBUG, "pos_min=0x%llx pos_max=0x%llx dts_min=%lld dts_max=%lld\n",
1065 pos_min, pos_max,
1066 ts_min, ts_max);
1067 #endif
1068 assert(pos_limit <= pos_max);
1069
1070 if(no_change==0){
1071 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1072 // interpolate position (better than dichotomy)
1073 pos = (int64_t)((double)(pos_max - pos_min) *
1074 (double)(target_ts - ts_min) /
1075 (double)(ts_max - ts_min)) + pos_min - approximate_keyframe_distance;
1076 }else if(no_change==1){
1077 // bisection, if interpolation failed to change min or max pos last time
1078 pos = (pos_min + pos_limit)>>1;
1079 }else{
1080 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1081 pos=pos_min;
1082 }
1083 if(pos <= pos_min)
1084 pos= pos_min + 1;
1085 else if(pos > pos_limit)
1086 pos= pos_limit;
1087 start_pos= pos;
1088
1089 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1090 if(pos == pos_max)
1091 no_change++;
1092 else
1093 no_change=0;
1094 #ifdef DEBUG_SEEK
1095 av_log(s, AV_LOG_DEBUG, "%Ld %Ld %Ld / %Ld %Ld %Ld target:%Ld limit:%Ld start:%Ld noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1096 #endif
1097 assert(ts != AV_NOPTS_VALUE);
1098 if (target_ts < ts) {
1099 pos_limit = start_pos - 1;
1100 pos_max = pos;
1101 ts_max = ts;
1102 } else {
1103 pos_min = pos;
1104 ts_min = ts;
1105 /* check if we are lucky */
1106 if (target_ts == ts)
1107 break;
1108 }
1109 }
1110
1111 pos = pos_min;
1112 #ifdef DEBUG_SEEK
1113 pos_min = pos;
1114 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1115 pos_min++;
1116 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1117 av_log(s, AV_LOG_DEBUG, "pos=0x%llx %lld<=%lld<=%lld\n",
1118 pos, ts_min, target_ts, ts_max);
1119 #endif
1120 /* do the seek */
1121 url_fseek(&s->pb, pos, SEEK_SET);
1122
1123 ts= av_rescale(ts_min, AV_TIME_BASE*(int64_t)st->time_base.num, st->time_base.den);
1124 for(i = 0; i < s->nb_streams; i++) {
1125 st = s->streams[i];
1126
1127 st->cur_dts = av_rescale(ts, st->time_base.den, AV_TIME_BASE*(int64_t)st->time_base.num);
1128 }
1129
1130 return 0;
1131 }
1132
1133 static int av_seek_frame_generic(AVFormatContext *s,
1134 int stream_index, int64_t timestamp)
1135 {
1136 int index, i;
1137 AVStream *st;
1138 AVIndexEntry *ie;
1139
1140 if (!s->index_built) {
1141 if (is_raw_stream(s)) {
1142 av_build_index_raw(s);
1143 } else {
1144 return -1;
1145 }
1146 s->index_built = 1;
1147 }
1148
1149 st = s->streams[stream_index];
1150 index = av_index_search_timestamp(st, timestamp);
1151 if (index < 0)
1152 return -1;
1153
1154 /* now we have found the index, we can seek */
1155 ie = &st->index_entries[index];
1156 av_read_frame_flush(s);
1157 url_fseek(&s->pb, ie->pos, SEEK_SET);
1158
1159 timestamp= av_rescale(ie->timestamp, AV_TIME_BASE*(int64_t)st->time_base.num, st->time_base.den);
1160 for(i = 0; i < s->nb_streams; i++) {
1161 st = s->streams[i];
1162
1163 st->cur_dts = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE*(int64_t)st->time_base.num);
1164 }
1165
1166 return 0;
1167 }
1168
1169 /**
1170 * Seek to the key frame just before the frame at timestamp
1171 * 'timestamp' in 'stream_index'.
1172 * @param stream_index If stream_index is (-1), a default
1173 * stream is selected
1174 * @param timestamp timestamp in AV_TIME_BASE units
1175 * @return >= 0 on success
1176 */
1177 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp)
1178 {
1179 int ret;
1180 AVStream *st;
1181
1182 av_read_frame_flush(s);
1183
1184 if(stream_index < 0){
1185 stream_index= av_find_default_stream_index(s);
1186 if(stream_index < 0)
1187 return -1;
1188 }
1189 st= s->streams[stream_index];
1190
1191 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1192
1193 /* first, we try the format specific seek */
1194 if (s->iformat->read_seek)
1195 ret = s->iformat->read_seek(s, stream_index, timestamp);
1196 else
1197 ret = -1;
1198 if (ret >= 0) {
1199 return 0;
1200 }
1201
1202 if(s->iformat->read_timestamp)
1203 return av_seek_frame_binary(s, stream_index, timestamp);
1204 else
1205 return av_seek_frame_generic(s, stream_index, timestamp);
1206 }
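/* Illustrative sketch (not compiled) of a seek request: the timestamp is
   given in AV_TIME_BASE units, so seeking to the 10 second mark looks like
   this.  Passing -1 as stream_index lets av_find_default_stream_index()
   pick a stream (the first video stream if there is one). */
#if 0
static int example_seek_to_10s(AVFormatContext *ic)
{
    return av_seek_frame(ic, -1, (int64_t)10 * AV_TIME_BASE);
}
#endif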
1207
1208 /*******************************************************/
1209
1210 /* return TRUE if the stream has accurate timings for at least one component */
1211 static int av_has_timings(AVFormatContext *ic)
1212 {
1213 int i;
1214 AVStream *st;
1215
1216 for(i = 0;i < ic->nb_streams; i++) {
1217 st = ic->streams[i];
1218 if (st->start_time != AV_NOPTS_VALUE &&
1219 st->duration != AV_NOPTS_VALUE)
1220 return 1;
1221 }
1222 return 0;
1223 }
1224
1225 /* estimate the stream timings from the ones of each component. Also
1226 compute the global bit rate if possible */
1227 static void av_update_stream_timings(AVFormatContext *ic)
1228 {
1229 int64_t start_time, end_time, end_time1;
1230 int i;
1231 AVStream *st;
1232
1233 start_time = MAXINT64;
1234 end_time = MININT64;
1235 for(i = 0;i < ic->nb_streams; i++) {
1236 st = ic->streams[i];
1237 if (st->start_time != AV_NOPTS_VALUE) {
1238 if (st->start_time < start_time)
1239 start_time = st->start_time;
1240 if (st->duration != AV_NOPTS_VALUE) {
1241 end_time1 = st->start_time + st->duration;
1242 if (end_time1 > end_time)
1243 end_time = end_time1;
1244 }
1245 }
1246 }
1247 if (start_time != MAXINT64) {
1248 ic->start_time = start_time;
1249 if (end_time != MAXINT64) {
1250 ic->duration = end_time - start_time;
1251 if (ic->file_size > 0) {
1252 /* compute the bit rate */
1253 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1254 (double)ic->duration;
1255 }
1256 }
1257 }
1258
1259 }
1260
1261 static void fill_all_stream_timings(AVFormatContext *ic)
1262 {
1263 int i;
1264 AVStream *st;
1265
1266 av_update_stream_timings(ic);
1267 for(i = 0;i < ic->nb_streams; i++) {
1268 st = ic->streams[i];
1269 if (st->start_time == AV_NOPTS_VALUE) {
1270 st->start_time = ic->start_time;
1271 st->duration = ic->duration;
1272 }
1273 }
1274 }
1275
1276 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1277 {
1278 int64_t filesize, duration;
1279 int bit_rate, i;
1280 AVStream *st;
1281
1282 /* if bit_rate is already set, we believe it */
1283 if (ic->bit_rate == 0) {
1284 bit_rate = 0;
1285 for(i=0;i<ic->nb_streams;i++) {
1286 st = ic->streams[i];
1287 bit_rate += st->codec.bit_rate;
1288 }
1289 ic->bit_rate = bit_rate;
1290 }
1291
1292 /* if duration is already set, we believe it */
1293 if (ic->duration == AV_NOPTS_VALUE &&
1294 ic->bit_rate != 0 &&
1295 ic->file_size != 0) {
1296 filesize = ic->file_size;
1297 if (filesize > 0) {
1298 duration = (int64_t)((8 * AV_TIME_BASE * (double)filesize) / (double)ic->bit_rate);
1299 for(i = 0; i < ic->nb_streams; i++) {
1300 st = ic->streams[i];
1301 if (st->start_time == AV_NOPTS_VALUE ||
1302 st->duration == AV_NOPTS_VALUE) {
1303 st->start_time = 0;
1304 st->duration = duration;
1305 }
1306 }
1307 }
1308 }
1309 }
1310
1311 #define DURATION_MAX_READ_SIZE 250000
1312
1313 /* only usable for MPEG-PS streams */
1314 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1315 {
1316 AVPacket pkt1, *pkt = &pkt1;
1317 AVStream *st;
1318 int read_size, i, ret;
1319 int64_t start_time, end_time, end_time1;
1320 int64_t filesize, offset, duration;
1321
1322 /* free previous packet */
1323 if (ic->cur_st && ic->cur_st->parser)
1324 av_free_packet(&ic->cur_pkt);
1325 ic->cur_st = NULL;
1326
1327 /* flush packet queue */
1328 flush_packet_queue(ic);
1329
1330 for(i=0;i<ic->nb_streams;i++) {
1331 st = ic->streams[i];
1332 if (st->parser) {
1333 av_parser_close(st->parser);
1334 st->parser= NULL;
1335 }
1336 }
1337
1338 /* we read the first packets to get the first PTS (not fully
1339 accurate, but it is enough now) */
1340 url_fseek(&ic->pb, 0, SEEK_SET);
1341 read_size = 0;
1342 for(;;) {
1343 if (read_size >= DURATION_MAX_READ_SIZE)
1344 break;
1345 /* if all info is available, we can stop */
1346 for(i = 0;i < ic->nb_streams; i++) {
1347 st = ic->streams[i];
1348 if (st->start_time == AV_NOPTS_VALUE)
1349 break;
1350 }
1351 if (i == ic->nb_streams)
1352 break;
1353
1354 ret = av_read_packet(ic, pkt);
1355 if (ret != 0)
1356 break;
1357 read_size += pkt->size;
1358 st = ic->streams[pkt->stream_index];
1359 if (pkt->pts != AV_NOPTS_VALUE) {
1360 if (st->start_time == AV_NOPTS_VALUE)
1361 st->start_time = av_rescale(pkt->pts, st->time_base.num * (int64_t)AV_TIME_BASE, st->time_base.den);
1362 }
1363 av_free_packet(pkt);
1364 }
1365
1366 /* we compute the minimum start_time and use it as default */
1367 start_time = MAXINT64;
1368 for(i = 0; i < ic->nb_streams; i++) {
1369 st = ic->streams[i];
1370 if (st->start_time != AV_NOPTS_VALUE &&
1371 st->start_time < start_time)
1372 start_time = st->start_time;
1373 }
1374 if (start_time != MAXINT64)
1375 ic->start_time = start_time;
1376
1377 /* estimate the end time (duration) */
1378 /* XXX: may need to support wrapping */
1379 filesize = ic->file_size;
1380 offset = filesize - DURATION_MAX_READ_SIZE;
1381 if (offset < 0)
1382 offset = 0;
1383
1384 url_fseek(&ic->pb, offset, SEEK_SET);
1385 read_size = 0;
1386 for(;;) {
1387 if (read_size >= DURATION_MAX_READ_SIZE)
1388 break;
1389 /* if all info is available, we can stop */
1390 for(i = 0;i < ic->nb_streams; i++) {
1391 st = ic->streams[i];
1392 if (st->duration == AV_NOPTS_VALUE)
1393 break;
1394 }
1395 if (i == ic->nb_streams)
1396 break;
1397
1398 ret = av_read_packet(ic, pkt);
1399 if (ret != 0)
1400 break;
1401 read_size += pkt->size;
1402 st = ic->streams[pkt->stream_index];
1403 if (pkt->pts != AV_NOPTS_VALUE) {
1404 end_time = av_rescale(pkt->pts, st->time_base.num * (int64_t)AV_TIME_BASE, st->time_base.den);
1405 duration = end_time - st->start_time;
1406 if (duration > 0) {
1407 if (st->duration == AV_NOPTS_VALUE ||
1408 st->duration < duration)
1409 st->duration = duration;
1410 }
1411 }
1412 av_free_packet(pkt);
1413 }
1414
1415 /* estimate total duration */
1416 end_time = MININT64;
1417 for(i = 0;i < ic->nb_streams; i++) {
1418 st = ic->streams[i];
1419 if (st->duration != AV_NOPTS_VALUE) {
1420 end_time1 = st->start_time + st->duration;
1421 if (end_time1 > end_time)
1422 end_time = end_time1;
1423 }
1424 }
1425
1426 /* update start_time (new stream may have been created, so we do
1427 it at the end) */
1428 if (ic->start_time != AV_NOPTS_VALUE) {
1429 for(i = 0; i < ic->nb_streams; i++) {
1430 st = ic->streams[i];
1431 if (st->start_time == AV_NOPTS_VALUE)
1432 st->start_time = ic->start_time;
1433 }
1434 }
1435
1436 if (end_time != MININT64) {
1437 /* put dummy values for duration if needed */
1438 for(i = 0;i < ic->nb_streams; i++) {
1439 st = ic->streams[i];
1440 if (st->duration == AV_NOPTS_VALUE &&
1441 st->start_time != AV_NOPTS_VALUE)
1442 st->duration = end_time - st->start_time;
1443 }
1444 ic->duration = end_time - ic->start_time;
1445 }
1446
1447 url_fseek(&ic->pb, 0, SEEK_SET);
1448 }
1449
1450 static void av_estimate_timings(AVFormatContext *ic)
1451 {
1452 URLContext *h;
1453 int64_t file_size;
1454
1455 /* get the file size, if possible */
1456 if (ic->iformat->flags & AVFMT_NOFILE) {
1457 file_size = 0;
1458 } else {
1459 h = url_fileno(&ic->pb);
1460 file_size = url_filesize(h);
1461 if (file_size < 0)
1462 file_size = 0;
1463 }
1464 ic->file_size = file_size;
1465
1466 if (ic->iformat == &mpegps_demux) {
1467 /* get accurate estimate from the PTSes */
1468 av_estimate_timings_from_pts(ic);
1469 } else if (av_has_timings(ic)) {
1470 /* at least one component has timings - we use them for all
1471 the components */
1472 fill_all_stream_timings(ic);
1473 } else {
1474 /* less precise: use bit rate info */
1475 av_estimate_timings_from_bit_rate(ic);
1476 }
1477 av_update_stream_timings(ic);
1478
1479 #if 0
1480 {
1481 int i;
1482 AVStream *st;
1483 for(i = 0;i < ic->nb_streams; i++) {
1484 st = ic->streams[i];
1485 printf("%d: start_time: %0.3f duration: %0.3f\n",
1486 i, (double)st->start_time / AV_TIME_BASE,
1487 (double)st->duration / AV_TIME_BASE);
1488 }
1489 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1490 (double)ic->start_time / AV_TIME_BASE,
1491 (double)ic->duration / AV_TIME_BASE,
1492 ic->bit_rate / 1000);
1493 }
1494 #endif
1495 }
1496
1497 static int has_codec_parameters(AVCodecContext *enc)
1498 {
1499 int val;
1500 switch(enc->codec_type) {
1501 case CODEC_TYPE_AUDIO:
1502 val = enc->sample_rate;
1503 break;
1504 case CODEC_TYPE_VIDEO:
1505 val = enc->width;
1506 break;
1507 default:
1508 val = 1;
1509 break;
1510 }
1511 return (val != 0);
1512 }
1513
1514 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1515 {
1516 int16_t *samples;
1517 AVCodec *codec;
1518 int got_picture, ret;
1519 AVFrame picture;
1520
1521 codec = avcodec_find_decoder(st->codec.codec_id);
1522 if (!codec)
1523 return -1;
1524 ret = avcodec_open(&st->codec, codec);
1525 if (ret < 0)
1526 return ret;
1527 switch(st->codec.codec_type) {
1528 case CODEC_TYPE_VIDEO:
1529 ret = avcodec_decode_video(&st->codec, &picture,
1530 &got_picture, (uint8_t *)data, size);
1531 break;
1532 case CODEC_TYPE_AUDIO:
1533 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1534 if (!samples)
1535 goto fail;
1536 ret = avcodec_decode_audio(&st->codec, samples,
1537 &got_picture, (uint8_t *)data, size);
1538 av_free(samples);
1539 break;
1540 default:
1541 break;
1542 }
1543 fail:
1544 avcodec_close(&st->codec);
1545 return ret;
1546 }
1547
1548 /* absolute maximum size we read until we abort */
1549 #define MAX_READ_SIZE 5000000
1550
1551 /* maximum duration until we stop analysing the stream */
1552 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 1.0))
1553
1554 /**
1555 * Read the beginning of a media file to get stream information. This
1556 * is useful for file formats with no headers such as MPEG. This
1557 * function also computes the real frame rate in case of MPEG-2 repeat
1558 * frame mode.
1559 *
1560 * @param ic media file handle
1561 * @return >=0 if OK. AVERROR_xxx if error.
1562 */
1563 int av_find_stream_info(AVFormatContext *ic)
1564 {
1565 int i, count, ret, read_size;
1566 AVStream *st;
1567 AVPacket pkt1, *pkt;
1568 AVPacketList *pktl=NULL, **ppktl;
1569
1570 count = 0;
1571 read_size = 0;
1572 ppktl = &ic->packet_buffer;
1573 for(;;) {
1574 /* check if one codec still needs to be handled */
1575 for(i=0;i<ic->nb_streams;i++) {
1576 st = ic->streams[i];
1577 if (!has_codec_parameters(&st->codec))
1578 break;
1579 }
1580 if (i == ic->nb_streams) {
1581 /* NOTE: if the format has no header, then we need to read
1582 some packets to get most of the streams, so we cannot
1583 stop here */
1584 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1585 /* if we found the info for all the codecs, we can stop */
1586 ret = count;
1587 break;
1588 }
1589 } else {
1590 /* we did not get all the codec info, but we read too much data */
1591 if (read_size >= MAX_READ_SIZE) {
1592 ret = count;
1593 break;
1594 }
1595 }
1596
1597 /* NOTE: a new stream can be added there if no header in file
1598 (AVFMTCTX_NOHEADER) */
1599 ret = av_read_frame_internal(ic, &pkt1);
1600 if (ret < 0) {
1601 /* EOF or error */
1602 ret = -1; /* we could not have all the codec parameters before EOF */
1603 if ((ic->ctx_flags & AVFMTCTX_NOHEADER) &&
1604 i == ic->nb_streams)
1605 ret = 0;
1606 break;
1607 }
1608
1609 pktl = av_mallocz(sizeof(AVPacketList));
1610 if (!pktl) {
1611 ret = AVERROR_NOMEM;
1612 break;
1613 }
1614
1615 /* add the packet in the buffered packet list */
1616 *ppktl = pktl;
1617 ppktl = &pktl->next;
1618
1619 pkt = &pktl->pkt;
1620 *pkt = pkt1;
1621
1622 /* duplicate the packet */
1623 if (av_dup_packet(pkt) < 0) {
1624 ret = AVERROR_NOMEM;
1625 break;
1626 }
1627
1628 read_size += pkt->size;
1629
1630 st = ic->streams[pkt->stream_index];
1631 st->codec_info_duration += pkt->duration;
1632 if (pkt->duration != 0)
1633 st->codec_info_nb_frames++;
1634
1635 /* if still no information, we try to open the codec and to
1636 decompress the frame. We try to avoid that in most cases as
1637 it takes longer and uses more memory. For MPEG4, we need to
1638 decompress for Quicktime. */
1639 if (!has_codec_parameters(&st->codec) &&
1640 (st->codec.codec_id == CODEC_ID_FLV1 ||
1641 st->codec.codec_id == CODEC_ID_H264 ||
1642 st->codec.codec_id == CODEC_ID_H263 ||
1643 st->codec.codec_id == CODEC_ID_VORBIS ||
1644 st->codec.codec_id == CODEC_ID_MJPEG ||
1645 (st->codec.codec_id == CODEC_ID_MPEG4 && !st->need_parsing)))
1646 try_decode_frame(st, pkt->data, pkt->size);
1647
1648 if (st->codec_info_duration >= MAX_STREAM_DURATION) {
1649 break;
1650 }
1651 count++;
1652 }
1653
1654 /* set real frame rate info */
1655 for(i=0;i<ic->nb_streams;i++) {
1656 st = ic->streams[i];
1657 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1658 /* compute the real frame rate for telecine */
1659 if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
1660 st->codec.codec_id == CODEC_ID_MPEG2VIDEO) &&
1661 st->codec.sub_id == 2) {
1662 if (st->codec_info_nb_frames >= 20) {
1663 float coded_frame_rate, est_frame_rate;
1664 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
1665 (double)st->codec_info_duration ;
1666 coded_frame_rate = (double)st->codec.frame_rate /
1667 (double)st->codec.frame_rate_base;
1668 #if 0
1669 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
1670 coded_frame_rate, est_frame_rate);
1671 #endif
1672 /* if we detect that it could be a telecine, we
1673 signal it. It would be better to do it at a
1674 higher level as it can change in a film */
1675 if (coded_frame_rate >= 24.97 &&
1676 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
1677 st->r_frame_rate = 24024;
1678 st->r_frame_rate_base = 1001;
1679 }
1680 }
1681 }
1682 /* if no real frame rate, use the codec one */
1683 if (!st->r_frame_rate){
1684 st->r_frame_rate = st->codec.frame_rate;
1685 st->r_frame_rate_base = st->codec.frame_rate_base;
1686 }
1687 }
1688 }
1689
1690 av_estimate_timings(ic);
1691 #if 0
1692 /* correct DTS for b frame streams with no timestamps */
1693 for(i=0;i<ic->nb_streams;i++) {
1694 st = ic->streams[i];
1695 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1696 if(b-frames){
1697 ppktl = &ic->packet_buffer;
1698 while(ppkt1){
1699 if(ppkt1->stream_index != i)
1700 continue;
1701 if(ppkt1->pkt->dts < 0)
1702 break;
1703 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
1704 break;
1705 ppkt1->pkt->dts -= delta;
1706 ppkt1= ppkt1->next;
1707 }
1708 if(ppkt1)
1709 continue;
1710 st->cur_dts -= delta;
1711 }
1712 }
1713 }
1714 #endif
1715 return ret;
1716 }
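/* Illustrative sketch (not compiled) of the usual call order after opening
   a file: av_find_stream_info() fills in the codec parameters and timings,
   after which the streams can be inspected or printed with dump_format(). */
#if 0
static int example_inspect_streams(AVFormatContext *ic)
{
    int i;

    if (av_find_stream_info(ic) < 0)
        return -1;
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];
        if (st->codec.codec_type == CODEC_TYPE_VIDEO)
            av_log(ic, AV_LOG_DEBUG, "stream %d: video %dx%d\n",
                   i, st->codec.width, st->codec.height);
    }
    return 0;
}
#endif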
1717
1718 /*******************************************************/
1719
1720 /**
1721 * Start playing a network-based stream (e.g. RTSP stream) at the
1722 * current position
1723 */
1724 int av_read_play(AVFormatContext *s)
1725 {
1726 if (!s->iformat->read_play)
1727 return AVERROR_NOTSUPP;
1728 return s->iformat->read_play(s);
1729 }
1730
1731 /**
1732 * Pause a network-based stream (e.g. RTSP stream). Use av_read_play()
1733 * to resume it.
1734 */
1735 int av_read_pause(AVFormatContext *s)
1736 {
1737 if (!s->iformat->read_pause)
1738 return AVERROR_NOTSUPP;
1739 return s->iformat->read_pause(s);
1740 }
1741
1742 /**
1743 * Close a media file (but not its codecs)
1744 *
1745 * @param s media file handle
1746 */
1747 void av_close_input_file(AVFormatContext *s)
1748 {
1749 int i, must_open_file;
1750 AVStream *st;
1751
1752 /* free previous packet */
1753 if (s->cur_st && s->cur_st->parser)
1754 av_free_packet(&s->cur_pkt);
1755
1756 if (s->iformat->read_close)
1757 s->iformat->read_close(s);
1758 for(i=0;i<s->nb_streams;i++) {
1759 /* free all data in a stream component */
1760 st = s->streams[i];
1761 if (st->parser) {
1762 av_parser_close(st->parser);
1763 }
1764 av_free(st->index_entries);
1765 av_free(st);
1766 }
1767 flush_packet_queue(s);
1768 must_open_file = 1;
1769 if (s->iformat->flags & AVFMT_NOFILE) {
1770 must_open_file = 0;
1771 }
1772 if (must_open_file) {
1773 url_fclose(&s->pb);
1774 }
1775 av_freep(&s->priv_data);
1776 av_free(s);
1777 }
1778
1779 /**
1780 * Add a new stream to a media file. Can only be called in the
1781 * read_header function. If the flag AVFMTCTX_NOHEADER is in the
1782 * format context, then new streams can be added in read_packet too.
1783 *
1784 *
1785 * @param s media file handle
1786 * @param id file format dependent stream id
1787 */
1788 AVStream *av_new_stream(AVFormatContext *s, int id)
1789 {
1790 AVStream *st;
1791
1792 if (s->nb_streams >= MAX_STREAMS)
1793 return NULL;
1794
1795 st = av_mallocz(sizeof(AVStream));
1796 if (!st)
1797 return NULL;
1798 avcodec_get_context_defaults(&st->codec);
1799 if (s->iformat) {
1800 /* no default bitrate if decoding */
1801 st->codec.bit_rate = 0;
1802 }
1803 st->index = s->nb_streams;
1804 st->id = id;
1805 st->start_time = AV_NOPTS_VALUE;
1806 st->duration = AV_NOPTS_VALUE;
1807 st->cur_dts = AV_NOPTS_VALUE;
1808
1809 /* default pts setting is MPEG-like */
1810 av_set_pts_info(st, 33, 1, 90000);
1811 st->last_IP_pts = AV_NOPTS_VALUE;
1812
1813 s->streams[s->nb_streams++] = st;
1814 return st;
1815 }
1816
1817 /************************************************************/
1818 /* output media file */
1819
1820 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
1821 {
1822 int ret;
1823
1824 if (s->oformat->priv_data_size > 0) {
1825 s->priv_data = av_mallocz(s->oformat->priv_data_size);
1826 if (!s->priv_data)
1827 return AVERROR_NOMEM;
1828 } else
1829 s->priv_data = NULL;
1830
1831 if (s->oformat->set_parameters) {
1832 ret = s->oformat->set_parameters(s, ap);
1833 if (ret < 0)
1834 return ret;
1835 }
1836 return 0;
1837 }
1838
1839 /**
1840 * allocate the stream private data and write the stream header to an
1841 * output media file
1842 *
1843 * @param s media file handle
1844 * @return 0 if OK. AVERROR_xxx if error.
1845 */
1846 int av_write_header(AVFormatContext *s)
1847 {
1848 int ret, i;
1849 AVStream *st;
1850
1851 ret = s->oformat->write_header(s);
1852 if (ret < 0)
1853 return ret;
1854
1855 /* init PTS generation */
1856 for(i=0;i<s->nb_streams;i++) {
1857 st = s->streams[i];
1858
1859 switch (st->codec.codec_type) {
1860 case CODEC_TYPE_AUDIO:
1861 av_frac_init(&st->pts, 0, 0,
1862 (int64_t)st->time_base.num * st->codec.sample_rate);
1863 break;
1864 case CODEC_TYPE_VIDEO:
1865 av_frac_init(&st->pts, 0, 0,
1866 (int64_t)st->time_base.num * st->codec.frame_rate);
1867 break;
1868 default:
1869 break;
1870 }
1871 }
1872 return 0;
1873 }
1874
1875 //FIXME merge with compute_pkt_fields
1876 static void compute_pkt_fields2(AVStream *st, AVPacket *pkt){
1877 int b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
1878 int num, den, frame_size;
1879
1880 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
1881
1882 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
1883 return -1;*/
1884
1885 if(pkt->pts != AV_NOPTS_VALUE)
1886 pkt->pts = av_rescale(pkt->pts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1887 if(pkt->dts != AV_NOPTS_VALUE)
1888 pkt->dts = av_rescale(pkt->dts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1889
1890 /* duration field */
1891 pkt->duration = av_rescale(pkt->duration, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1892 if (pkt->duration == 0) {
1893 compute_frame_duration(&num, &den, st, NULL, pkt);
1894 if (den && num) {
1895 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
1896 }
1897 }
1898
1899 //XXX/FIXME this is a temporary hack until all encoders output pts
1900 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
1901 pkt->dts=
1902 // pkt->pts= st->cur_dts;
1903 pkt->pts= st->pts.val;
1904 }
1905
1906 //calculate dts from pts
1907 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
1908 if(b_frames){
1909 if(st->last_IP_pts == AV_NOPTS_VALUE){
1910 st->last_IP_pts= -pkt->duration;
1911 }
1912 if(st->last_IP_pts < pkt->pts){
1913 pkt->dts= st->last_IP_pts;
1914 st->last_IP_pts= pkt->pts;
1915 }else
1916 pkt->dts= pkt->pts;
1917 }else
1918 pkt->dts= pkt->pts;
1919 }
1920
1921 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
1922 st->cur_dts= pkt->dts;
1923 st->pts.val= pkt->dts;
1924
1925 /* update pts */
1926 switch (st->codec.codec_type) {
1927 case CODEC_TYPE_AUDIO:
1928 frame_size = get_audio_frame_size(&st->codec, pkt->size);
1929
1930 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
1931 but it would be better if we had the real timestamps from the encoder */
1932 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
1933 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
1934 }
1935 break;
1936 case CODEC_TYPE_VIDEO:
1937 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec.frame_rate_base);
1938 break;
1939 default:
1940 break;
1941 }
1942 }
1943
1944 static void truncate_ts(AVStream *st, AVPacket *pkt){
1945 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
1946
1947 if(pkt->dts < 0)
1948 pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further investigation about what we should do here
1949
1950 pkt->pts &= pts_mask;
1951 pkt->dts &= pts_mask;
1952 }
1953
1954 /**
1955 * Write a packet to an output media file. The packet shall contain
1956 * one audio or video frame.
1957 *
1958 * @param s media file handle
1959 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
1960 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
1961 */
1962 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
1963 {
1964 compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
1965
1966 truncate_ts(s->streams[pkt->stream_index], pkt);
1967
1968 return s->oformat->write_packet(s, pkt);
1969 }
1970
1971 /**
1972 * Writes a packet to an output media file ensuring correct interleaving.
1973 * The packet shall contain one audio or video frame.
1974 * If the packets are already correctly interleaved the application should
1975 * call av_write_frame() instead, as it is slightly faster. It is also important
1976 * to keep in mind that non-interleaved input will need huge amounts
1977 * of memory to interleave with this, so it is preferable to interleave at the
1978 * demuxer level.
1979 *
1980 * @param s media file handle
1981 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
1982 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
1983 */
1984 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
1985 AVPacketList *pktl, **next_point, *this_pktl;
1986 int stream_count=0;
1987 int streams[MAX_STREAMS];
1988 AVStream *st= s->streams[ pkt->stream_index];
1989
1990 compute_pkt_fields2(st, pkt);
1991
1992 //FIXME/XXX/HACK drop zero sized packets
1993 if(st->codec.codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
1994 return 0;
1995
1996 if(pkt->dts == AV_NOPTS_VALUE)
1997 return -1;
1998
1999 assert(pkt->destruct != av_destruct_packet); //FIXME
2000
2001 this_pktl = av_mallocz(sizeof(AVPacketList));
2002 this_pktl->pkt= *pkt;
2003 av_dup_packet(&this_pktl->pkt);
2004
2005 next_point = &s->packet_buffer;
2006 while(*next_point){
2007 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2008 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2009 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2010 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2011 break;
2012 next_point= &(*next_point)->next;
2013 }
2014 this_pktl->next= *next_point;
2015 *next_point= this_pktl;
2016
2017 memset(streams, 0, sizeof(streams));
2018 pktl= s->packet_buffer;
2019 while(pktl){
2020 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2021 if(streams[ pktl->pkt.stream_index ] == 0)
2022 stream_count++;
2023 streams[ pktl->pkt.stream_index ]++;
2024 pktl= pktl->next;
2025 }
2026
2027 while(s->nb_streams == stream_count){
2028 int ret;
2029
2030 pktl= s->packet_buffer;
2031 //av_log(s, AV_LOG_DEBUG, "write st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2032 truncate_ts(s->streams[pktl->pkt.stream_index], &pktl->pkt);
2033 ret= s->oformat->write_packet(s, &pktl->pkt);
2034
2035 s->packet_buffer= pktl->next;
2036 if((--streams[ pktl->pkt.stream_index ]) == 0)
2037 stream_count--;
2038
2039 av_free_packet(&pktl->pkt);
2040 av_freep(&pktl);
2041
2042 if(ret<0)
2043 return ret;
2044 }
2045 return 0;
2046 }
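/* Usage sketch (editor's illustration): letting the library interleave
 * packets coming from separate audio and video encoders; 'oc' is the output
 * AVFormatContext, and both packets are assumed to carry a valid dts and the
 * proper stream_index. Buffered packets are duplicated internally with
 * av_dup_packet().
 *
 *     if (av_interleaved_write_frame(oc, &video_pkt) < 0 ||
 *         av_interleaved_write_frame(oc, &audio_pkt) < 0)
 *         return -1;
 */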
2047
2048 /**
2049  * Write the stream trailer to an output media file and free the
2050  * file private data.
2051 *
2052 * @param s media file handle
2053  * @return 0 if OK, AVERROR_xxx on error. */
2054 int av_write_trailer(AVFormatContext *s)
2055 {
2056 int ret;
2057
2058 while(s->packet_buffer){
2059 int ret;
2060 AVPacketList *pktl= s->packet_buffer;
2061
2062 //av_log(s, AV_LOG_DEBUG, "write_trailer st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2063 truncate_ts(s->streams[pktl->pkt.stream_index], &pktl->pkt);
2064 ret= s->oformat->write_packet(s, &pktl->pkt);
2065
2066 s->packet_buffer= pktl->next;
2067
2068 av_free_packet(&pktl->pkt);
2069 av_freep(&pktl);
2070
2071 if(ret<0)
2072 return ret;
2073 }
2074
2075 ret = s->oformat->write_trailer(s);
2076 av_freep(&s->priv_data);
2077 return ret;
2078 }
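/* Usage sketch (editor's illustration): finishing a muxing session. Note that
 * av_write_trailer() also flushes whatever av_interleaved_write_frame() still
 * has buffered. The url_fclose() call assumes the output was opened with
 * url_fopen() into &oc->pb.
 *
 *     if (av_write_trailer(oc) < 0)
 *         fprintf(stderr, "could not write trailer\n");
 *     url_fclose(&oc->pb);
 */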
2079
2080 /* "user interface" functions */
2081
2082 void dump_format(AVFormatContext *ic,
2083 int index,
2084 const char *url,
2085 int is_output)
2086 {
2087 int i, flags;
2088 char buf[256];
2089
2090 av_log(NULL, AV_LOG_DEBUG, "%s #%d, %s, %s '%s':\n",
2091 is_output ? "Output" : "Input",
2092 index,
2093 is_output ? ic->oformat->name : ic->iformat->name,
2094 is_output ? "to" : "from", url);
2095 if (!is_output) {
2096 av_log(NULL, AV_LOG_DEBUG, " Duration: ");
2097 if (ic->duration != AV_NOPTS_VALUE) {
2098 int hours, mins, secs, us;
2099 secs = ic->duration / AV_TIME_BASE;
2100 us = ic->duration % AV_TIME_BASE;
2101 mins = secs / 60;
2102 secs %= 60;
2103 hours = mins / 60;
2104 mins %= 60;
2105 av_log(NULL, AV_LOG_DEBUG, "%02d:%02d:%02d.%01d", hours, mins, secs,
2106 (10 * us) / AV_TIME_BASE);
2107 } else {
2108 av_log(NULL, AV_LOG_DEBUG, "N/A");
2109 }
2110 av_log(NULL, AV_LOG_DEBUG, ", bitrate: ");
2111 if (ic->bit_rate) {
2112 av_log(NULL, AV_LOG_DEBUG,"%d kb/s", ic->bit_rate / 1000);
2113 } else {
2114 av_log(NULL, AV_LOG_DEBUG, "N/A");
2115 }
2116 av_log(NULL, AV_LOG_DEBUG, "\n");
2117 }
2118 for(i=0;i<ic->nb_streams;i++) {
2119 AVStream *st = ic->streams[i];
2120 avcodec_string(buf, sizeof(buf), &st->codec, is_output);
2121 av_log(NULL, AV_LOG_DEBUG, " Stream #%d.%d", index, i);
2122 /* the PID is important information, so we display it */
2123 /* XXX: add a generic system */
2124 if (is_output)
2125 flags = ic->oformat->flags;
2126 else
2127 flags = ic->iformat->flags;
2128 if (flags & AVFMT_SHOW_IDS) {
2129 av_log(NULL, AV_LOG_DEBUG, "[0x%x]", st->id);
2130 }
2131 av_log(NULL, AV_LOG_DEBUG, ": %s\n", buf);
2132 }
2133 }
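/* Usage sketch (editor's illustration): printing a one-screen summary of an
 * input file, assuming 'ic' was filled in by av_open_input_file() and
 * av_find_stream_info().
 *
 *     dump_format(ic, 0, filename, 0);
 */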
2134
2135 typedef struct {
2136 const char *abv;
2137 int width, height;
2138 int frame_rate, frame_rate_base;
2139 } AbvEntry;
2140
2141 static AbvEntry frame_abvs[] = {
2142 { "ntsc", 720, 480, 30000, 1001 },
2143 { "pal", 720, 576, 25, 1 },
2144 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2145 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2146 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2147 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2148 { "film", 352, 240, 24, 1 },
2149 { "ntsc-film", 352, 240, 24000, 1001 },
2150 { "sqcif", 128, 96, 0, 0 },
2151 { "qcif", 176, 144, 0, 0 },
2152 { "cif", 352, 288, 0, 0 },
2153 { "4cif", 704, 576, 0, 0 },
2154 };
2155
2156 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2157 {
2158 int i;
2159 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2160 const char *p;
2161 int frame_width = 0, frame_height = 0;
2162
2163 for(i=0;i<n;i++) {
2164 if (!strcmp(frame_abvs[i].abv, str)) {
2165 frame_width = frame_abvs[i].width;
2166 frame_height = frame_abvs[i].height;
2167 break;
2168 }
2169 }
2170 if (i == n) {
2171 p = str;
2172 frame_width = strtol(p, (char **)&p, 10);
2173 if (*p)
2174 p++;
2175 frame_height = strtol(p, (char **)&p, 10);
2176 }
2177 if (frame_width <= 0 || frame_height <= 0)
2178 return -1;
2179 *width_ptr = frame_width;
2180 *height_ptr = frame_height;
2181 return 0;
2182 }
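/* Usage sketch (editor's illustration): both an abbreviation from the table
 * above and an explicit "<width>x<height>" string are accepted.
 *
 *     int w, h;
 *     parse_image_size(&w, &h, "cif");      // w == 352, h == 288
 *     parse_image_size(&w, &h, "640x480");  // w == 640, h == 480
 */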
2183
2184 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2185 {
2186 int i;
2187 char* cp;
2188
2189 /* First, we check our abbreviation table */
2190 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2191 if (!strcmp(frame_abvs[i].abv, arg)) {
2192 *frame_rate = frame_abvs[i].frame_rate;
2193 *frame_rate_base = frame_abvs[i].frame_rate_base;
2194 return 0;
2195 }
2196
2197 /* Then, we try to parse it as fraction */
2198 cp = strchr(arg, '/');
2199 if (cp) {
2200 char* cpp;
2201 *frame_rate = strtol(arg, &cpp, 10);
2202 if (cpp != arg || cpp == cp)
2203 *frame_rate_base = strtol(cp+1, &cpp, 10);
2204 else
2205 *frame_rate = 0;
2206 }
2207 else {
2208 /* Finally we give up and parse it as double */
2209 *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q()
2210 *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5);
2211 }
2212 if (!*frame_rate || !*frame_rate_base)
2213 return -1;
2214 else
2215 return 0;
2216 }
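/* Usage sketch (editor's illustration): abbreviations, fractions and plain
 * decimal values are all accepted.
 *
 *     int rate, base;
 *     parse_frame_rate(&rate, &base, "ntsc");        // 30000 / 1001
 *     parse_frame_rate(&rate, &base, "24000/1001");  // 24000 / 1001
 *     parse_frame_rate(&rate, &base, "25");          // 25 * DEFAULT_FRAME_RATE_BASE / DEFAULT_FRAME_RATE_BASE
 */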
2217
2218 /* Syntax:
2219 * - If not a duration:
2220 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2221  * Time is local time unless 'Z' is appended, in which case it is GMT.
2222  * Returns the date in microseconds since 1970.
2223 * - If duration:
2224 * HH[:MM[:SS[.m...]]]
2225 * S+[.m...]
2226 */
2227 int64_t parse_date(const char *datestr, int duration)
2228 {
2229 const char *p;
2230 int64_t t;
2231 struct tm dt;
2232 int i;
2233 static const char *date_fmt[] = {
2234 "%Y-%m-%d",
2235 "%Y%m%d",
2236 };
2237 static const char *time_fmt[] = {
2238 "%H:%M:%S",
2239 "%H%M%S",
2240 };
2241 const char *q;
2242 int is_utc, len;
2243 char lastch;
2244 int negative = 0;
2245
2246 #undef time
2247 time_t now = time(0);
2248
2249 len = strlen(datestr);
2250 if (len > 0)
2251 lastch = datestr[len - 1];
2252 else
2253 lastch = '\0';
2254 is_utc = (lastch == 'z' || lastch == 'Z');
2255
2256 memset(&dt, 0, sizeof(dt));
2257
2258 p = datestr;
2259 q = NULL;
2260 if (!duration) {
2261 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2262 q = small_strptime(p, date_fmt[i], &dt);
2263 if (q) {
2264 break;
2265 }
2266 }
2267
2268 if (!q) {
2269 if (is_utc) {
2270 dt = *gmtime(&now);
2271 } else {
2272 dt = *localtime(&now);
2273 }
2274 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2275 } else {
2276 p = q;
2277 }
2278
2279 if (*p == 'T' || *p == 't' || *p == ' ')
2280 p++;
2281
2282 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2283 q = small_strptime(p, time_fmt[i], &dt);
2284 if (q) {
2285 break;
2286 }
2287 }
2288 } else {
2289 if (p[0] == '-') {
2290 negative = 1;
2291 ++p;
2292 }
2293 q = small_strptime(p, time_fmt[0], &dt);
2294 if (!q) {
2295 dt.tm_sec = strtol(p, (char **)&q, 10);
2296 dt.tm_min = 0;
2297 dt.tm_hour = 0;
2298 }
2299 }
2300
2301 /* Now we have all the fields that we can get */
2302 if (!q) {
2303 if (duration)
2304 return 0;
2305 else
2306 return now * int64_t_C(1000000);
2307 }
2308
2309 if (duration) {
2310 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2311 } else {
2312 dt.tm_isdst = -1; /* unknown */
2313 if (is_utc) {
2314 t = mktimegm(&dt);
2315 } else {
2316 t = mktime(&dt);
2317 }
2318 }
2319
2320 t *= 1000000;
2321
2322 if (*q == '.') {
2323 int val, n;
2324 q++;
2325 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2326 if (!isdigit(*q))
2327 break;
2328 val += n * (*q - '0');
2329 }
2330 t += val;
2331 }
2332 return negative ? -t : t;
2333 }
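/* Usage sketch (editor's illustration): the first call parses an absolute
 * date (GMT because of the trailing 'Z'), the second a duration; both
 * results are in microseconds.
 *
 *     int64_t when = parse_date("2003-07-04T12:00:00Z", 0);
 *     int64_t span = parse_date("00:01:30.500", 1);    // 90500000, i.e. 90.5 s
 */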
2334
2335 /* syntax: '?tag1=val1&tag2=val2...'. Only minimal URL decoding is done
2336 ('+' is converted to a space). Returns 1 if the tag is found. */
2337 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2338 {
2339 const char *p;
2340 char tag[128], *q;
2341
2342 p = info;
2343 if (*p == '?')
2344 p++;
2345 for(;;) {
2346 q = tag;
2347 while (*p != '\0' && *p != '=' && *p != '&') {
2348 if ((q - tag) < sizeof(tag) - 1)
2349 *q++ = *p;
2350 p++;
2351 }
2352 *q = '\0';
2353 q = arg;
2354 if (*p == '=') {
2355 p++;
2356 while (*p != '&' && *p != '\0') {
2357 if ((q - arg) < arg_size - 1) {
2358 if (*p == '+')
2359 *q++ = ' ';
2360 else
2361 *q++ = *p;
2362 }
2363 p++;
2364 }
2365 *q = '\0';
2366 }
2367 if (!strcmp(tag, tag1))
2368 return 1;
2369 if (*p != '&')
2370 break;
2371 p++;
2372 }
2373 return 0;
2374 }
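/* Usage sketch (editor's illustration): extracting one parameter from an
 * option string as passed in some protocol URLs.
 *
 *     char buf[64];
 *     if (find_info_tag(buf, sizeof(buf), "bitrate", "?bitrate=128&mode=mono"))
 *         ;  // buf now contains "128"
 */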
2375
2376 /* Return in 'buf' the path with '%d' replaced by number. Also handles
2377 the '%0nd' format where 'n' is the total number of digits and
2378 '%%'. Returns 0 if OK, -1 on format error */
2379 int get_frame_filename(char *buf, int buf_size,
2380 const char *path, int number)
2381 {
2382 const char *p;
2383 char *q, buf1[20], c;
2384 int nd, len, percentd_found;
2385
2386 q = buf;
2387 p = path;
2388 percentd_found = 0;
2389 for(;;) {
2390 c = *p++;
2391 if (c == '\0')
2392 break;
2393 if (c == '%') {
2394 do {
2395 nd = 0;
2396 while (isdigit(*p)) {
2397 nd = nd * 10 + *p++ - '0';
2398 }
2399 c = *p++;
2400 } while (isdigit(c));
2401
2402 switch(c) {
2403 case '%':
2404 goto addchar;
2405 case 'd':
2406 if (percentd_found)
2407 goto fail;
2408 percentd_found = 1;
2409 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2410 len = strlen(buf1);
2411 if ((q - buf + len) > buf_size - 1)
2412 goto fail;
2413 memcpy(q, buf1, len);
2414 q += len;
2415 break;
2416 default:
2417 goto fail;
2418 }
2419 } else {
2420 addchar:
2421 if ((q - buf) < buf_size - 1)
2422 *q++ = c;
2423 }
2424 }
2425 if (!percentd_found)
2426 goto fail;
2427 *q = '\0';
2428 return 0;
2429 fail:
2430 *q = '\0';
2431 return -1;
2432 }
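/* Usage sketch (editor's illustration):
 *
 *     char name[64];
 *     get_frame_filename(name, sizeof(name), "img%03d.jpg", 7);   // "img007.jpg"
 *     get_frame_filename(name, sizeof(name), "frame%d.ppm", 12);  // "frame12.ppm"
 */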
2433
2434 /**
2435  * Print a nice hexadecimal dump of a buffer
2436 * @param f stream for output
2437 * @param buf buffer
2438 * @param size buffer size
2439 */
2440 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2441 {
2442 int len, i, j, c;
2443
2444 for(i=0;i<size;i+=16) {
2445 len = size - i;
2446 if (len > 16)
2447 len = 16;
2448 fprintf(f, "%08x ", i);
2449 for(j=0;j<16;j++) {
2450 if (j < len)
2451 fprintf(f, " %02x", buf[i+j]);
2452 else
2453 fprintf(f, " ");
2454 }
2455 fprintf(f, " ");
2456 for(j=0;j<len;j++) {
2457 c = buf[i+j];
2458 if (c < ' ' || c > '~')
2459 c = '.';
2460 fprintf(f, "%c", c);
2461 }
2462 fprintf(f, "\n");
2463 }
2464 }
2465
2466 /**
2467  * Print a nice dump of a packet to 'f'
2468 * @param f stream for output
2469 * @param pkt packet to dump
2470 * @param dump_payload true if the payload must be displayed too
2471 */
2472 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2473 {
2474 fprintf(f, "stream #%d:\n", pkt->stream_index);
2475 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2476 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2477 /* DTS is _always_ valid after av_read_frame() */
2478 fprintf(f, " dts=");
2479 if (pkt->dts == AV_NOPTS_VALUE)
2480 fprintf(f, "N/A");
2481 else
2482 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
2483 /* PTS may not be known if B-frames are present */
2484 fprintf(f, " pts=");
2485 if (pkt->pts == AV_NOPTS_VALUE)
2486 fprintf(f, "N/A");
2487 else
2488 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
2489 fprintf(f, "\n");
2490 fprintf(f, " size=%d\n", pkt->size);
2491 if (dump_payload)
2492 av_hex_dump(f, pkt->data, pkt->size);
2493 }
2494
2495 void url_split(char *proto, int proto_size,
2496 char *hostname, int hostname_size,
2497 int *port_ptr,
2498 char *path, int path_size,
2499 const char *url)
2500 {
2501 const char *p;
2502 char *q;
2503 int port;
2504
2505 port = -1;
2506
2507 p = url;
2508 q = proto;
2509 while (*p != ':' && *p != '\0') {
2510 if ((q - proto) < proto_size - 1)
2511 *q++ = *p;
2512 p++;
2513 }
2514 if (proto_size > 0)
2515 *q = '\0';
2516 if (*p == '\0') {
2517 if (proto_size > 0)
2518 proto[0] = '\0';
2519 if (hostname_size > 0)
2520 hostname[0] = '\0';
2521 p = url;
2522 } else {
2523 p++;
2524 if (*p == '/')
2525 p++;
2526 if (*p == '/')
2527 p++;
2528 q = hostname;
2529 while (*p != ':' && *p != '/' && *p != '?' && *p != '\0') {
2530 if ((q - hostname) < hostname_size - 1)
2531 *q++ = *p;
2532 p++;
2533 }
2534 if (hostname_size > 0)
2535 *q = '\0';
2536 if (*p == ':') {
2537 p++;
2538 port = strtoul(p, (char **)&p, 10);
2539 }
2540 }
2541 if (port_ptr)
2542 *port_ptr = port;
2543 pstrcpy(path, path_size, p);
2544 }
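/* Usage sketch (editor's illustration): splitting a URL into its components.
 * If no port is present, *port_ptr is set to -1.
 *
 *     char proto[16], host[128], path[256];
 *     int port;
 *     url_split(proto, sizeof(proto), host, sizeof(host), &port,
 *               path, sizeof(path), "http://localhost:8090/feed1.ffm");
 *     // proto == "http", host == "localhost", port == 8090, path == "/feed1.ffm"
 */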
2545
2546 /**
2547  * Set the pts wrap and time base information for a given stream
2548 * @param s stream
2549 * @param pts_wrap_bits number of bits effectively used by the pts
2550 * (used for wrap control, 33 is the value for MPEG)
2551 * @param pts_num numerator to convert to seconds (MPEG: 1)
2552 * @param pts_den denominator to convert to seconds (MPEG: 90000)
2553 */
2554 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2555 int pts_num, int pts_den)
2556 {
2557 s->pts_wrap_bits = pts_wrap_bits;
2558 s->time_base.num = pts_num;
2559 s->time_base.den = pts_den;
2560 }
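/* Usage sketch (editor's illustration): MPEG streams use a 33-bit pts with a
 * 90 kHz clock, i.e.
 *
 *     av_set_pts_info(st, 33, 1, 90000);
 */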
2561
2562 /* fraction handling */
2563
2564 /**
2565  * Initialize f = val + (num / den) + 0.5. 'num' is normalized so that
2566  * 0 <= num < den.
2567 *
2568 * @param f fractional number
2569 * @param val integer value
2570 * @param num must be >= 0
2571 * @param den must be >= 1
2572 */
2573 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2574 {
2575 num += (den >> 1);
2576 if (num >= den) {
2577 val += num / den;
2578 num = num % den;
2579 }
2580 f->val = val;
2581 f->num = num;
2582 f->den = den;
2583 }
2584
2585 /* set f to (val + 0.5) */
2586 void av_frac_set(AVFrac *f, int64_t val)
2587 {
2588 f->val = val;
2589 f->num = f->den >> 1;
2590 }
2591
2592 /**
2593  * Fractional addition to f: f = f + (incr / f->den)
2594 *
2595 * @param f fractional number
2596 * @param incr increment, can be positive or negative
2597 */
2598 void av_frac_add(AVFrac *f, int64_t incr)
2599 {
2600 int64_t num, den;
2601
2602 num = f->num + incr;
2603 den = f->den;
2604 if (num < 0) {
2605 f->val += num / den;
2606 num = num % den;
2607 if (num < 0) {
2608 num += den;
2609 f->val--;
2610 }
2611 } else if (num >= den) {
2612 f->val += num / den;
2613 num = num % den;
2614 }
2615 f->num = num;
2616 }
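/* Usage sketch (editor's illustration): how the fractional remainder carries
 * into ->val; this mirrors the way compute_pkt_fields2() above advances a
 * stream's pts.
 *
 *     AVFrac f;
 *     av_frac_init(&f, 0, 0, 3);  // val = 0, num = 1 (den/2 rounding term), den = 3
 *     av_frac_add(&f, 1);         // val = 0, num = 2
 *     av_frac_add(&f, 1);         // val = 1, num = 0
 *     av_frac_add(&f, 1);         // val = 1, num = 1
 */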
2617
2618 /**
2619 * register a new image format
2620 * @param img_fmt Image format descriptor
2621 */
2622 void av_register_image_format(AVImageFormat *img_fmt)
2623 {
2624 AVImageFormat **p;
2625
2626 p = &first_image_format;
2627 while (*p != NULL) p = &(*p)->next;
2628 *p = img_fmt;
2629 img_fmt->next = NULL;
2630 }
2631
2632 /* guess image format */
2633 AVImageFormat *av_probe_image_format(AVProbeData *pd)
2634 {
2635 AVImageFormat *fmt1, *fmt;
2636 int score, score_max;
2637
2638 fmt = NULL;
2639 score_max = 0;
2640 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2641 if (fmt1->img_probe) {
2642 score = fmt1->img_probe(pd);
2643 if (score > score_max) {
2644 score_max = score;
2645 fmt = fmt1;
2646 }
2647 }
2648 }
2649 return fmt;
2650 }
2651
2652 AVImageFormat *guess_image_format(const char *filename)
2653 {
2654 AVImageFormat *fmt1;
2655
2656 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2657 if (fmt1->extensions && match_ext(filename, fmt1->extensions))
2658 return fmt1;
2659 }
2660 return NULL;
2661 }
2662
2663 /**
2664 * Read an image from a stream.
2665  * @param pb byte stream containing the image
2666 * @param fmt image format, NULL if probing is required
2667 */
2668 int av_read_image(ByteIOContext *pb, const char *filename,
2669 AVImageFormat *fmt,
2670 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
2671 {
2672 char buf[PROBE_BUF_SIZE];
2673 AVProbeData probe_data, *pd = &probe_data;
2674 offset_t pos;
2675 int ret;
2676
2677 if (!fmt) {
2678 pd->filename = filename;
2679 pd->buf = buf;
2680 pos = url_ftell(pb);
2681 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
2682 url_fseek(pb, pos, SEEK_SET);
2683 fmt = av_probe_image_format(pd);
2684 }
2685 if (!fmt)
2686 return AVERROR_NOFMT;
2687 ret = fmt->img_read(pb, alloc_cb, opaque);
2688 return ret;
2689 }
2690
2691 /**
2692 * Write an image to a stream.
2693 * @param pb byte stream for the image output
2694 * @param fmt image format
2695  * @param img image data and information
2696 */
2697 int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
2698 {
2699 return fmt->img_write(pb, img);
2700 }
2701