keyframe & non-keyframe index fixes
[libav.git] / libavformat / utils.c
1 /*
2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #include "avformat.h"
20
21 #undef NDEBUG
22 #include <assert.h>
23
24 AVInputFormat *first_iformat = NULL;
25 AVOutputFormat *first_oformat = NULL;
26 AVImageFormat *first_image_format = NULL;
27
28 void av_register_input_format(AVInputFormat *format)
29 {
30 AVInputFormat **p;
31 p = &first_iformat;
32 while (*p != NULL) p = &(*p)->next;
33 *p = format;
34 format->next = NULL;
35 }
36
37 void av_register_output_format(AVOutputFormat *format)
38 {
39 AVOutputFormat **p;
40 p = &first_oformat;
41 while (*p != NULL) p = &(*p)->next;
42 *p = format;
43 format->next = NULL;
44 }
45
46 int match_ext(const char *filename, const char *extensions)
47 {
48 const char *ext, *p;
49 char ext1[32], *q;
50
51 if(!filename)
52 return 0;
53
54 ext = strrchr(filename, '.');
55 if (ext) {
56 ext++;
57 p = extensions;
58 for(;;) {
59 q = ext1;
60 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
61 *q++ = *p++;
62 *q = '\0';
63 if (!strcasecmp(ext1, ext))
64 return 1;
65 if (*p == '\0')
66 break;
67 p++;
68 }
69 }
70 return 0;
71 }
72
73 AVOutputFormat *guess_format(const char *short_name, const char *filename,
74 const char *mime_type)
75 {
76 AVOutputFormat *fmt, *fmt_found;
77 int score_max, score;
78
79 /* specific test for image sequences */
80 if (!short_name && filename &&
81 filename_number_test(filename) >= 0 &&
82 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
83 return guess_format("image2", NULL, NULL);
84 }
85 if (!short_name && filename &&
86 filename_number_test(filename) >= 0 &&
87 guess_image_format(filename)) {
88 return guess_format("image", NULL, NULL);
89 }
90
91 /* find the proper file type */
92 fmt_found = NULL;
93 score_max = 0;
94 fmt = first_oformat;
95 while (fmt != NULL) {
96 score = 0;
97 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
98 score += 100;
99 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
100 score += 10;
101 if (filename && fmt->extensions &&
102 match_ext(filename, fmt->extensions)) {
103 score += 5;
104 }
105 if (score > score_max) {
106 score_max = score;
107 fmt_found = fmt;
108 }
109 fmt = fmt->next;
110 }
111 return fmt_found;
112 }
113
114 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
115 const char *mime_type)
116 {
117 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
118
119 if (fmt) {
120 AVOutputFormat *stream_fmt;
121 char stream_format_name[64];
122
123 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
124 stream_fmt = guess_format(stream_format_name, NULL, NULL);
125
126 if (stream_fmt)
127 fmt = stream_fmt;
128 }
129
130 return fmt;
131 }
132
133 /**
134 * guesses the codec id based upon muxer and filename.
135 */
136 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
137 const char *filename, const char *mime_type, enum CodecType type){
138 if(type == CODEC_TYPE_VIDEO){
139 enum CodecID codec_id= CODEC_ID_NONE;
140
141 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
142 codec_id= av_guess_image2_codec(filename);
143 }
144 if(codec_id == CODEC_ID_NONE)
145 codec_id= fmt->video_codec;
146 return codec_id;
147 }else if(type == CODEC_TYPE_AUDIO)
148 return fmt->audio_codec;
149 else
150 return CODEC_ID_NONE;
151 }
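
/* Illustrative sketch (not part of the original file): how a caller might pick
   an output format and its default video codec from a file name. The name
   "out.avi" is hypothetical. */
static void example_guess_output_codec(void)
{
    AVOutputFormat *fmt;
    enum CodecID codec_id;

    fmt = guess_format(NULL, "out.avi", NULL);
    if (!fmt)
        return;
    codec_id = av_guess_codec(fmt, NULL, "out.avi", NULL, CODEC_TYPE_VIDEO);
    if (codec_id == CODEC_ID_NONE)
        return;
    /* codec_id now holds the video codec the guessed muxer would use by default */
}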
152
153 AVInputFormat *av_find_input_format(const char *short_name)
154 {
155 AVInputFormat *fmt;
156 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
157 if (!strcmp(fmt->name, short_name))
158 return fmt;
159 }
160 return NULL;
161 }
162
163 /* memory handling */
164
165 /**
166 * Default packet destructor
167 */
168 static void av_destruct_packet(AVPacket *pkt)
169 {
170 av_free(pkt->data);
171 pkt->data = NULL; pkt->size = 0;
172 }
173
174 /**
175 * Allocate the payload of a packet and initialize its fields to default values.
176 *
177 * @param pkt packet
178 * @param size wanted payload size
179 * @return 0 if OK. AVERROR_xxx otherwise.
180 */
181 int av_new_packet(AVPacket *pkt, int size)
182 {
183 void *data;
184 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
185 return AVERROR_NOMEM;
186 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
187 if (!data)
188 return AVERROR_NOMEM;
189 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
190
191 av_init_packet(pkt);
192 pkt->data = data;
193 pkt->size = size;
194 pkt->destruct = av_destruct_packet;
195 return 0;
196 }
197
198 /* This is a hack - the packet memory allocation stuff is broken. The
199    packet data is copied into its own allocated buffer if it was not really allocated */
200 int av_dup_packet(AVPacket *pkt)
201 {
202 if (pkt->destruct != av_destruct_packet) {
203 uint8_t *data;
204 /* we duplicate the packet and don't forget to put the padding
205 again */
206 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
207 return AVERROR_NOMEM;
208 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
209 if (!data) {
210 return AVERROR_NOMEM;
211 }
212 memcpy(data, pkt->data, pkt->size);
213 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
214 pkt->data = data;
215 pkt->destruct = av_destruct_packet;
216 }
217 return 0;
218 }
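
/* Illustrative sketch (not part of the original file): allocating a packet with
   av_new_packet() and making its payload independent with av_dup_packet() before
   queueing it. The 1024 byte size is arbitrary. */
static int example_packet_usage(void)
{
    AVPacket pkt;

    if (av_new_packet(&pkt, 1024) < 0)       /* payload plus padding is allocated */
        return AVERROR_NOMEM;
    memset(pkt.data, 0, pkt.size);           /* a real caller would fill in data here */

    /* if pkt.data pointed into a demuxer-owned buffer, av_dup_packet() would copy it
       into packet-owned memory (see the note above); here it is already owned */
    if (av_dup_packet(&pkt) < 0) {
        av_free_packet(&pkt);
        return AVERROR_NOMEM;
    }
    av_free_packet(&pkt);
    return 0;
}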
219
220 /* fifo handling */
221
222 int fifo_init(FifoBuffer *f, int size)
223 {
224 f->buffer = av_malloc(size);
225 if (!f->buffer)
226 return -1;
227 f->end = f->buffer + size;
228 f->wptr = f->rptr = f->buffer;
229 return 0;
230 }
231
232 void fifo_free(FifoBuffer *f)
233 {
234 av_free(f->buffer);
235 }
236
237 int fifo_size(FifoBuffer *f, uint8_t *rptr)
238 {
239 int size;
240
241 if(!rptr)
242 rptr= f->rptr;
243
244 if (f->wptr >= rptr) {
245 size = f->wptr - rptr;
246 } else {
247 size = (f->end - rptr) + (f->wptr - f->buffer);
248 }
249 return size;
250 }
251
252 /* get data from the fifo (return -1 if not enough data) */
253 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
254 {
255 uint8_t *rptr;
256 int size, len;
257
258 if(!rptr_ptr)
259 rptr_ptr= &f->rptr;
260 rptr = *rptr_ptr;
261
262 if (f->wptr >= rptr) {
263 size = f->wptr - rptr;
264 } else {
265 size = (f->end - rptr) + (f->wptr - f->buffer);
266 }
267
268 if (size < buf_size)
269 return -1;
270 while (buf_size > 0) {
271 len = f->end - rptr;
272 if (len > buf_size)
273 len = buf_size;
274 memcpy(buf, rptr, len);
275 buf += len;
276 rptr += len;
277 if (rptr >= f->end)
278 rptr = f->buffer;
279 buf_size -= len;
280 }
281 *rptr_ptr = rptr;
282 return 0;
283 }
284
285 void fifo_realloc(FifoBuffer *f, unsigned int new_size){
286 unsigned int old_size= f->end - f->buffer;
287
288 if(old_size < new_size){
289 uint8_t *old= f->buffer;
290
291 f->buffer= av_realloc(f->buffer, new_size);
292
293 f->rptr += f->buffer - old;
294 f->wptr += f->buffer - old;
295
296 if(f->wptr < f->rptr){
297 memmove(f->rptr + new_size - old_size, f->rptr, f->buffer + old_size - f->rptr);
298 f->rptr += new_size - old_size;
299 }
300 f->end= f->buffer + new_size;
301 }
302 }
303
304 void fifo_write(FifoBuffer *f, uint8_t *buf, int size, uint8_t **wptr_ptr)
305 {
306 int len;
307 uint8_t *wptr;
308
309 if(!wptr_ptr)
310 wptr_ptr= &f->wptr;
311 wptr = *wptr_ptr;
312
313 while (size > 0) {
314 len = f->end - wptr;
315 if (len > size)
316 len = size;
317 memcpy(wptr, buf, len);
318 wptr += len;
319 if (wptr >= f->end)
320 wptr = f->buffer;
321 buf += len;
322 size -= len;
323 }
324 *wptr_ptr = wptr;
325 }
326
327 /* write data from the fifo into a ByteIOContext (return -1 if not enough data) */
328 int put_fifo(ByteIOContext *pb, FifoBuffer *f, int buf_size, uint8_t **rptr_ptr)
329 {
330 uint8_t *rptr = *rptr_ptr;
331 int size, len;
332
333 if (f->wptr >= rptr) {
334 size = f->wptr - rptr;
335 } else {
336 size = (f->end - rptr) + (f->wptr - f->buffer);
337 }
338
339 if (size < buf_size)
340 return -1;
341 while (buf_size > 0) {
342 len = f->end - rptr;
343 if (len > buf_size)
344 len = buf_size;
345 put_buffer(pb, rptr, len);
346 rptr += len;
347 if (rptr >= f->end)
348 rptr = f->buffer;
349 buf_size -= len;
350 }
351 *rptr_ptr = rptr;
352 return 0;
353 }
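
/* Illustrative sketch (not part of the original file): a write/read round trip
   through a FifoBuffer using the helpers above. Sizes are arbitrary. */
static int example_fifo_roundtrip(void)
{
    FifoBuffer fifo;
    uint8_t in[16], out[16];
    int i, ret = -1;

    if (fifo_init(&fifo, 64) < 0)
        return -1;
    for (i = 0; i < 16; i++)
        in[i] = i;

    fifo_write(&fifo, in, 16, NULL);              /* NULL: use the fifo's own wptr */
    if (fifo_size(&fifo, NULL) == 16 &&           /* 16 bytes are now buffered */
        fifo_read(&fifo, out, 16, NULL) == 0 &&   /* NULL: use the fifo's own rptr */
        !memcmp(in, out, 16))
        ret = 0;

    fifo_free(&fifo);
    return ret;
}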
354
355 int filename_number_test(const char *filename)
356 {
357 char buf[1024];
358 if(!filename)
359 return -1;
360 return get_frame_filename(buf, sizeof(buf), filename, 1);
361 }
362
363 /* guess file format */
364 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
365 {
366 AVInputFormat *fmt1, *fmt;
367 int score, score_max;
368
369 fmt = NULL;
370 score_max = 0;
371 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
372 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
373 continue;
374 score = 0;
375 if (fmt1->read_probe) {
376 score = fmt1->read_probe(pd);
377 } else if (fmt1->extensions) {
378 if (match_ext(pd->filename, fmt1->extensions)) {
379 score = 50;
380 }
381 }
382 if (score > score_max) {
383 score_max = score;
384 fmt = fmt1;
385 }
386 }
387 return fmt;
388 }
389
390 /************************************************************/
391 /* input media file */
392
393 /**
394 * open a media file from an IO stream. 'fmt' must be specified.
395 */
396
397 static const char* format_to_name(void* ptr)
398 {
399 AVFormatContext* fc = (AVFormatContext*) ptr;
400 if(fc->iformat) return fc->iformat->name;
401 else if(fc->oformat) return fc->oformat->name;
402 else return "NULL";
403 }
404
405 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
406
407 AVFormatContext *av_alloc_format_context(void)
408 {
409 AVFormatContext *ic;
410 ic = av_mallocz(sizeof(AVFormatContext));
411 if (!ic) return ic;
412 ic->av_class = &av_format_context_class;
413 return ic;
414 }
415
416 int av_open_input_stream(AVFormatContext **ic_ptr,
417 ByteIOContext *pb, const char *filename,
418 AVInputFormat *fmt, AVFormatParameters *ap)
419 {
420 int err;
421 AVFormatContext *ic;
422
423 ic = av_alloc_format_context();
424 if (!ic) {
425 err = AVERROR_NOMEM;
426 goto fail;
427 }
428 ic->iformat = fmt;
429 if (pb)
430 ic->pb = *pb;
431 ic->duration = AV_NOPTS_VALUE;
432 ic->start_time = AV_NOPTS_VALUE;
433 pstrcpy(ic->filename, sizeof(ic->filename), filename);
434
435 /* allocate private data */
436 if (fmt->priv_data_size > 0) {
437 ic->priv_data = av_mallocz(fmt->priv_data_size);
438 if (!ic->priv_data) {
439 err = AVERROR_NOMEM;
440 goto fail;
441 }
442 } else {
443 ic->priv_data = NULL;
444 }
445
446 err = ic->iformat->read_header(ic, ap);
447 if (err < 0)
448 goto fail;
449
450 if (pb)
451 ic->data_offset = url_ftell(&ic->pb);
452
453 *ic_ptr = ic;
454 return 0;
455 fail:
456 if (ic) {
457 av_freep(&ic->priv_data);
458 }
459 av_free(ic);
460 *ic_ptr = NULL;
461 return err;
462 }
463
464 #define PROBE_BUF_SIZE 2048
465
466 /**
467 * Open a media file as input. The codecs are not opened. Only the file
468 * header (if present) is read.
469 *
470 * @param ic_ptr the opened media file handle is put here
471 * @param filename filename to open.
472 * @param fmt if non NULL, force the file format to use
473 * @param buf_size optional buffer size (zero if default is OK)
474 * @param ap additional parameters needed when opening the file (NULL if default)
475 * @return 0 if OK. AVERROR_xxx otherwise.
476 */
477 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
478 AVInputFormat *fmt,
479 int buf_size,
480 AVFormatParameters *ap)
481 {
482 int err, must_open_file, file_opened;
483 uint8_t buf[PROBE_BUF_SIZE];
484 AVProbeData probe_data, *pd = &probe_data;
485 ByteIOContext pb1, *pb = &pb1;
486
487 file_opened = 0;
488 pd->filename = "";
489 if (filename)
490 pd->filename = filename;
491 pd->buf = buf;
492 pd->buf_size = 0;
493
494 if (!fmt) {
495 /* guess format if no file can be opened */
496 fmt = av_probe_input_format(pd, 0);
497 }
498
499 /* do not open file if the format does not need it. XXX: specific
500 hack needed to handle RTSP/TCP */
501 must_open_file = 1;
502 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
503 must_open_file = 0;
504 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it's uninitialized
505 }
506
507 if (!fmt || must_open_file) {
508 /* if no file needed do not try to open one */
509 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
510 err = AVERROR_IO;
511 goto fail;
512 }
513 file_opened = 1;
514 if (buf_size > 0) {
515 url_setbufsize(pb, buf_size);
516 }
517 if (!fmt) {
518 /* read probe data */
519 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
520 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
521 url_fclose(pb);
522 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
523 err = AVERROR_IO;
524 goto fail;
525 }
526 }
527 }
528 }
529
530 /* guess file format */
531 if (!fmt) {
532 fmt = av_probe_input_format(pd, 1);
533 }
534
535 /* if still no format found, error */
536 if (!fmt) {
537 err = AVERROR_NOFMT;
538 goto fail;
539 }
540
541 /* XXX: suppress this hack for redirectors */
542 #ifdef CONFIG_NETWORK
543 if (fmt == &redir_demux) {
544 err = redir_open(ic_ptr, pb);
545 url_fclose(pb);
546 return err;
547 }
548 #endif
549
550 /* check filename in case an image number is expected */
551 if (fmt->flags & AVFMT_NEEDNUMBER) {
552 if (filename_number_test(filename) < 0) {
553 err = AVERROR_NUMEXPECTED;
554 goto fail;
555 }
556 }
557 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
558 if (err)
559 goto fail;
560 return 0;
561 fail:
562 if (file_opened)
563 url_fclose(pb);
564 *ic_ptr = NULL;
565 return err;
566
567 }
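
/* Illustrative sketch (not part of the original file): opening a media file with
   format probing and closing it again. The filename comes from the caller and is
   hypothetical; error handling is minimal. */
static int example_open_and_close(const char *filename)
{
    AVFormatContext *ic;
    int err;

    /* NULL format: probe it, 0: default buffer size, NULL: default parameters */
    err = av_open_input_file(&ic, filename, NULL, 0, NULL);
    if (err < 0)
        return err;

    /* ... use ic here, e.g. av_find_stream_info() and av_read_frame() ... */

    av_close_input_file(ic);
    return 0;
}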
568
569 /*******************************************************/
570
571 /**
572 * Read a transport packet from a media file. This function is
573 * obsolete and should never be used. Use av_read_frame() instead.
574 *
575 * @param s media file handle
576 * @param pkt is filled
577 * @return 0 if OK. AVERROR_xxx if error.
578 */
579 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
580 {
581 return s->iformat->read_packet(s, pkt);
582 }
583
584 /**********************************************************/
585
586 /* get the number of samples of an audio frame. Return (-1) if error */
587 static int get_audio_frame_size(AVCodecContext *enc, int size)
588 {
589 int frame_size;
590
591 if (enc->frame_size <= 1) {
592 /* specific hack for pcm codecs because no frame size is
593 provided */
594 switch(enc->codec_id) {
595 case CODEC_ID_PCM_S16LE:
596 case CODEC_ID_PCM_S16BE:
597 case CODEC_ID_PCM_U16LE:
598 case CODEC_ID_PCM_U16BE:
599 if (enc->channels == 0)
600 return -1;
601 frame_size = size / (2 * enc->channels);
602 break;
603 case CODEC_ID_PCM_S8:
604 case CODEC_ID_PCM_U8:
605 case CODEC_ID_PCM_MULAW:
606 case CODEC_ID_PCM_ALAW:
607 if (enc->channels == 0)
608 return -1;
609 frame_size = size / (enc->channels);
610 break;
611 default:
612 /* used for example by ADPCM codecs */
613 if (enc->bit_rate == 0)
614 return -1;
615 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
616 break;
617 }
618 } else {
619 frame_size = enc->frame_size;
620 }
621 return frame_size;
622 }
623
624
625 /* compute the frame duration in seconds as the fraction *pnum / *pden; both are set to 0 if not available */
626 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
627 AVCodecParserContext *pc, AVPacket *pkt)
628 {
629 int frame_size;
630
631 *pnum = 0;
632 *pden = 0;
633 switch(st->codec.codec_type) {
634 case CODEC_TYPE_VIDEO:
635 *pnum = st->codec.frame_rate_base;
636 *pden = st->codec.frame_rate;
637 if (pc && pc->repeat_pict) {
638 *pden *= 2;
639 *pnum = (*pnum) * (2 + pc->repeat_pict);
640 }
641 break;
642 case CODEC_TYPE_AUDIO:
643 frame_size = get_audio_frame_size(&st->codec, pkt->size);
644 if (frame_size < 0)
645 break;
646 *pnum = frame_size;
647 *pden = st->codec.sample_rate;
648 break;
649 default:
650 break;
651 }
652 }
653
654 static int is_intra_only(AVCodecContext *enc){
655 if(enc->codec_type == CODEC_TYPE_AUDIO){
656 return 1;
657 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
658 switch(enc->codec_id){
659 case CODEC_ID_MJPEG:
660 case CODEC_ID_MJPEGB:
661 case CODEC_ID_LJPEG:
662 case CODEC_ID_RAWVIDEO:
663 case CODEC_ID_DVVIDEO:
664 case CODEC_ID_HUFFYUV:
665 case CODEC_ID_FFVHUFF:
666 case CODEC_ID_ASV1:
667 case CODEC_ID_ASV2:
668 case CODEC_ID_VCR1:
669 return 1;
670 default: break;
671 }
672 }
673 return 0;
674 }
675
676 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
677 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
678 int64_t delta= last_ts - mask/2;
679 return ((lsb - delta)&mask) + delta;
680 }
681
682 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
683 AVCodecParserContext *pc, AVPacket *pkt)
684 {
685 int num, den, presentation_delayed;
686
687 /* handle wrapping */
688 if(st->cur_dts != AV_NOPTS_VALUE){
689 if(pkt->pts != AV_NOPTS_VALUE)
690 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
691 if(pkt->dts != AV_NOPTS_VALUE)
692 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
693 }
694
695 if (pkt->duration == 0) {
696 compute_frame_duration(&num, &den, st, pc, pkt);
697 if (den && num) {
698 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
699 }
700 }
701
702 if(is_intra_only(&st->codec))
703 pkt->flags |= PKT_FLAG_KEY;
704
705 /* do we have a video B frame ? */
706 presentation_delayed = 0;
707 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
708 /* XXX: need has_b_frame, but cannot get it if the codec is
709 not initialized */
710 if (( st->codec.codec_id == CODEC_ID_H264
711 || st->codec.has_b_frames) &&
712 pc && pc->pict_type != FF_B_TYPE)
713 presentation_delayed = 1;
714 /* this may be redundant, but it shouldn't hurt */
715 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
716 presentation_delayed = 1;
717 }
718
719 if(st->cur_dts == AV_NOPTS_VALUE){
720 if(presentation_delayed) st->cur_dts = -pkt->duration;
721 else st->cur_dts = 0;
722 }
723
724 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
725 /* interpolate PTS and DTS if they are not present */
726 if (presentation_delayed) {
727 /* DTS = decompression time stamp */
728 /* PTS = presentation time stamp */
729 if (pkt->dts == AV_NOPTS_VALUE) {
730 /* if we know the last pts, use it */
731 if(st->last_IP_pts != AV_NOPTS_VALUE)
732 st->cur_dts = pkt->dts = st->last_IP_pts;
733 else
734 pkt->dts = st->cur_dts;
735 } else {
736 st->cur_dts = pkt->dts;
737 }
738 /* this is tricky: the dts must be incremented by the duration
739 of the frame we are displaying, i.e. the last I or P frame */
740 if (st->last_IP_duration == 0)
741 st->cur_dts += pkt->duration;
742 else
743 st->cur_dts += st->last_IP_duration;
744 st->last_IP_duration = pkt->duration;
745 st->last_IP_pts= pkt->pts;
746 /* cannot compute PTS if not present (we can compute it only
747    by knowing the future) */
748 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
749 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
750 int64_t old_diff= ABS(st->cur_dts - pkt->duration - pkt->pts);
751 int64_t new_diff= ABS(st->cur_dts - pkt->pts);
752 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
753 pkt->pts += pkt->duration;
754 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
755 }
756 }
757
758 /* presentation is not delayed : PTS and DTS are the same */
759 if (pkt->pts == AV_NOPTS_VALUE) {
760 if (pkt->dts == AV_NOPTS_VALUE) {
761 pkt->pts = st->cur_dts;
762 pkt->dts = st->cur_dts;
763 }
764 else {
765 st->cur_dts = pkt->dts;
766 pkt->pts = pkt->dts;
767 }
768 } else {
769 st->cur_dts = pkt->pts;
770 pkt->dts = pkt->pts;
771 }
772 st->cur_dts += pkt->duration;
773 }
774 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
775
776 /* update flags */
777 if (pc) {
778 pkt->flags = 0;
779 /* key frame computation */
780 switch(st->codec.codec_type) {
781 case CODEC_TYPE_VIDEO:
782 if (pc->pict_type == FF_I_TYPE)
783 pkt->flags |= PKT_FLAG_KEY;
784 break;
785 case CODEC_TYPE_AUDIO:
786 pkt->flags |= PKT_FLAG_KEY;
787 break;
788 default:
789 break;
790 }
791 }
792
793 /* convert the packet time stamp units */
794 if(pkt->pts != AV_NOPTS_VALUE)
795 pkt->pts = av_rescale(pkt->pts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
796 if(pkt->dts != AV_NOPTS_VALUE)
797 pkt->dts = av_rescale(pkt->dts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
798
799 /* duration field */
800 pkt->duration = av_rescale(pkt->duration, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
801 }
802
803 void av_destruct_packet_nofree(AVPacket *pkt)
804 {
805 pkt->data = NULL; pkt->size = 0;
806 }
807
808 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
809 {
810 AVStream *st;
811 int len, ret, i;
812
813 for(;;) {
814 /* select current input stream component */
815 st = s->cur_st;
816 if (st) {
817 if (!st->parser) {
818 /* no parsing needed: we just output the packet as is */
819 /* raw data support */
820 *pkt = s->cur_pkt;
821 compute_pkt_fields(s, st, NULL, pkt);
822 s->cur_st = NULL;
823 return 0;
824 } else if (s->cur_len > 0 && !st->discard) {
825 len = av_parser_parse(st->parser, &st->codec, &pkt->data, &pkt->size,
826 s->cur_ptr, s->cur_len,
827 s->cur_pkt.pts, s->cur_pkt.dts);
828 s->cur_pkt.pts = AV_NOPTS_VALUE;
829 s->cur_pkt.dts = AV_NOPTS_VALUE;
830 /* increment read pointer */
831 s->cur_ptr += len;
832 s->cur_len -= len;
833
834 /* return packet if any */
835 if (pkt->size) {
836 got_packet:
837 pkt->duration = 0;
838 pkt->stream_index = st->index;
839 pkt->pts = st->parser->pts;
840 pkt->dts = st->parser->dts;
841 pkt->destruct = av_destruct_packet_nofree;
842 compute_pkt_fields(s, st, st->parser, pkt);
843 return 0;
844 }
845 } else {
846 /* free packet */
847 av_free_packet(&s->cur_pkt);
848 s->cur_st = NULL;
849 }
850 } else {
851 /* read next packet */
852 ret = av_read_packet(s, &s->cur_pkt);
853 if (ret < 0) {
854 if (ret == -EAGAIN)
855 return ret;
856 /* return the last frames, if any */
857 for(i = 0; i < s->nb_streams; i++) {
858 st = s->streams[i];
859 if (st->parser) {
860 av_parser_parse(st->parser, &st->codec,
861 &pkt->data, &pkt->size,
862 NULL, 0,
863 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
864 if (pkt->size)
865 goto got_packet;
866 }
867 }
868 /* no more packets: really terminates parsing */
869 return ret;
870 }
871
872 st = s->streams[s->cur_pkt.stream_index];
873
874 s->cur_st = st;
875 s->cur_ptr = s->cur_pkt.data;
876 s->cur_len = s->cur_pkt.size;
877 if (st->need_parsing && !st->parser) {
878 st->parser = av_parser_init(st->codec.codec_id);
879 if (!st->parser) {
880 /* no parser available : just output the raw packets */
881 st->need_parsing = 0;
882 }
883 }
884 }
885 }
886 }
887
888 /**
889 * Return the next frame of a stream. The returned packet is valid
890 * until the next av_read_frame() or until av_close_input_file() and
891 * must be freed with av_free_packet. For video, the packet contains
892 * exactly one frame. For audio, it contains an integer number of
893 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
894 * data). If the audio frames have a variable size (e.g. MPEG audio),
895 * then it contains one frame.
896 *
897 * pkt->pts, pkt->dts and pkt->duration are always set to correct
898 * values in AV_TIME_BASE units (and guessed if the format cannot
899 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
900 * has B frames, so it is better to rely on pkt->dts if you do not
901 * decompress the payload.
902 *
903 * Return 0 if OK, < 0 if error or end of file.
904 */
905 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
906 {
907 AVPacketList *pktl;
908
909 pktl = s->packet_buffer;
910 if (pktl) {
911 /* read packet from packet buffer, if there is data */
912 *pkt = pktl->pkt;
913 s->packet_buffer = pktl->next;
914 av_free(pktl);
915 return 0;
916 } else {
917 return av_read_frame_internal(s, pkt);
918 }
919 }
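
/* Illustrative sketch (not part of the original file): a demuxing loop that counts
   the key frames of one stream, assuming 'ic' was opened with av_open_input_file(). */
static int example_count_keyframes(AVFormatContext *ic, int stream_index)
{
    AVPacket pkt;
    int keyframes = 0;

    while (av_read_frame(ic, &pkt) >= 0) {
        /* pkt.pts, pkt.dts and pkt.duration are in AV_TIME_BASE units here */
        if (pkt.stream_index == stream_index && (pkt.flags & PKT_FLAG_KEY))
            keyframes++;
        av_free_packet(&pkt);
    }
    return keyframes;
}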
920
921 /* XXX: suppress the packet queue */
922 static void flush_packet_queue(AVFormatContext *s)
923 {
924 AVPacketList *pktl;
925
926 for(;;) {
927 pktl = s->packet_buffer;
928 if (!pktl)
929 break;
930 s->packet_buffer = pktl->next;
931 av_free_packet(&pktl->pkt);
932 av_free(pktl);
933 }
934 }
935
936 /*******************************************************/
937 /* seek support */
938
939 int av_find_default_stream_index(AVFormatContext *s)
940 {
941 int i;
942 AVStream *st;
943
944 if (s->nb_streams <= 0)
945 return -1;
946 for(i = 0; i < s->nb_streams; i++) {
947 st = s->streams[i];
948 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
949 return i;
950 }
951 }
952 return 0;
953 }
954
955 /* flush the frame reader */
956 static void av_read_frame_flush(AVFormatContext *s)
957 {
958 AVStream *st;
959 int i;
960
961 flush_packet_queue(s);
962
963 /* free previous packet */
964 if (s->cur_st) {
965 if (s->cur_st->parser)
966 av_free_packet(&s->cur_pkt);
967 s->cur_st = NULL;
968 }
969 /* fail safe */
970 s->cur_ptr = NULL;
971 s->cur_len = 0;
972
973 /* for each stream, reset read state */
974 for(i = 0; i < s->nb_streams; i++) {
975 st = s->streams[i];
976
977 if (st->parser) {
978 av_parser_close(st->parser);
979 st->parser = NULL;
980 }
981 st->last_IP_pts = AV_NOPTS_VALUE;
982 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
983 }
984 }
985
986 /**
987 * Updates cur_dts of all streams based on the given timestamp and AVStream.
988 * Stream ref_st keeps the timestamp unchanged; the others get cur_dts converted to their native time base.
989 * Only needed for timestamp wrapping or if (dts not set and pts != dts).
990 * @param timestamp new dts expressed in time_base of param ref_st
991 * @param ref_st reference stream giving time_base of param timestamp
992 */
993 static void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
994 int i;
995
996 for(i = 0; i < s->nb_streams; i++) {
997 AVStream *st = s->streams[i];
998
999 st->cur_dts = av_rescale(timestamp,
1000 st->time_base.den * (int64_t)ref_st->time_base.num,
1001 st->time_base.num * (int64_t)ref_st->time_base.den);
1002 }
1003 }
1004
1005 /**
1006 * Add an index entry into a sorted list, updating it if it is already there.
1007 * @param timestamp timestamp in the timebase of the given stream
1008 */
1009 int av_add_index_entry(AVStream *st,
1010 int64_t pos, int64_t timestamp, int distance, int flags)
1011 {
1012 AVIndexEntry *entries, *ie;
1013 int index;
1014
1015 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1016 return -1;
1017
1018 entries = av_fast_realloc(st->index_entries,
1019 &st->index_entries_allocated_size,
1020 (st->nb_index_entries + 1) *
1021 sizeof(AVIndexEntry));
1022 if(!entries)
1023 return -1;
1024
1025 st->index_entries= entries;
1026
1027 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1028
1029 if(index<0){
1030 index= st->nb_index_entries++;
1031 ie= &entries[index];
1032 assert(index==0 || ie[-1].timestamp < timestamp);
1033 }else{
1034 ie= &entries[index];
1035 if(ie->timestamp != timestamp){
1036 if(ie->timestamp <= timestamp)
1037 return -1;
1038 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1039 st->nb_index_entries++;
1040 }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1041 distance= ie->min_distance;
1042 }
1043
1044 ie->pos = pos;
1045 ie->timestamp = timestamp;
1046 ie->min_distance= distance;
1047 ie->flags = flags;
1048
1049 return index;
1050 }
1051
1052 /* build an index for raw streams using a parser */
1053 static void av_build_index_raw(AVFormatContext *s)
1054 {
1055 AVPacket pkt1, *pkt = &pkt1;
1056 int ret;
1057 AVStream *st;
1058
1059 st = s->streams[0];
1060 av_read_frame_flush(s);
1061 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1062
1063 for(;;) {
1064 ret = av_read_frame(s, pkt);
1065 if (ret < 0)
1066 break;
1067 if (pkt->stream_index == 0 && st->parser &&
1068 (pkt->flags & PKT_FLAG_KEY)) {
1069 int64_t dts= av_rescale(pkt->dts, st->time_base.den, AV_TIME_BASE*(int64_t)st->time_base.num);
1070 av_add_index_entry(st, st->parser->frame_offset, dts,
1071 0, AVINDEX_KEYFRAME);
1072 }
1073 av_free_packet(pkt);
1074 }
1075 }
1076
1077 /* return TRUE if we deal with a raw stream (raw codec data and
1078 parsing needed) */
1079 static int is_raw_stream(AVFormatContext *s)
1080 {
1081 AVStream *st;
1082
1083 if (s->nb_streams != 1)
1084 return 0;
1085 st = s->streams[0];
1086 if (!st->need_parsing)
1087 return 0;
1088 return 1;
1089 }
1090
1091 /**
1092 * Gets the index for a specific timestamp.
1093 * @param flags if AVSEEK_FLAG_BACKWARD is set then the returned index will correspond
1094 * to the timestamp which is <= the requested one; if backward is 0
1095 * then it will be >=.
1096 * If AVSEEK_FLAG_ANY is set, seek to any frame, otherwise only to keyframes.
1097 * @return < 0 if no such timestamp could be found
1098 */
1099 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1100 int flags)
1101 {
1102 AVIndexEntry *entries= st->index_entries;
1103 int nb_entries= st->nb_index_entries;
1104 int a, b, m;
1105 int64_t timestamp;
1106
1107 a = - 1;
1108 b = nb_entries;
1109
1110 while (b - a > 1) {
1111 m = (a + b) >> 1;
1112 timestamp = entries[m].timestamp;
1113 if(timestamp >= wanted_timestamp)
1114 b = m;
1115 if(timestamp <= wanted_timestamp)
1116 a = m;
1117 }
1118 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1119
1120 if(!(flags & AVSEEK_FLAG_ANY)){
1121 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1122 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1123 }
1124 }
1125
1126 if(m == nb_entries)
1127 return -1;
1128 return m;
1129 }
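
/* Illustrative sketch (not part of the original file): filling an index with two
   key frames and searching it with and without AVSEEK_FLAG_BACKWARD. The stream,
   positions and timestamps (in st->time_base units) are hypothetical. */
static void example_index_usage(AVStream *st)
{
    int idx;

    av_add_index_entry(st,      0 /*pos*/,    0 /*ts*/, 0, AVINDEX_KEYFRAME);
    av_add_index_entry(st, 100000 /*pos*/, 9000 /*ts*/, 0, AVINDEX_KEYFRAME);

    /* backward: the entry with timestamp <= 5000, i.e. the one at ts 0 */
    idx = av_index_search_timestamp(st, 5000, AVSEEK_FLAG_BACKWARD);
    /* forward: the entry with timestamp >= 5000, i.e. the one at ts 9000 */
    idx = av_index_search_timestamp(st, 5000, 0);
    (void)idx;
}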
1130
1131 #define DEBUG_SEEK
1132
1133 /**
1134 * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
1135 * This is not supposed to be called directly by a user application, but by demuxers.
1136 * @param target_ts target timestamp in the time base of the given stream
1137 * @param stream_index stream number
1138 */
1139 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1140 AVInputFormat *avif= s->iformat;
1141 int64_t pos_min, pos_max, pos, pos_limit;
1142 int64_t ts_min, ts_max, ts;
1143 int64_t start_pos;
1144 int index, no_change;
1145 AVStream *st;
1146
1147 if (stream_index < 0)
1148 return -1;
1149
1150 #ifdef DEBUG_SEEK
1151 av_log(s, AV_LOG_DEBUG, "read_seek: %d %lld\n", stream_index, target_ts);
1152 #endif
1153
1154 ts_max=
1155 ts_min= AV_NOPTS_VALUE;
1156 pos_limit= -1; //gcc falsely says it may be uninitialized
1157
1158 st= s->streams[stream_index];
1159 if(st->index_entries){
1160 AVIndexEntry *e;
1161
1162 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1163 index= FFMAX(index, 0);
1164 e= &st->index_entries[index];
1165
1166 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1167 pos_min= e->pos;
1168 ts_min= e->timestamp;
1169 #ifdef DEBUG_SEEK
1170 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%llx dts_min=%lld\n",
1171 pos_min,ts_min);
1172 #endif
1173 }else{
1174 assert(index==0);
1175 }
1176
1177 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1178 assert(index < st->nb_index_entries);
1179 if(index >= 0){
1180 e= &st->index_entries[index];
1181 assert(e->timestamp >= target_ts);
1182 pos_max= e->pos;
1183 ts_max= e->timestamp;
1184 pos_limit= pos_max - e->min_distance;
1185 #ifdef DEBUG_SEEK
1186 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%llx pos_limit=0x%llx dts_max=%lld\n",
1187 pos_max,pos_limit, ts_max);
1188 #endif
1189 }
1190 }
1191
1192 if(ts_min == AV_NOPTS_VALUE){
1193 pos_min = s->data_offset;
1194 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1195 if (ts_min == AV_NOPTS_VALUE)
1196 return -1;
1197 }
1198
1199 if(ts_max == AV_NOPTS_VALUE){
1200 int step= 1024;
1201 pos_max = url_filesize(url_fileno(&s->pb)) - 1;
1202 do{
1203 pos_max -= step;
1204 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1205 step += step;
1206 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1207 if (ts_max == AV_NOPTS_VALUE)
1208 return -1;
1209
1210 for(;;){
1211 int64_t tmp_pos= pos_max + 1;
1212 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1213 if(tmp_ts == AV_NOPTS_VALUE)
1214 break;
1215 ts_max= tmp_ts;
1216 pos_max= tmp_pos;
1217 }
1218 pos_limit= pos_max;
1219 }
1220
1221 no_change=0;
1222 while (pos_min < pos_limit) {
1223 #ifdef DEBUG_SEEK
1224 av_log(s, AV_LOG_DEBUG, "pos_min=0x%llx pos_max=0x%llx dts_min=%lld dts_max=%lld\n",
1225 pos_min, pos_max,
1226 ts_min, ts_max);
1227 #endif
1228 assert(pos_limit <= pos_max);
1229
1230 if(no_change==0){
1231 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1232 // interpolate position (better than dichotomy)
1233 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1234 + pos_min - approximate_keyframe_distance;
1235 }else if(no_change==1){
1236 // bisection, if interpolation failed to change min or max pos last time
1237 pos = (pos_min + pos_limit)>>1;
1238 }else{
1239 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1240 pos=pos_min;
1241 }
1242 if(pos <= pos_min)
1243 pos= pos_min + 1;
1244 else if(pos > pos_limit)
1245 pos= pos_limit;
1246 start_pos= pos;
1247
1248 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1249 if(pos == pos_max)
1250 no_change++;
1251 else
1252 no_change=0;
1253 #ifdef DEBUG_SEEK
1254 av_log(s, AV_LOG_DEBUG, "%Ld %Ld %Ld / %Ld %Ld %Ld target:%Ld limit:%Ld start:%Ld noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1255 #endif
1256 assert(ts != AV_NOPTS_VALUE);
1257 if (target_ts <= ts) {
1258 pos_limit = start_pos - 1;
1259 pos_max = pos;
1260 ts_max = ts;
1261 }
1262 if (target_ts >= ts) {
1263 pos_min = pos;
1264 ts_min = ts;
1265 }
1266 }
1267
1268 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1269 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1270 #ifdef DEBUG_SEEK
1271 pos_min = pos;
1272 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1273 pos_min++;
1274 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1275 av_log(s, AV_LOG_DEBUG, "pos=0x%llx %lld<=%lld<=%lld\n",
1276 pos, ts_min, target_ts, ts_max);
1277 #endif
1278 /* do the seek */
1279 url_fseek(&s->pb, pos, SEEK_SET);
1280
1281 av_update_cur_dts(s, st, ts);
1282
1283 return 0;
1284 }
1285
1286 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1287 int64_t pos_min, pos_max;
1288 #if 0
1289 AVStream *st;
1290
1291 if (stream_index < 0)
1292 return -1;
1293
1294 st= s->streams[stream_index];
1295 #endif
1296
1297 pos_min = s->data_offset;
1298 pos_max = url_filesize(url_fileno(&s->pb)) - 1;
1299
1300 if (pos < pos_min) pos= pos_min;
1301 else if(pos > pos_max) pos= pos_max;
1302
1303 url_fseek(&s->pb, pos, SEEK_SET);
1304
1305 #if 0
1306 av_update_cur_dts(s, st, ts);
1307 #endif
1308 return 0;
1309 }
1310
1311 static int av_seek_frame_generic(AVFormatContext *s,
1312 int stream_index, int64_t timestamp, int flags)
1313 {
1314 int index;
1315 AVStream *st;
1316 AVIndexEntry *ie;
1317
1318 if (!s->index_built) {
1319 if (is_raw_stream(s)) {
1320 av_build_index_raw(s);
1321 } else {
1322 return -1;
1323 }
1324 s->index_built = 1;
1325 }
1326
1327 st = s->streams[stream_index];
1328 index = av_index_search_timestamp(st, timestamp, flags);
1329 if (index < 0)
1330 return -1;
1331
1332 /* now we have found the index, we can seek */
1333 ie = &st->index_entries[index];
1334 av_read_frame_flush(s);
1335 url_fseek(&s->pb, ie->pos, SEEK_SET);
1336
1337 av_update_cur_dts(s, st, ie->timestamp);
1338
1339 return 0;
1340 }
1341
1342 /**
1343 * Seek to the key frame at 'timestamp' in 'stream_index'.
1345 * @param stream_index If stream_index is (-1), a default
1346 * stream is selected, and timestamp is automatically converted
1347 * from AV_TIME_BASE units to the stream specific time_base.
1348 * @param timestamp timestamp in AVStream.time_base units
1349 * @param flags flags which select direction and seeking mode
1350 * @return >= 0 on success
1351 */
1352 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1353 {
1354 int ret;
1355 AVStream *st;
1356
1357 av_read_frame_flush(s);
1358
1359 if(flags & AVSEEK_FLAG_BYTE)
1360 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1361
1362 if(stream_index < 0){
1363 stream_index= av_find_default_stream_index(s);
1364 if(stream_index < 0)
1365 return -1;
1366
1367 st= s->streams[stream_index];
1368 /* timestamp for default must be expressed in AV_TIME_BASE units */
1369 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1370 }
1371 st= s->streams[stream_index];
1372
1373 /* first, we try the format specific seek */
1374 if (s->iformat->read_seek)
1375 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1376 else
1377 ret = -1;
1378 if (ret >= 0) {
1379 return 0;
1380 }
1381
1382 if(s->iformat->read_timestamp)
1383 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1384 else
1385 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1386 }
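
/* Illustrative sketch (not part of the original file): seeking to roughly the
   10 second mark on the default stream of an opened file 'ic'. */
static int example_seek_to_10s(AVFormatContext *ic)
{
    /* stream_index -1: a default stream is picked and the timestamp is interpreted
       in AV_TIME_BASE units; BACKWARD selects the key frame at or before the target */
    return av_seek_frame(ic, -1, 10 * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
}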
1387
1388 /*******************************************************/
1389
1390 /* return TRUE if the stream has accurate timings for at least one component */
1391 static int av_has_timings(AVFormatContext *ic)
1392 {
1393 int i;
1394 AVStream *st;
1395
1396 for(i = 0;i < ic->nb_streams; i++) {
1397 st = ic->streams[i];
1398 if (st->start_time != AV_NOPTS_VALUE &&
1399 st->duration != AV_NOPTS_VALUE)
1400 return 1;
1401 }
1402 return 0;
1403 }
1404
1405 /* estimate the global stream timings from those of the individual components. Also
1406    compute the global bit rate if possible */
1407 static void av_update_stream_timings(AVFormatContext *ic)
1408 {
1409 int64_t start_time, end_time, end_time1;
1410 int i;
1411 AVStream *st;
1412
1413 start_time = MAXINT64;
1414 end_time = MININT64;
1415 for(i = 0;i < ic->nb_streams; i++) {
1416 st = ic->streams[i];
1417 if (st->start_time != AV_NOPTS_VALUE) {
1418 if (st->start_time < start_time)
1419 start_time = st->start_time;
1420 if (st->duration != AV_NOPTS_VALUE) {
1421 end_time1 = st->start_time + st->duration;
1422 if (end_time1 > end_time)
1423 end_time = end_time1;
1424 }
1425 }
1426 }
1427 if (start_time != MAXINT64) {
1428 ic->start_time = start_time;
1429 if (end_time != MAXINT64) {
1430 ic->duration = end_time - start_time;
1431 if (ic->file_size > 0) {
1432 /* compute the bit rate */
1433 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1434 (double)ic->duration;
1435 }
1436 }
1437 }
1438
1439 }
1440
1441 static void fill_all_stream_timings(AVFormatContext *ic)
1442 {
1443 int i;
1444 AVStream *st;
1445
1446 av_update_stream_timings(ic);
1447 for(i = 0;i < ic->nb_streams; i++) {
1448 st = ic->streams[i];
1449 if (st->start_time == AV_NOPTS_VALUE) {
1450 st->start_time = ic->start_time;
1451 st->duration = ic->duration;
1452 }
1453 }
1454 }
1455
1456 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1457 {
1458 int64_t filesize, duration;
1459 int bit_rate, i;
1460 AVStream *st;
1461
1462 /* if bit_rate is already set, we believe it */
1463 if (ic->bit_rate == 0) {
1464 bit_rate = 0;
1465 for(i=0;i<ic->nb_streams;i++) {
1466 st = ic->streams[i];
1467 bit_rate += st->codec.bit_rate;
1468 }
1469 ic->bit_rate = bit_rate;
1470 }
1471
1472 /* if duration is already set, we believe it */
1473 if (ic->duration == AV_NOPTS_VALUE &&
1474 ic->bit_rate != 0 &&
1475 ic->file_size != 0) {
1476 filesize = ic->file_size;
1477 if (filesize > 0) {
1478 duration = (int64_t)((8 * AV_TIME_BASE * (double)filesize) / (double)ic->bit_rate);
1479 for(i = 0; i < ic->nb_streams; i++) {
1480 st = ic->streams[i];
1481 if (st->start_time == AV_NOPTS_VALUE ||
1482 st->duration == AV_NOPTS_VALUE) {
1483 st->start_time = 0;
1484 st->duration = duration;
1485 }
1486 }
1487 }
1488 }
1489 }
1490
1491 #define DURATION_MAX_READ_SIZE 250000
1492
1493 /* only usable for MPEG-PS streams */
1494 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1495 {
1496 AVPacket pkt1, *pkt = &pkt1;
1497 AVStream *st;
1498 int read_size, i, ret;
1499 int64_t start_time, end_time, end_time1;
1500 int64_t filesize, offset, duration;
1501
1502 /* free previous packet */
1503 if (ic->cur_st && ic->cur_st->parser)
1504 av_free_packet(&ic->cur_pkt);
1505 ic->cur_st = NULL;
1506
1507 /* flush packet queue */
1508 flush_packet_queue(ic);
1509
1510 for(i=0;i<ic->nb_streams;i++) {
1511 st = ic->streams[i];
1512 if (st->parser) {
1513 av_parser_close(st->parser);
1514 st->parser= NULL;
1515 }
1516 }
1517
1518 /* we read the first packets to get the first PTS (not fully
1519 accurate, but it is enough now) */
1520 url_fseek(&ic->pb, 0, SEEK_SET);
1521 read_size = 0;
1522 for(;;) {
1523 if (read_size >= DURATION_MAX_READ_SIZE)
1524 break;
1525 /* if all info is available, we can stop */
1526 for(i = 0;i < ic->nb_streams; i++) {
1527 st = ic->streams[i];
1528 if (st->start_time == AV_NOPTS_VALUE)
1529 break;
1530 }
1531 if (i == ic->nb_streams)
1532 break;
1533
1534 ret = av_read_packet(ic, pkt);
1535 if (ret != 0)
1536 break;
1537 read_size += pkt->size;
1538 st = ic->streams[pkt->stream_index];
1539 if (pkt->pts != AV_NOPTS_VALUE) {
1540 if (st->start_time == AV_NOPTS_VALUE)
1541 st->start_time = av_rescale(pkt->pts, st->time_base.num * (int64_t)AV_TIME_BASE, st->time_base.den);
1542 }
1543 av_free_packet(pkt);
1544 }
1545
1546 /* we compute the minimum start_time and use it as default */
1547 start_time = MAXINT64;
1548 for(i = 0; i < ic->nb_streams; i++) {
1549 st = ic->streams[i];
1550 if (st->start_time != AV_NOPTS_VALUE &&
1551 st->start_time < start_time)
1552 start_time = st->start_time;
1553 }
1554 if (start_time != MAXINT64)
1555 ic->start_time = start_time;
1556
1557 /* estimate the end time (duration) */
1558 /* XXX: may need to support wrapping */
1559 filesize = ic->file_size;
1560 offset = filesize - DURATION_MAX_READ_SIZE;
1561 if (offset < 0)
1562 offset = 0;
1563
1564 url_fseek(&ic->pb, offset, SEEK_SET);
1565 read_size = 0;
1566 for(;;) {
1567 if (read_size >= DURATION_MAX_READ_SIZE)
1568 break;
1569 /* if all info is available, we can stop */
1570 for(i = 0;i < ic->nb_streams; i++) {
1571 st = ic->streams[i];
1572 if (st->duration == AV_NOPTS_VALUE)
1573 break;
1574 }
1575 if (i == ic->nb_streams)
1576 break;
1577
1578 ret = av_read_packet(ic, pkt);
1579 if (ret != 0)
1580 break;
1581 read_size += pkt->size;
1582 st = ic->streams[pkt->stream_index];
1583 if (pkt->pts != AV_NOPTS_VALUE) {
1584 end_time = av_rescale(pkt->pts, st->time_base.num * (int64_t)AV_TIME_BASE, st->time_base.den);
1585 duration = end_time - st->start_time;
1586 if (duration > 0) {
1587 if (st->duration == AV_NOPTS_VALUE ||
1588 st->duration < duration)
1589 st->duration = duration;
1590 }
1591 }
1592 av_free_packet(pkt);
1593 }
1594
1595 /* estimate total duration */
1596 end_time = MININT64;
1597 for(i = 0;i < ic->nb_streams; i++) {
1598 st = ic->streams[i];
1599 if (st->duration != AV_NOPTS_VALUE) {
1600 end_time1 = st->start_time + st->duration;
1601 if (end_time1 > end_time)
1602 end_time = end_time1;
1603 }
1604 }
1605
1606 /* update start_time (new streams may have been created, so we do
1607    it at the end) */
1608 if (ic->start_time != AV_NOPTS_VALUE) {
1609 for(i = 0; i < ic->nb_streams; i++) {
1610 st = ic->streams[i];
1611 if (st->start_time == AV_NOPTS_VALUE)
1612 st->start_time = ic->start_time;
1613 }
1614 }
1615
1616 if (end_time != MININT64) {
1617 /* put dummy values for duration if needed */
1618 for(i = 0;i < ic->nb_streams; i++) {
1619 st = ic->streams[i];
1620 if (st->duration == AV_NOPTS_VALUE &&
1621 st->start_time != AV_NOPTS_VALUE)
1622 st->duration = end_time - st->start_time;
1623 }
1624 ic->duration = end_time - ic->start_time;
1625 }
1626
1627 url_fseek(&ic->pb, 0, SEEK_SET);
1628 }
1629
1630 static void av_estimate_timings(AVFormatContext *ic)
1631 {
1632 URLContext *h;
1633 int64_t file_size;
1634
1635 /* get the file size, if possible */
1636 if (ic->iformat->flags & AVFMT_NOFILE) {
1637 file_size = 0;
1638 } else {
1639 h = url_fileno(&ic->pb);
1640 file_size = url_filesize(h);
1641 if (file_size < 0)
1642 file_size = 0;
1643 }
1644 ic->file_size = file_size;
1645
1646 if ((ic->iformat == &mpegps_demux || ic->iformat == &mpegts_demux) && file_size && !ic->pb.is_streamed) {
1647 /* get accurate estimate from the PTSes */
1648 av_estimate_timings_from_pts(ic);
1649 } else if (av_has_timings(ic)) {
1650 /* at least one component has timings - we use them for all
1651    the components */
1652 fill_all_stream_timings(ic);
1653 } else {
1654 /* less precise: use bit rate info */
1655 av_estimate_timings_from_bit_rate(ic);
1656 }
1657 av_update_stream_timings(ic);
1658
1659 #if 0
1660 {
1661 int i;
1662 AVStream *st;
1663 for(i = 0;i < ic->nb_streams; i++) {
1664 st = ic->streams[i];
1665 printf("%d: start_time: %0.3f duration: %0.3f\n",
1666 i, (double)st->start_time / AV_TIME_BASE,
1667 (double)st->duration / AV_TIME_BASE);
1668 }
1669 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1670 (double)ic->start_time / AV_TIME_BASE,
1671 (double)ic->duration / AV_TIME_BASE,
1672 ic->bit_rate / 1000);
1673 }
1674 #endif
1675 }
1676
1677 static int has_codec_parameters(AVCodecContext *enc)
1678 {
1679 int val;
1680 switch(enc->codec_type) {
1681 case CODEC_TYPE_AUDIO:
1682 val = enc->sample_rate;
1683 break;
1684 case CODEC_TYPE_VIDEO:
1685 val = enc->width;
1686 break;
1687 default:
1688 val = 1;
1689 break;
1690 }
1691 return (val != 0);
1692 }
1693
1694 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1695 {
1696 int16_t *samples;
1697 AVCodec *codec;
1698 int got_picture, ret;
1699 AVFrame picture;
1700
1701 codec = avcodec_find_decoder(st->codec.codec_id);
1702 if (!codec)
1703 return -1;
1704 ret = avcodec_open(&st->codec, codec);
1705 if (ret < 0)
1706 return ret;
1707 switch(st->codec.codec_type) {
1708 case CODEC_TYPE_VIDEO:
1709 ret = avcodec_decode_video(&st->codec, &picture,
1710 &got_picture, (uint8_t *)data, size);
1711 break;
1712 case CODEC_TYPE_AUDIO:
1713 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1714 if (!samples)
1715 goto fail;
1716 ret = avcodec_decode_audio(&st->codec, samples,
1717 &got_picture, (uint8_t *)data, size);
1718 av_free(samples);
1719 break;
1720 default:
1721 break;
1722 }
1723 fail:
1724 avcodec_close(&st->codec);
1725 return ret;
1726 }
1727
1728 /* absolute maximum size we read until we abort */
1729 #define MAX_READ_SIZE 5000000
1730
1731 /* maximum duration until we stop analysing the stream */
1732 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 1.0))
1733
1734 /**
1735 * Read the beginning of a media file to get stream information. This
1736 * is useful for file formats with no headers such as MPEG. This
1737 * function also computes the real frame rate in case of MPEG-2 repeat
1738 * frame mode.
1739 *
1740 * @param ic media file handle
1741 * @return >=0 if OK. AVERROR_xxx if error.
1742 */
1743 int av_find_stream_info(AVFormatContext *ic)
1744 {
1745 int i, count, ret, read_size;
1746 AVStream *st;
1747 AVPacket pkt1, *pkt;
1748 AVPacketList *pktl=NULL, **ppktl;
1749 int64_t last_dts[MAX_STREAMS];
1750 int64_t best_duration[MAX_STREAMS];
1751
1752 for(i=0;i<MAX_STREAMS;i++){
1753 last_dts[i]= AV_NOPTS_VALUE;
1754 best_duration[i]= INT64_MAX;
1755 }
1756
1757 count = 0;
1758 read_size = 0;
1759 ppktl = &ic->packet_buffer;
1760 for(;;) {
1761 /* check if one codec still needs to be handled */
1762 for(i=0;i<ic->nb_streams;i++) {
1763 st = ic->streams[i];
1764 if (!has_codec_parameters(&st->codec))
1765 break;
1766 /* variable fps and no guess at the real fps */
1767 if( st->codec.frame_rate >= 1000LL*st->codec.frame_rate_base
1768 && best_duration[i]== INT64_MAX)
1769 break;
1770 }
1771 if (i == ic->nb_streams) {
1772 /* NOTE: if the format has no header, then we need to read
1773 some packets to get most of the streams, so we cannot
1774 stop here */
1775 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1776 /* if we found the info for all the codecs, we can stop */
1777 ret = count;
1778 break;
1779 }
1780 } else {
1781 /* we did not get all the codec info, but we read too much data */
1782 if (read_size >= MAX_READ_SIZE) {
1783 ret = count;
1784 break;
1785 }
1786 }
1787
1788 /* NOTE: a new stream can be added here if there is no header in the file
1789 (AVFMTCTX_NOHEADER) */
1790 ret = av_read_frame_internal(ic, &pkt1);
1791 if (ret < 0) {
1792 /* EOF or error */
1793 ret = -1; /* EOF was reached before all the codec parameters could be found */
1794 for(i=0;i<ic->nb_streams;i++) {
1795 st = ic->streams[i];
1796 if (!has_codec_parameters(&st->codec))
1797 break;
1798 }
1799 if ((ic->ctx_flags & AVFMTCTX_NOHEADER) &&
1800 i == ic->nb_streams)
1801 ret = 0;
1802 break;
1803 }
1804
1805 pktl = av_mallocz(sizeof(AVPacketList));
1806 if (!pktl) {
1807 ret = AVERROR_NOMEM;
1808 break;
1809 }
1810
1811 /* add the packet in the buffered packet list */
1812 *ppktl = pktl;
1813 ppktl = &pktl->next;
1814
1815 pkt = &pktl->pkt;
1816 *pkt = pkt1;
1817
1818 /* duplicate the packet */
1819 if (av_dup_packet(pkt) < 0) {
1820 ret = AVERROR_NOMEM;
1821 break;
1822 }
1823
1824 read_size += pkt->size;
1825
1826 st = ic->streams[pkt->stream_index];
1827 st->codec_info_duration += pkt->duration;
1828 if (pkt->duration != 0)
1829 st->codec_info_nb_frames++;
1830
1831 if(st->codec.codec_type == CODEC_TYPE_VIDEO){
1832 int64_t last= last_dts[pkt->stream_index];
1833
1834 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && last < pkt->dts &&
1835 best_duration[pkt->stream_index] > pkt->dts - last){
1836 best_duration[pkt->stream_index] = pkt->dts - last;
1837 }
1838 last_dts[pkt->stream_index]= pkt->dts;
1839 }
1840 /* if still no information, we try to open the codec and to
1841 decompress the frame. We try to avoid that in most cases as
1842 it takes longer and uses more memory. For MPEG4, we need to
1843 decompress for Quicktime. */
1844 if (!has_codec_parameters(&st->codec) &&
1845 (st->codec.codec_id == CODEC_ID_FLV1 ||
1846 st->codec.codec_id == CODEC_ID_H264 ||
1847 st->codec.codec_id == CODEC_ID_H263 ||
1848 st->codec.codec_id == CODEC_ID_H261 ||
1849 st->codec.codec_id == CODEC_ID_VORBIS ||
1850 st->codec.codec_id == CODEC_ID_MJPEG ||
1851 st->codec.codec_id == CODEC_ID_PNG ||
1852 st->codec.codec_id == CODEC_ID_PAM ||
1853 st->codec.codec_id == CODEC_ID_PGM ||
1854 st->codec.codec_id == CODEC_ID_PGMYUV ||
1855 st->codec.codec_id == CODEC_ID_PBM ||
1856 st->codec.codec_id == CODEC_ID_PPM ||
1857 st->codec.codec_id == CODEC_ID_SHORTEN ||
1858 (st->codec.codec_id == CODEC_ID_MPEG4 && !st->need_parsing)))
1859 try_decode_frame(st, pkt->data, pkt->size);
1860
1861 if (st->codec_info_duration >= MAX_STREAM_DURATION) {
1862 break;
1863 }
1864 count++;
1865 }
1866
1867 for(i=0;i<ic->nb_streams;i++) {
1868 st = ic->streams[i];
1869 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1870 if(st->codec.codec_id == CODEC_ID_RAWVIDEO && !st->codec.codec_tag && !st->codec.bits_per_sample)
1871 st->codec.codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec.pix_fmt);
1872
1873 if(best_duration[i] < INT64_MAX && st->codec.frame_rate_base*1000 <= st->codec.frame_rate){
1874 int int_fps;
1875
1876 st->r_frame_rate= st->codec.frame_rate;
1877 st->r_frame_rate_base= av_rescale(best_duration[i], st->codec.frame_rate, AV_TIME_BASE);
1878 av_reduce(&st->r_frame_rate, &st->r_frame_rate_base, st->r_frame_rate, st->r_frame_rate_base, 1<<15);
1879
1880 int_fps= av_rescale(st->r_frame_rate, 1, st->r_frame_rate_base);
1881
1882 if(int_fps>0 && av_rescale(st->r_frame_rate, 1, int_fps) == st->r_frame_rate_base){
1883 st->r_frame_rate= int_fps;
1884 st->r_frame_rate_base= 1;
1885 }
1886 }
1887
1888 /* set real frame rate info */
1889 /* compute the real frame rate for telecine */
1890 if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
1891 st->codec.codec_id == CODEC_ID_MPEG2VIDEO) &&
1892 st->codec.sub_id == 2) {
1893 if (st->codec_info_nb_frames >= 20) {
1894 float coded_frame_rate, est_frame_rate;
1895 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
1896 (double)st->codec_info_duration ;
1897 coded_frame_rate = (double)st->codec.frame_rate /
1898 (double)st->codec.frame_rate_base;
1899 #if 0
1900 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
1901 coded_frame_rate, est_frame_rate);
1902 #endif
1903 /* if we detect that it could be a telecine, we
1904 signal it. It would be better to do it at a
1905 higher level as it can change in a film */
1906 if (coded_frame_rate >= 24.97 &&
1907 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
1908 st->r_frame_rate = 24000;
1909 st->r_frame_rate_base = 1001;
1910 }
1911 }
1912 }
1913 /* if no real frame rate, use the codec one */
1914 if (!st->r_frame_rate){
1915 st->r_frame_rate = st->codec.frame_rate;
1916 st->r_frame_rate_base = st->codec.frame_rate_base;
1917 }
1918 }
1919 }
1920
1921 av_estimate_timings(ic);
1922 #if 0
1923 /* correct DTS for b frame streams with no timestamps */
1924 for(i=0;i<ic->nb_streams;i++) {
1925 st = ic->streams[i];
1926 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1927 if(b-frames){
1928 ppktl = &ic->packet_buffer;
1929 while(ppkt1){
1930 if(ppkt1->stream_index != i)
1931 continue;
1932 if(ppkt1->pkt->dts < 0)
1933 break;
1934 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
1935 break;
1936 ppkt1->pkt->dts -= delta;
1937 ppkt1= ppkt1->next;
1938 }
1939 if(ppkt1)
1940 continue;
1941 st->cur_dts -= delta;
1942 }
1943 }
1944 }
1945 #endif
1946 return ret;
1947 }
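
/* Illustrative sketch (not part of the original file): probing the stream parameters
   of an opened file and logging what was found. Uses AV_LOG_DEBUG as elsewhere in
   this file. */
static int example_list_streams(AVFormatContext *ic)
{
    int i, ret;

    ret = av_find_stream_info(ic);
    if (ret < 0)
        return ret;
    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];
        if (st->codec.codec_type == CODEC_TYPE_VIDEO)
            av_log(ic, AV_LOG_DEBUG, "stream %d: video %dx%d\n",
                   i, st->codec.width, st->codec.height);
        else if (st->codec.codec_type == CODEC_TYPE_AUDIO)
            av_log(ic, AV_LOG_DEBUG, "stream %d: audio %d Hz, %d channel(s)\n",
                   i, st->codec.sample_rate, st->codec.channels);
    }
    return 0;
}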
1948
1949 /*******************************************************/
1950
1951 /**
1952 * start playing a network based stream (e.g. RTSP stream) at the
1953 * current position
1954 */
1955 int av_read_play(AVFormatContext *s)
1956 {
1957 if (!s->iformat->read_play)
1958 return AVERROR_NOTSUPP;
1959 return s->iformat->read_play(s);
1960 }
1961
1962 /**
1963 * pause a network based stream (e.g. RTSP stream). Use av_read_play()
1964 * to resume it.
1965 */
1966 int av_read_pause(AVFormatContext *s)
1967 {
1968 if (!s->iformat->read_pause)
1969 return AVERROR_NOTSUPP;
1970 return s->iformat->read_pause(s);
1971 }
1972
1973 /**
1974 * Close a media file (but not its codecs)
1975 *
1976 * @param s media file handle
1977 */
1978 void av_close_input_file(AVFormatContext *s)
1979 {
1980 int i, must_open_file;
1981 AVStream *st;
1982
1983 /* free previous packet */
1984 if (s->cur_st && s->cur_st->parser)
1985 av_free_packet(&s->cur_pkt);
1986
1987 if (s->iformat->read_close)
1988 s->iformat->read_close(s);
1989 for(i=0;i<s->nb_streams;i++) {
1990 /* free all data in a stream component */
1991 st = s->streams[i];
1992 if (st->parser) {
1993 av_parser_close(st->parser);
1994 }
1995 av_free(st->index_entries);
1996 av_free(st);
1997 }
1998 flush_packet_queue(s);
1999 must_open_file = 1;
2000 if (s->iformat->flags & AVFMT_NOFILE) {
2001 must_open_file = 0;
2002 }
2003 if (must_open_file) {
2004 url_fclose(&s->pb);
2005 }
2006 av_freep(&s->priv_data);
2007 av_free(s);
2008 }
2009
2010 /**
2011 * Add a new stream to a media file. Can only be called in the
2012 * read_header function. If the flag AVFMTCTX_NOHEADER is in the
2013 * format context, then new streams can be added in read_packet too.
2014 *
2015 *
2016 * @param s media file handle
2017 * @param id file format dependent stream id
2018 */
2019 AVStream *av_new_stream(AVFormatContext *s, int id)
2020 {
2021 AVStream *st;
2022
2023 if (s->nb_streams >= MAX_STREAMS)
2024 return NULL;
2025
2026 st = av_mallocz(sizeof(AVStream));
2027 if (!st)
2028 return NULL;
2029 avcodec_get_context_defaults(&st->codec);
2030 if (s->iformat) {
2031 /* no default bitrate if decoding */
2032 st->codec.bit_rate = 0;
2033 }
2034 st->index = s->nb_streams;
2035 st->id = id;
2036 st->start_time = AV_NOPTS_VALUE;
2037 st->duration = AV_NOPTS_VALUE;
2038 st->cur_dts = AV_NOPTS_VALUE;
2039
2040 /* default pts setting is MPEG-like */
2041 av_set_pts_info(st, 33, 1, 90000);
2042 st->last_IP_pts = AV_NOPTS_VALUE;
2043
2044 s->streams[s->nb_streams++] = st;
2045 return st;
2046 }
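
/*
 * Usage sketch for a demuxer's read_header callback (assumptions: the
 * hypothetical my_read_header, the stream id 0 and the codec ids are
 * illustrative only):
 *
 *     static int my_read_header(AVFormatContext *s, AVFormatParameters *ap)
 *     {
 *         AVStream *st = av_new_stream(s, 0);
 *         if (!st)
 *             return AVERROR_NOMEM;
 *         st->codec.codec_type = CODEC_TYPE_VIDEO;
 *         st->codec.codec_id   = CODEC_ID_MPEG1VIDEO;
 *         // the MPEG-like default of av_set_pts_info(st, 33, 1, 90000) can be
 *         // overridden here if the container uses another timestamp unit
 *         return 0;
 *     }
 */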
2047
2048 /************************************************************/
2049 /* output media file */
2050
2051 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2052 {
2053 int ret;
2054
2055 if (s->oformat->priv_data_size > 0) {
2056 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2057 if (!s->priv_data)
2058 return AVERROR_NOMEM;
2059 } else
2060 s->priv_data = NULL;
2061
2062 if (s->oformat->set_parameters) {
2063 ret = s->oformat->set_parameters(s, ap);
2064 if (ret < 0)
2065 return ret;
2066 }
2067 return 0;
2068 }
2069
2070 /**
2071 * allocate the stream private data and write the stream header to an
2072 * output media file
2073 *
2074 * @param s media file handle
2075 * @return 0 if OK. AVERROR_xxx if error.
2076 */
2077 int av_write_header(AVFormatContext *s)
2078 {
2079 int ret, i;
2080 AVStream *st;
2081
2082 ret = s->oformat->write_header(s);
2083 if (ret < 0)
2084 return ret;
2085
2086 /* init PTS generation */
2087 for(i=0;i<s->nb_streams;i++) {
2088 st = s->streams[i];
2089
2090 switch (st->codec.codec_type) {
2091 case CODEC_TYPE_AUDIO:
2092 av_frac_init(&st->pts, 0, 0,
2093 (int64_t)st->time_base.num * st->codec.sample_rate);
2094 break;
2095 case CODEC_TYPE_VIDEO:
2096 av_frac_init(&st->pts, 0, 0,
2097 (int64_t)st->time_base.num * st->codec.frame_rate);
2098 break;
2099 default:
2100 break;
2101 }
2102 }
2103 return 0;
2104 }
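
/*
 * Usage sketch for initializing an output file (assumptions: 'oc' is an
 * AVFormatContext with oformat, pb and its streams already set up; error
 * handling is abbreviated):
 *
 *     AVFormatParameters params;
 *     memset(&params, 0, sizeof(params));
 *     if (av_set_parameters(oc, &params) < 0)   // also allocates oc->priv_data
 *         return -1;
 *     if (av_write_header(oc) < 0)              // writes the container header
 *         return -1;                            // and inits per-stream pts
 */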
2105
2106 //FIXME merge with compute_pkt_fields
2107 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2108 int b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
2109 int num, den, frame_size;
2110
2111 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
2112
2113 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2114 return -1;*/
2115
2116 if(pkt->pts != AV_NOPTS_VALUE)
2117 pkt->pts = av_rescale(pkt->pts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
2118 if(pkt->dts != AV_NOPTS_VALUE)
2119 pkt->dts = av_rescale(pkt->dts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
2120
2121 /* duration field */
2122 pkt->duration = av_rescale(pkt->duration, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
2123 if (pkt->duration == 0) {
2124 compute_frame_duration(&num, &den, st, NULL, pkt);
2125 if (den && num) {
2126 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2127 }
2128 }
2129
2130 //XXX/FIXME this is a temporary hack until all encoders output pts
2131 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
2132 pkt->dts=
2133 // pkt->pts= st->cur_dts;
2134 pkt->pts= st->pts.val;
2135 }
2136
2137 //calculate dts from pts
2138 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2139 if(b_frames){
2140 if(st->last_IP_pts == AV_NOPTS_VALUE){
2141 st->last_IP_pts= -pkt->duration;
2142 }
2143 if(st->last_IP_pts < pkt->pts){
2144 pkt->dts= st->last_IP_pts;
2145 st->last_IP_pts= pkt->pts;
2146 }else
2147 pkt->dts= pkt->pts;
2148 }else
2149 pkt->dts= pkt->pts;
2150 }
2151
2152 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2153 av_log(NULL, AV_LOG_ERROR, "error, non-monotone timestamps %lld >= %lld\n", (long long)st->cur_dts, (long long)pkt->dts);
2154 return -1;
2155 }
2156 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2157 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2158 return -1;
2159 }
2160
2161 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
2162 st->cur_dts= pkt->dts;
2163 st->pts.val= pkt->dts;
2164
2165 /* update pts */
2166 switch (st->codec.codec_type) {
2167 case CODEC_TYPE_AUDIO:
2168 frame_size = get_audio_frame_size(&st->codec, pkt->size);
2169
2170 /* HACK/FIXME: we skip the initial 0-size packets as they most likely correspond to the encoder delay,
2171 but it would be better if we had the real timestamps from the encoder */
2172 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2173 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2174 }
2175 break;
2176 case CODEC_TYPE_VIDEO:
2177 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec.frame_rate_base);
2178 break;
2179 default:
2180 break;
2181 }
2182 return 0;
2183 }
2184
2185 static void truncate_ts(AVStream *st, AVPacket *pkt){
2186 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2187
2188 // if(pkt->dts < 0)
2189 // pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2190
2191 pkt->pts &= pts_mask;
2192 pkt->dts &= pts_mask;
2193 }
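
/*
 * Worked example: with the MPEG default of pts_wrap_bits = 33 the mask is
 * (2LL << 32) - 1 = 0x1FFFFFFFF, so a timestamp of 2^33 + 5 is truncated to 5,
 * matching the 33-bit wrap of MPEG timestamps.
 */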
2194
2195 /**
2196 * Write a packet to an output media file. The packet shall contain
2197 * one audio or video frame.
2198 *
2199 * @param s media file handle
2200 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2201 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2202 */
2203 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2204 {
2205 int ret;
2206
2207 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2208 if(ret<0)
2209 return ret;
2210
2211 truncate_ts(s->streams[pkt->stream_index], pkt);
2212
2213 ret= s->oformat->write_packet(s, pkt);
2214 if(!ret)
2215 ret= url_ferror(&s->pb);
2216 return ret;
2217 }
2218
2219 /**
2220 * interleave_packet implementation which will interleave per DTS.
2221 */
2222 static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2223 AVPacketList *pktl, **next_point, *this_pktl;
2224 int stream_count=0;
2225 int streams[MAX_STREAMS];
2226
2227 if(pkt){
2228 AVStream *st= s->streams[ pkt->stream_index];
2229
2230 assert(pkt->destruct != av_destruct_packet); //FIXME
2231
2232 this_pktl = av_mallocz(sizeof(AVPacketList));
2233 this_pktl->pkt= *pkt;
2234 av_dup_packet(&this_pktl->pkt);
2235
2236 next_point = &s->packet_buffer;
2237 while(*next_point){
2238 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2239 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2240 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2241 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2242 break;
2243 next_point= &(*next_point)->next;
2244 }
2245 this_pktl->next= *next_point;
2246 *next_point= this_pktl;
2247 }
2248
2249 memset(streams, 0, sizeof(streams));
2250 pktl= s->packet_buffer;
2251 while(pktl){
2252 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2253 if(streams[ pktl->pkt.stream_index ] == 0)
2254 stream_count++;
2255 streams[ pktl->pkt.stream_index ]++;
2256 pktl= pktl->next;
2257 }
2258
2259 if(s->nb_streams == stream_count || (flush && stream_count)){
2260 pktl= s->packet_buffer;
2261 *out= pktl->pkt;
2262
2263 s->packet_buffer= pktl->next;
2264 av_freep(&pktl);
2265 return 1;
2266 }else{
2267 av_init_packet(out);
2268 return 0;
2269 }
2270 }
2271
2272 /**
2273 * Interleaves an AVPacket correctly so it can be muxed.
2274 * @param out the interleaved packet will be output here
2275 * @param in the input packet
2276 * @param flush 1 if no further packets are available as input and all
2277 * remaining packets should be output
2278 * @return 1 if a packet was output, 0 if no packet could be output,
2279 * < 0 if an error occurred
2280 */
2281 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2282 if(s->oformat->interleave_packet)
2283 return s->oformat->interleave_packet(s, out, in, flush);
2284 else
2285 return av_interleave_packet_per_dts(s, out, in, flush);
2286 }
2287
2288 /**
2289 * Writes a packet to an output media file ensuring correct interleaving.
2290 * The packet shall contain one audio or video frame.
2291 * If the packets are already correctly interleaved, the application should
2292 * call av_write_frame() instead, as it is slightly faster. Also keep in mind
2293 * that completely non-interleaved input will need huge amounts of memory to
2294 * interleave with this, so it is preferable to interleave at the demuxer
2295 * level.
2296 *
2297 * @param s media file handle
2298 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2299 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2300 */
2301 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2302 AVStream *st= s->streams[ pkt->stream_index];
2303
2304 if(compute_pkt_fields2(st, pkt) < 0)
2305 return -1;
2306
2307 //FIXME/XXX/HACK drop zero-sized packets
2308 if(st->codec.codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2309 return 0;
2310
2311 if(pkt->dts == AV_NOPTS_VALUE)
2312 return -1;
2313
2314 for(;;){
2315 AVPacket opkt;
2316 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2317 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2318 return ret;
2319
2320 truncate_ts(s->streams[opkt.stream_index], &opkt);
2321 ret= s->oformat->write_packet(s, &opkt);
2322
2323 av_free_packet(&opkt);
2324 pkt= NULL;
2325
2326 if(ret<0)
2327 return ret;
2328 if(url_ferror(&s->pb))
2329 return url_ferror(&s->pb);
2330 }
2331 }
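
/*
 * Usage sketch for the muxing loop (assumptions: 'oc' is an initialized output
 * context, 'enc_buf'/'enc_size' hold one encoded frame, 'is_keyframe' is a
 * placeholder flag, and 'frame_pts' is the presentation time in AV_TIME_BASE
 * units, as expected by compute_pkt_fields2):
 *
 *     AVPacket pkt;
 *     av_init_packet(&pkt);
 *     pkt.stream_index = 0;            // index of the target AVStream
 *     pkt.data         = enc_buf;
 *     pkt.size         = enc_size;
 *     pkt.pts          = frame_pts;    // rescaled internally to the stream time base
 *     if (is_keyframe)
 *         pkt.flags |= PKT_FLAG_KEY;
 *     if (av_interleaved_write_frame(oc, &pkt) < 0)
 *         return -1;
 *     // ... after the last frame, av_write_trailer(oc) flushes the
 *     // interleaving buffer and writes the trailer
 */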
2332
2333 /**
2334 * write the stream trailer to an output media file and free the
2335 * file private data.
2336 *
2337 * @param s media file handle
2338 * @return 0 if OK. AVERROR_xxx if error. */
2339 int av_write_trailer(AVFormatContext *s)
2340 {
2341 int ret, i;
2342
2343 for(;;){
2344 AVPacket pkt;
2345 ret= av_interleave_packet(s, &pkt, NULL, 1);
2346 if(ret<0) //FIXME cleanup needed for ret<0 ?
2347 goto fail;
2348 if(!ret)
2349 break;
2350
2351 truncate_ts(s->streams[pkt.stream_index], &pkt);
2352 ret= s->oformat->write_packet(s, &pkt);
2353
2354 av_free_packet(&pkt);
2355
2356 if(ret<0)
2357 goto fail;
2358 if(url_ferror(&s->pb))
2359 goto fail;
2360 }
2361
2362 ret = s->oformat->write_trailer(s);
2363 fail:
2364 if(ret == 0)
2365 ret=url_ferror(&s->pb);
2366 for(i=0;i<s->nb_streams;i++)
2367 av_freep(&s->streams[i]->priv_data);
2368 av_freep(&s->priv_data);
2369 return ret;
2370 }
2371
2372 /* "user interface" functions */
2373
2374 void dump_format(AVFormatContext *ic,
2375 int index,
2376 const char *url,
2377 int is_output)
2378 {
2379 int i, flags;
2380 char buf[256];
2381
2382 av_log(NULL, AV_LOG_DEBUG, "%s #%d, %s, %s '%s':\n",
2383 is_output ? "Output" : "Input",
2384 index,
2385 is_output ? ic->oformat->name : ic->iformat->name,
2386 is_output ? "to" : "from", url);
2387 if (!is_output) {
2388 av_log(NULL, AV_LOG_DEBUG, " Duration: ");
2389 if (ic->duration != AV_NOPTS_VALUE) {
2390 int hours, mins, secs, us;
2391 secs = ic->duration / AV_TIME_BASE;
2392 us = ic->duration % AV_TIME_BASE;
2393 mins = secs / 60;
2394 secs %= 60;
2395 hours = mins / 60;
2396 mins %= 60;
2397 av_log(NULL, AV_LOG_DEBUG, "%02d:%02d:%02d.%01d", hours, mins, secs,
2398 (10 * us) / AV_TIME_BASE);
2399 } else {
2400 av_log(NULL, AV_LOG_DEBUG, "N/A");
2401 }
2402 if (ic->start_time != AV_NOPTS_VALUE) {
2403 int secs, us;
2404 av_log(NULL, AV_LOG_DEBUG, ", start: ");
2405 secs = ic->start_time / AV_TIME_BASE;
2406 us = ic->start_time % AV_TIME_BASE;
2407 av_log(NULL, AV_LOG_DEBUG, "%d.%06d",
2408 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2409 }
2410 av_log(NULL, AV_LOG_DEBUG, ", bitrate: ");
2411 if (ic->bit_rate) {
2412 av_log(NULL, AV_LOG_DEBUG,"%d kb/s", ic->bit_rate / 1000);
2413 } else {
2414 av_log(NULL, AV_LOG_DEBUG, "N/A");
2415 }
2416 av_log(NULL, AV_LOG_DEBUG, "\n");
2417 }
2418 for(i=0;i<ic->nb_streams;i++) {
2419 AVStream *st = ic->streams[i];
2420 avcodec_string(buf, sizeof(buf), &st->codec, is_output);
2421 av_log(NULL, AV_LOG_DEBUG, " Stream #%d.%d", index, i);
2422 /* the PID is important information, so we display it */
2423 /* XXX: add a generic system */
2424 if (is_output)
2425 flags = ic->oformat->flags;
2426 else
2427 flags = ic->iformat->flags;
2428 if (flags & AVFMT_SHOW_IDS) {
2429 av_log(NULL, AV_LOG_DEBUG, "[0x%x]", st->id);
2430 }
2431 av_log(NULL, AV_LOG_DEBUG, ": %s\n", buf);
2432 }
2433 }
2434
2435 typedef struct {
2436 const char *abv;
2437 int width, height;
2438 int frame_rate, frame_rate_base;
2439 } AbvEntry;
2440
2441 static AbvEntry frame_abvs[] = {
2442 { "ntsc", 720, 480, 30000, 1001 },
2443 { "pal", 720, 576, 25, 1 },
2444 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2445 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2446 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2447 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2448 { "film", 352, 240, 24, 1 },
2449 { "ntsc-film", 352, 240, 24000, 1001 },
2450 { "sqcif", 128, 96, 0, 0 },
2451 { "qcif", 176, 144, 0, 0 },
2452 { "cif", 352, 288, 0, 0 },
2453 { "4cif", 704, 576, 0, 0 },
2454 };
2455
2456 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2457 {
2458 int i;
2459 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2460 const char *p;
2461 int frame_width = 0, frame_height = 0;
2462
2463 for(i=0;i<n;i++) {
2464 if (!strcmp(frame_abvs[i].abv, str)) {
2465 frame_width = frame_abvs[i].width;
2466 frame_height = frame_abvs[i].height;
2467 break;
2468 }
2469 }
2470 if (i == n) {
2471 p = str;
2472 frame_width = strtol(p, (char **)&p, 10);
2473 if (*p)
2474 p++;
2475 frame_height = strtol(p, (char **)&p, 10);
2476 }
2477 if (frame_width <= 0 || frame_height <= 0)
2478 return -1;
2479 *width_ptr = frame_width;
2480 *height_ptr = frame_height;
2481 return 0;
2482 }
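
/*
 * Usage sketch (the results follow from the abbreviation table above and the
 * WxH parser):
 *
 *     int w, h;
 *     parse_image_size(&w, &h, "pal");      // w = 720, h = 576
 *     parse_image_size(&w, &h, "640x480");  // w = 640, h = 480
 *     parse_image_size(&w, &h, "0x0");      // returns -1 (invalid size)
 */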
2483
2484 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2485 {
2486 int i;
2487 char* cp;
2488
2489 /* First, we check our abbreviation table */
2490 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2491 if (!strcmp(frame_abvs[i].abv, arg)) {
2492 *frame_rate = frame_abvs[i].frame_rate;
2493 *frame_rate_base = frame_abvs[i].frame_rate_base;
2494 return 0;
2495 }
2496
2497 /* Then, we try to parse it as fraction */
2498 cp = strchr(arg, '/');
2499 if (!cp)
2500 cp = strchr(arg, ':');
2501 if (cp) {
2502 char* cpp;
2503 *frame_rate = strtol(arg, &cpp, 10);
2504 if (cpp != arg || cpp == cp)
2505 *frame_rate_base = strtol(cp+1, &cpp, 10);
2506 else
2507 *frame_rate = 0;
2508 }
2509 else {
2510 /* Finally we give up and parse it as double */
2511 *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q()
2512 *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5);
2513 }
2514 if (!*frame_rate || !*frame_rate_base)
2515 return -1;
2516 else
2517 return 0;
2518 }
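
/*
 * Usage sketch (the abbreviation comes from frame_abvs above; a plain decimal
 * rate is rounded against DEFAULT_FRAME_RATE_BASE):
 *
 *     int rate, rate_base;
 *     parse_frame_rate(&rate, &rate_base, "ntsc-film");   // 24000 / 1001
 *     parse_frame_rate(&rate, &rate_base, "30000/1001");  // 30000 / 1001
 *     parse_frame_rate(&rate, &rate_base, "25");          // rate = 25 * DEFAULT_FRAME_RATE_BASE,
 *                                                         // rate_base = DEFAULT_FRAME_RATE_BASE
 */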
2519
2520 /* Syntax:
2521 * - If not a duration:
2522 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2523 * Time is local time unless 'Z' is appended, in which case it is GMT.
2524 * Return the date in microseconds since 1970 (the Unix epoch).
2525 * - If duration:
2526 * HH[:MM[:SS[.m...]]]
2527 * S+[.m...]
2528 */
2529 int64_t parse_date(const char *datestr, int duration)
2530 {
2531 const char *p;
2532 int64_t t;
2533 struct tm dt;
2534 int i;
2535 static const char *date_fmt[] = {
2536 "%Y-%m-%d",
2537 "%Y%m%d",
2538 };
2539 static const char *time_fmt[] = {
2540 "%H:%M:%S",
2541 "%H%M%S",
2542 };
2543 const char *q;
2544 int is_utc, len;
2545 char lastch;
2546 int negative = 0;
2547
2548 #undef time
2549 time_t now = time(0);
2550
2551 len = strlen(datestr);
2552 if (len > 0)
2553 lastch = datestr[len - 1];
2554 else
2555 lastch = '\0';
2556 is_utc = (lastch == 'z' || lastch == 'Z');
2557
2558 memset(&dt, 0, sizeof(dt));
2559
2560 p = datestr;
2561 q = NULL;
2562 if (!duration) {
2563 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2564 q = small_strptime(p, date_fmt[i], &dt);
2565 if (q) {
2566 break;
2567 }
2568 }
2569
2570 if (!q) {
2571 if (is_utc) {
2572 dt = *gmtime(&now);
2573 } else {
2574 dt = *localtime(&now);
2575 }
2576 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2577 } else {
2578 p = q;
2579 }
2580
2581 if (*p == 'T' || *p == 't' || *p == ' ')
2582 p++;
2583
2584 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2585 q = small_strptime(p, time_fmt[i], &dt);
2586 if (q) {
2587 break;
2588 }
2589 }
2590 } else {
2591 if (p[0] == '-') {
2592 negative = 1;
2593 ++p;
2594 }
2595 q = small_strptime(p, time_fmt[0], &dt);
2596 if (!q) {
2597 dt.tm_sec = strtol(p, (char **)&q, 10);
2598 dt.tm_min = 0;
2599 dt.tm_hour = 0;
2600 }
2601 }
2602
2603 /* Now we have all the fields that we can get */
2604 if (!q) {
2605 if (duration)
2606 return 0;
2607 else
2608 return now * int64_t_C(1000000);
2609 }
2610
2611 if (duration) {
2612 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2613 } else {
2614 dt.tm_isdst = -1; /* unknown */
2615 if (is_utc) {
2616 t = mktimegm(&dt);
2617 } else {
2618 t = mktime(&dt);
2619 }
2620 }
2621
2622 t *= 1000000;
2623
2624 if (*q == '.') {
2625 int val, n;
2626 q++;
2627 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2628 if (!isdigit(*q))
2629 break;
2630 val += n * (*q - '0');
2631 }
2632 t += val;
2633 }
2634 return negative ? -t : t;
2635 }
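
/*
 * Usage sketch (return values follow from the code above; absolute dates use
 * the local time zone unless 'Z' is appended):
 *
 *     int64_t t;
 *     t = parse_date("2003-04-05 12:00:00", 0);  // absolute date, local time
 *     t = parse_date("12:00:00Z", 0);            // today at 12:00 GMT
 *     t = parse_date("00:05:30.5", 1);           // duration: 330500000 us
 *     t = parse_date("-45.5", 1);                // duration: -45500000 us
 */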
2636
2637 /* syntax: '?tag1=val1&tag2=val2...'. Only minimal URL decoding is done
2638 ('+' is converted to ' '). Return 1 if found, 0 otherwise */
2639 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2640 {
2641 const char *p;
2642 char tag[128], *q;
2643
2644 p = info;
2645 if (*p == '?')
2646 p++;
2647 for(;;) {
2648 q = tag;
2649 while (*p != '\0' && *p != '=' && *p != '&') {
2650 if ((q - tag) < sizeof(tag) - 1)
2651 *q++ = *p;
2652 p++;
2653 }
2654 *q = '\0';
2655 q = arg;
2656 if (*p == '=') {
2657 p++;
2658 while (*p != '&' && *p != '\0') {
2659 if ((q - arg) < arg_size - 1) {
2660 if (*p == '+')
2661 *q++ = ' ';
2662 else
2663 *q++ = *p;
2664 }
2665 p++;
2666 }
2667 *q = '\0';
2668 }
2669 if (!strcmp(tag, tag1))
2670 return 1;
2671 if (*p != '&')
2672 break;
2673 p++;
2674 }
2675 return 0;
2676 }
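
/*
 * Usage sketch (the tag names are only examples; the value of the matching
 * tag is copied into the output buffer with '+' decoded to ' '):
 *
 *     char ttl[16];
 *     if (find_info_tag(ttl, sizeof(ttl), "ttl", "?multicast=1&ttl=16"))
 *         printf("ttl = %s\n", ttl);   // prints "ttl = 16"
 */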
2677
2678 /* Return in 'buf' the path with '%d' replaced by 'number'. Also handles
2679 the '%0nd' format where 'n' is the total number of digits, as well as
2680 '%%'. Return 0 if OK, -1 on format error */
2681 int get_frame_filename(char *buf, int buf_size,
2682 const char *path, int number)
2683 {
2684 const char *p;
2685 char *q, buf1[20], c;
2686 int nd, len, percentd_found;
2687
2688 q = buf;
2689 p = path;
2690 percentd_found = 0;
2691 for(;;) {
2692 c = *p++;
2693 if (c == '\0')
2694 break;
2695 if (c == '%') {
2696 do {
2697 nd = 0;
2698 while (isdigit(*p)) {
2699 nd = nd * 10 + *p++ - '0';
2700 }
2701 c = *p++;
2702 } while (isdigit(c));
2703
2704 switch(c) {
2705 case '%':
2706 goto addchar;
2707 case 'd':
2708 if (percentd_found)
2709 goto fail;
2710 percentd_found = 1;
2711 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2712 len = strlen(buf1);
2713 if ((q - buf + len) > buf_size - 1)
2714 goto fail;
2715 memcpy(q, buf1, len);
2716 q += len;
2717 break;
2718 default:
2719 goto fail;
2720 }
2721 } else {
2722 addchar:
2723 if ((q - buf) < buf_size - 1)
2724 *q++ = c;
2725 }
2726 }
2727 if (!percentd_found)
2728 goto fail;
2729 *q = '\0';
2730 return 0;
2731 fail:
2732 *q = '\0';
2733 return -1;
2734 }
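
/*
 * Usage sketch (the pattern is an example; a path without any '%d' makes the
 * function fail):
 *
 *     char name[1024];
 *     get_frame_filename(name, sizeof(name), "img-%04d.jpeg", 7);
 *     // name = "img-0007.jpeg"
 *     get_frame_filename(name, sizeof(name), "img.jpeg", 7);   // returns -1
 */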
2735
2736 /**
2737 * Print a nice hexadecimal dump of a buffer
2738 * @param f stream for output
2739 * @param buf buffer
2740 * @param size buffer size
2741 */
2742 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2743 {
2744 int len, i, j, c;
2745
2746 for(i=0;i<size;i+=16) {
2747 len = size - i;
2748 if (len > 16)
2749 len = 16;
2750 fprintf(f, "%08x ", i);
2751 for(j=0;j<16;j++) {
2752 if (j < len)
2753 fprintf(f, " %02x", buf[i+j]);
2754 else
2755 fprintf(f, " ");
2756 }
2757 fprintf(f, " ");
2758 for(j=0;j<len;j++) {
2759 c = buf[i+j];
2760 if (c < ' ' || c > '~')
2761 c = '.';
2762 fprintf(f, "%c", c);
2763 }
2764 fprintf(f, "\n");
2765 }
2766 }
2767
2768 /**
2769 * Print a nice dump of a packet to 'f'
2770 * @param f stream for output
2771 * @param pkt packet to dump
2772 * @param dump_payload true if the payload must be displayed too
2773 */
2774 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2775 {
2776 fprintf(f, "stream #%d:\n", pkt->stream_index);
2777 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2778 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2779 /* DTS is _always_ valid after av_read_frame() */
2780 fprintf(f, " dts=");
2781 if (pkt->dts == AV_NOPTS_VALUE)
2782 fprintf(f, "N/A");
2783 else
2784 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
2785 /* PTS may not be known if B-frames are present */
2786 fprintf(f, " pts=");
2787 if (pkt->pts == AV_NOPTS_VALUE)
2788 fprintf(f, "N/A");
2789 else
2790 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
2791 fprintf(f, "\n");
2792 fprintf(f, " size=%d\n", pkt->size);
2793 if (dump_payload)
2794 av_hex_dump(f, pkt->data, pkt->size);
2795 }
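
/*
 * Usage sketch (assumptions: 'ic' is an opened input context and av_read_frame()
 * is used, as elsewhere in this file, to pull packets):
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         av_pkt_dump(stdout, &pkt, 0);   // pass 1 to hex dump the payload too
 *         av_free_packet(&pkt);
 *     }
 */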
2796
2797 void url_split(char *proto, int proto_size,
2798 char *authorization, int authorization_size,
2799 char *hostname, int hostname_size,
2800 int *port_ptr,
2801 char *path, int path_size,
2802 const char *url)
2803 {
2804 const char *p;
2805 char *q;
2806 int port;
2807
2808 port = -1;
2809
2810 p = url;
2811 q = proto;
2812 while (*p != ':' && *p != '\0') {
2813 if ((q - proto) < proto_size - 1)
2814 *q++ = *p;
2815 p++;
2816 }
2817 if (proto_size > 0)
2818 *q = '\0';
2819 if (authorization_size > 0)
2820 authorization[0] = '\0';
2821 if (*p == '\0') {
2822 if (proto_size > 0)
2823 proto[0] = '\0';
2824 if (hostname_size > 0)
2825 hostname[0] = '\0';
2826 p = url;
2827 } else {
2828 char *at,*slash; // PETR: position of '@' character and '/' character
2829
2830 p++;
2831 if (*p == '/')
2832 p++;
2833 if (*p == '/')
2834 p++;
2835 at = strchr(p,'@'); // PETR: get the position of '@'
2836 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
2837 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' after '/'
2838
2839 q = at ? authorization : hostname; // PETR: if '@' exists, start with the authorization part
2840
2841 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
2842 if (*p == '@') { // PETR: passed '@'
2843 if (authorization_size > 0)
2844 *q = '\0';
2845 q = hostname;
2846 at = NULL;
2847 } else if (!at) { // PETR: hostname
2848 if ((q - hostname) < hostname_size - 1)
2849 *q++ = *p;
2850 } else {
2851 if ((q - authorization) < authorization_size - 1)
2852 *q++ = *p;
2853 }
2854 p++;
2855 }
2856 if (hostname_size > 0)
2857 *q = '\0';
2858 if (*p == ':') {
2859 p++;
2860 port = strtoul(p, (char **)&p, 10);
2861 }
2862 }
2863 if (port_ptr)
2864 *port_ptr = port;
2865 pstrcpy(path, path_size, p);
2866 }
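
/*
 * Usage sketch (the URL is an example; port is set to -1 when absent):
 *
 *     char proto[16], auth[128], host[128], path[256];
 *     int port;
 *     url_split(proto, sizeof(proto), auth, sizeof(auth),
 *               host, sizeof(host), &port, path, sizeof(path),
 *               "rtsp://user:pass@server.example.com:554/stream.sdp");
 *     // proto="rtsp", auth="user:pass", host="server.example.com",
 *     // port=554, path="/stream.sdp"
 */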
2867
2868 /**
2869 * Set the pts for a given stream
2870 * @param s stream
2871 * @param pts_wrap_bits number of bits effectively used by the pts
2872 * (used for wrap control, 33 is the value for MPEG)
2873 * @param pts_num numerator to convert to seconds (MPEG: 1)
2874 * @param pts_den denominator to convert to seconds (MPEG: 90000)
2875 */
2876 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2877 int pts_num, int pts_den)
2878 {
2879 s->pts_wrap_bits = pts_wrap_bits;
2880 s->time_base.num = pts_num;
2881 s->time_base.den = pts_den;
2882 }
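
/*
 * Usage sketch: av_new_stream() already applies the MPEG default
 * av_set_pts_info(st, 33, 1, 90000); a demuxer whose timestamps count NTSC
 * frames could instead use (illustrative values):
 *
 *     av_set_pts_info(st, 33, 1001, 30000);   // one tick = one frame duration
 */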
2883
2884 /* fraction handling */
2885
2886 /**
2887 * f = val + (num / den) + 0.5. 'num' is normalized so that
2888 * 0 <= num < den.
2889 *
2890 * @param f fractional number
2891 * @param val integer value
2892 * @param num must be >= 0
2893 * @param den must be >= 1
2894 */
2895 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2896 {
2897 num += (den >> 1);
2898 if (num >= den) {
2899 val += num / den;
2900 num = num % den;
2901 }
2902 f->val = val;
2903 f->num = num;
2904 f->den = den;
2905 }
2906
2907 /* set f to (val + 0.5) */
2908 void av_frac_set(AVFrac *f, int64_t val)
2909 {
2910 f->val = val;
2911 f->num = f->den >> 1;
2912 }
2913
2914 /**
2915 * Fractional addition to f: f = f + (incr / f->den)
2916 *
2917 * @param f fractional number
2918 * @param incr increment, can be positive or negative
2919 */
2920 void av_frac_add(AVFrac *f, int64_t incr)
2921 {
2922 int64_t num, den;
2923
2924 num = f->num + incr;
2925 den = f->den;
2926 if (num < 0) {
2927 f->val += num / den;
2928 num = num % den;
2929 if (num < 0) {
2930 num += den;
2931 f->val--;
2932 }
2933 } else if (num >= den) {
2934 f->val += num / den;
2935 num = num % den;
2936 }
2937 f->num = num;
2938 }
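
/*
 * Worked example, matching the pts update done by av_write_header() and
 * compute_pkt_fields2(): for 44100 Hz audio with the default 1/90000 time
 * base, each 1152-sample frame adds incr = 90000 * 1152 = 103680000 to a
 * fraction with den = 44100, i.e. 2351 full ticks with a remainder of 900
 * that is carried over, so the pts accumulates without rounding drift.
 */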
2939
2940 /**
2941 * register a new image format
2942 * @param img_fmt Image format descriptor
2943 */
2944 void av_register_image_format(AVImageFormat *img_fmt)
2945 {
2946 AVImageFormat **p;
2947
2948 p = &first_image_format;
2949 while (*p != NULL) p = &(*p)->next;
2950 *p = img_fmt;
2951 img_fmt->next = NULL;
2952 }
2953
2954 /* guess image format */
2955 AVImageFormat *av_probe_image_format(AVProbeData *pd)
2956 {
2957 AVImageFormat *fmt1, *fmt;
2958 int score, score_max;
2959
2960 fmt = NULL;
2961 score_max = 0;
2962 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2963 if (fmt1->img_probe) {
2964 score = fmt1->img_probe(pd);
2965 if (score > score_max) {
2966 score_max = score;
2967 fmt = fmt1;
2968 }
2969 }
2970 }
2971 return fmt;
2972 }
2973
2974 AVImageFormat *guess_image_format(const char *filename)
2975 {
2976 AVImageFormat *fmt1;
2977
2978 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2979 if (fmt1->extensions && match_ext(filename, fmt1->extensions))
2980 return fmt1;
2981 }
2982 return NULL;
2983 }
2984
2985 /**
2986 * Read an image from a stream.
2987 * @param pb byte stream containing the image
2988 * @param fmt image format, NULL if probing is required
2989 */
2990 int av_read_image(ByteIOContext *pb, const char *filename,
2991 AVImageFormat *fmt,
2992 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
2993 {
2994 char buf[PROBE_BUF_SIZE];
2995 AVProbeData probe_data, *pd = &probe_data;
2996 offset_t pos;
2997 int ret;
2998
2999 if (!fmt) {
3000 pd->filename = filename;
3001 pd->buf = buf;
3002 pos = url_ftell(pb);
3003 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
3004 url_fseek(pb, pos, SEEK_SET);
3005 fmt = av_probe_image_format(pd);
3006 }
3007 if (!fmt)
3008 return AVERROR_NOFMT;
3009 ret = fmt->img_read(pb, alloc_cb, opaque);
3010 return ret;
3011 }
3012
3013 /**
3014 * Write an image to a stream.
3015 * @param pb byte stream for the image output
3016 * @param fmt image format
3017 * @param img image data and information
3018 */
3019 int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
3020 {
3021 return fmt->img_write(pb, img);
3022 }
3023