Fix wrong comment, patch by Michel Bardiaux mbardiaux mediaxim be.
[libav.git] / libavformat / utils.c
1 /*
2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "allformats.h"
23 #include "opt.h"
24
25 #undef NDEBUG
26 #include <assert.h>
27
28 /**
29 * @file libavformat/utils.c
30  * Various utility functions for using the FFmpeg library.
31 */
32
33 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
34 static void av_frac_add(AVFrac *f, int64_t incr);
35
36 /** head of registered input format linked list. */
37 AVInputFormat *first_iformat = NULL;
38 /** head of registered output format linked list. */
39 AVOutputFormat *first_oformat = NULL;
40
41 void av_register_input_format(AVInputFormat *format)
42 {
43 AVInputFormat **p;
44 p = &first_iformat;
45 while (*p != NULL) p = &(*p)->next;
46 *p = format;
47 format->next = NULL;
48 }
49
50 void av_register_output_format(AVOutputFormat *format)
51 {
52 AVOutputFormat **p;
53 p = &first_oformat;
54 while (*p != NULL) p = &(*p)->next;
55 *p = format;
56 format->next = NULL;
57 }
58
59 int match_ext(const char *filename, const char *extensions)
60 {
61 const char *ext, *p;
62 char ext1[32], *q;
63
64 if(!filename)
65 return 0;
66
67 ext = strrchr(filename, '.');
68 if (ext) {
69 ext++;
70 p = extensions;
71 for(;;) {
72 q = ext1;
73 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
74 *q++ = *p++;
75 *q = '\0';
76 if (!strcasecmp(ext1, ext))
77 return 1;
78 if (*p == '\0')
79 break;
80 p++;
81 }
82 }
83 return 0;
84 }
85
86 AVOutputFormat *guess_format(const char *short_name, const char *filename,
87 const char *mime_type)
88 {
89 AVOutputFormat *fmt, *fmt_found;
90 int score_max, score;
91
92 /* specific test for image sequences */
93 #ifdef CONFIG_IMAGE2_MUXER
94 if (!short_name && filename &&
95 av_filename_number_test(filename) &&
96 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
97 return guess_format("image2", NULL, NULL);
98 }
99 #endif
100 /* find the proper file type */
101 fmt_found = NULL;
102 score_max = 0;
103 fmt = first_oformat;
104 while (fmt != NULL) {
105 score = 0;
106 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
107 score += 100;
108 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
109 score += 10;
110 if (filename && fmt->extensions &&
111 match_ext(filename, fmt->extensions)) {
112 score += 5;
113 }
114 if (score > score_max) {
115 score_max = score;
116 fmt_found = fmt;
117 }
118 fmt = fmt->next;
119 }
120 return fmt_found;
121 }
122
123 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
124 const char *mime_type)
125 {
126 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
127
128 if (fmt) {
129 AVOutputFormat *stream_fmt;
130 char stream_format_name[64];
131
132 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
133 stream_fmt = guess_format(stream_format_name, NULL, NULL);
134
135 if (stream_fmt)
136 fmt = stream_fmt;
137 }
138
139 return fmt;
140 }
141
142 /**
143 * Guesses the codec id based upon muxer and filename.
144 */
145 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
146 const char *filename, const char *mime_type, enum CodecType type){
147 if(type == CODEC_TYPE_VIDEO){
148 enum CodecID codec_id= CODEC_ID_NONE;
149
150 #ifdef CONFIG_IMAGE2_MUXER
151 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
152 codec_id= av_guess_image2_codec(filename);
153 }
154 #endif
155 if(codec_id == CODEC_ID_NONE)
156 codec_id= fmt->video_codec;
157 return codec_id;
158 }else if(type == CODEC_TYPE_AUDIO)
159 return fmt->audio_codec;
160 else
161 return CODEC_ID_NONE;
162 }
163
164 /**
165 * finds AVInputFormat based on input format's short name.
166 */
167 AVInputFormat *av_find_input_format(const char *short_name)
168 {
169 AVInputFormat *fmt;
170 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
171 if (!strcmp(fmt->name, short_name))
172 return fmt;
173 }
174 return NULL;
175 }
176
177 /* memory handling */
178
179 /**
180 * Default packet destructor.
181 */
182 void av_destruct_packet(AVPacket *pkt)
183 {
184 av_free(pkt->data);
185 pkt->data = NULL; pkt->size = 0;
186 }
187
188 /**
189  * Allocate the payload of a packet and initialize its fields to default values.
190 *
191 * @param pkt packet
192 * @param size wanted payload size
193 * @return 0 if OK. AVERROR_xxx otherwise.
194 */
195 int av_new_packet(AVPacket *pkt, int size)
196 {
197 uint8_t *data;
198 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
199 return AVERROR_NOMEM;
200 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
201 if (!data)
202 return AVERROR_NOMEM;
203 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
204
205 av_init_packet(pkt);
206 pkt->data = data;
207 pkt->size = size;
208 pkt->destruct = av_destruct_packet;
209 return 0;
210 }
211
212 /**
213  * Allocate and read the payload of a packet and initialize its fields to default values.
214 *
215 * @param pkt packet
216 * @param size wanted payload size
217 * @return >0 (read size) if OK. AVERROR_xxx otherwise.
218 */
219 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
220 {
221 int ret= av_new_packet(pkt, size);
222
223 if(ret<0)
224 return ret;
225
226 pkt->pos= url_ftell(s);
227
228 ret= get_buffer(s, pkt->data, size);
229 if(ret<=0)
230 av_free_packet(pkt);
231 else
232 pkt->size= ret;
233
234 return ret;
235 }
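/* Minimal usage sketch: read one fixed-size chunk from an already opened
 * ByteIOContext into a packet and release it again. The context 'pb' and the
 * 4096-byte chunk size are arbitrary illustrations.
 *
 *     AVPacket pkt;
 *     int n = av_get_packet(pb, &pkt, 4096);
 *     if (n > 0) {
 *         // pkt.data holds n bytes read starting at file position pkt.pos
 *         av_free_packet(&pkt);
 *     }
 */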
236
237 /* This is a hack - the packet memory allocation stuff is broken. The
238    packet data is duplicated here if it was not really allocated by us. */
239 int av_dup_packet(AVPacket *pkt)
240 {
241 if (pkt->destruct != av_destruct_packet) {
242 uint8_t *data;
243 /* we duplicate the packet and don't forget to put the padding
244 again */
245 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
246 return AVERROR_NOMEM;
247 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
248 if (!data) {
249 return AVERROR_NOMEM;
250 }
251 memcpy(data, pkt->data, pkt->size);
252 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
253 pkt->data = data;
254 pkt->destruct = av_destruct_packet;
255 }
256 return 0;
257 }
258
259 /**
260 * Check whether filename actually is a numbered sequence generator.
261 *
262 * @param filename possible numbered sequence string
263 * @return 1 if a valid numbered sequence string, 0 otherwise.
264 */
265 int av_filename_number_test(const char *filename)
266 {
267 char buf[1024];
268 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
269 }
270
271 /**
272 * Guess file format.
273 *
274 * @param is_opened whether the file is already opened, determines whether
275 * demuxers with or without AVFMT_NOFILE are probed
276 */
277 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
278 {
279 AVInputFormat *fmt1, *fmt;
280 int score, score_max;
281
282 fmt = NULL;
283 score_max = 0;
284 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
285 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
286 continue;
287 score = 0;
288 if (fmt1->read_probe) {
289 score = fmt1->read_probe(pd);
290 } else if (fmt1->extensions) {
291 if (match_ext(pd->filename, fmt1->extensions)) {
292 score = 50;
293 }
294 }
295 if (score > score_max) {
296 score_max = score;
297 fmt = fmt1;
298 }
299 }
300 return fmt;
301 }
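/* Minimal probing sketch (illustration; the buffer and file name are
 * hypothetical): fill an AVProbeData with bytes already read by the caller
 * and ask for a matching demuxer. Most callers let av_open_input_file() do
 * this instead.
 *
 *     AVProbeData pd;
 *     pd.filename = "movie.mpg";
 *     pd.buf      = probe_buffer;      // first bytes of the file
 *     pd.buf_size = probe_buffer_size;
 *     AVInputFormat *fmt = av_probe_input_format(&pd, 1);
 */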
302
303 /************************************************************/
304 /* input media file */
305
306 /**
307  * Return the name of the format attached to the given context; used as the item name callback of the AVClass for av_log().
308 */
309 static const char* format_to_name(void* ptr)
310 {
311 AVFormatContext* fc = (AVFormatContext*) ptr;
312 if(fc->iformat) return fc->iformat->name;
313 else if(fc->oformat) return fc->oformat->name;
314 else return "NULL";
315 }
316
317 #define OFFSET(x) offsetof(AVFormatContext,x)
318 #define DEFAULT 0 //should be NAN but it doesn't work as it is not a constant in glibc as required by ANSI/ISO C
319 //these names are too long to be readable
320 #define E AV_OPT_FLAG_ENCODING_PARAM
321 #define D AV_OPT_FLAG_DECODING_PARAM
322
323 static const AVOption options[]={
324 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
325 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
326 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
327 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
328 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
329 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
330 {"track", "set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
331 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
332 {NULL},
333 };
334
335 #undef E
336 #undef D
337 #undef DEFAULT
338
339 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
340
341 static void avformat_get_context_defaults(AVFormatContext *s)
342 {
343 memset(s, 0, sizeof(AVFormatContext));
344
345 s->av_class = &av_format_context_class;
346
347 av_opt_set_defaults(s);
348 }
349
350 AVFormatContext *av_alloc_format_context(void)
351 {
352 AVFormatContext *ic;
353 ic = av_malloc(sizeof(AVFormatContext));
354 if (!ic) return ic;
355 avformat_get_context_defaults(ic);
356 ic->av_class = &av_format_context_class;
357 return ic;
358 }
359
360 /**
361 * Allocates all the structures needed to read an input stream.
362 * This does not open the needed codecs for decoding the stream[s].
363 */
364 int av_open_input_stream(AVFormatContext **ic_ptr,
365 ByteIOContext *pb, const char *filename,
366 AVInputFormat *fmt, AVFormatParameters *ap)
367 {
368 int err;
369 AVFormatContext *ic;
370 AVFormatParameters default_ap;
371
372 if(!ap){
373 ap=&default_ap;
374 memset(ap, 0, sizeof(default_ap));
375 }
376
377 if(!ap->prealloced_context)
378 ic = av_alloc_format_context();
379 else
380 ic = *ic_ptr;
381 if (!ic) {
382 err = AVERROR_NOMEM;
383 goto fail;
384 }
385 ic->iformat = fmt;
386 if (pb)
387 ic->pb = *pb;
388 ic->duration = AV_NOPTS_VALUE;
389 ic->start_time = AV_NOPTS_VALUE;
390 pstrcpy(ic->filename, sizeof(ic->filename), filename);
391
392 /* allocate private data */
393 if (fmt->priv_data_size > 0) {
394 ic->priv_data = av_mallocz(fmt->priv_data_size);
395 if (!ic->priv_data) {
396 err = AVERROR_NOMEM;
397 goto fail;
398 }
399 } else {
400 ic->priv_data = NULL;
401 }
402
403 err = ic->iformat->read_header(ic, ap);
404 if (err < 0)
405 goto fail;
406
407 if (pb && !ic->data_offset)
408 ic->data_offset = url_ftell(&ic->pb);
409
410 *ic_ptr = ic;
411 return 0;
412 fail:
413 if (ic) {
414 av_freep(&ic->priv_data);
415 }
416 av_free(ic);
417 *ic_ptr = NULL;
418 return err;
419 }
420
421 /** Size of probe buffer, for guessing file type from file contents. */
422 #define PROBE_BUF_MIN 2048
423 #define PROBE_BUF_MAX (1<<20)
424
425 /**
426  * Open a media file as input. The codecs are not opened. Only the file
427 * header (if present) is read.
428 *
429 * @param ic_ptr the opened media file handle is put here
430 * @param filename filename to open.
431 * @param fmt if non NULL, force the file format to use
432 * @param buf_size optional buffer size (zero if default is OK)
433  * @param ap additional parameters needed when opening the file (NULL if default)
434 * @return 0 if OK. AVERROR_xxx otherwise.
435 */
436 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
437 AVInputFormat *fmt,
438 int buf_size,
439 AVFormatParameters *ap)
440 {
441 int err, must_open_file, file_opened, probe_size;
442 AVProbeData probe_data, *pd = &probe_data;
443 ByteIOContext pb1, *pb = &pb1;
444
445 file_opened = 0;
446 pd->filename = "";
447 if (filename)
448 pd->filename = filename;
449 pd->buf = NULL;
450 pd->buf_size = 0;
451
452 if (!fmt) {
453 /* guess format if no file can be opened */
454 fmt = av_probe_input_format(pd, 0);
455 }
456
457 /* do not open file if the format does not need it. XXX: specific
458 hack needed to handle RTSP/TCP */
459 must_open_file = 1;
460 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
461 must_open_file = 0;
462 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it's uninitialized
463 }
464
465 if (!fmt || must_open_file) {
466 /* open the file only when we need it (no format known yet or the format requires a file) */
467 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
468 err = AVERROR_IO;
469 goto fail;
470 }
471 file_opened = 1;
472 if (buf_size > 0) {
473 url_setbufsize(pb, buf_size);
474 }
475
476 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
477 /* read probe data */
478 pd->buf= av_realloc(pd->buf, probe_size);
479 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
480 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
481 url_fclose(pb);
482 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
483 file_opened = 0;
484 err = AVERROR_IO;
485 goto fail;
486 }
487 }
488 /* guess file format */
489 fmt = av_probe_input_format(pd, 1);
490 }
491 av_freep(&pd->buf);
492 }
493
494 /* if still no format found, error */
495 if (!fmt) {
496 err = AVERROR_NOFMT;
497 goto fail;
498 }
499
500 /* XXX: suppress this hack for redirectors */
501 #ifdef CONFIG_NETWORK
502 if (fmt == &redir_demuxer) {
503 err = redir_open(ic_ptr, pb);
504 url_fclose(pb);
505 return err;
506 }
507 #endif
508
509 /* check filename in case an image number is expected */
510 if (fmt->flags & AVFMT_NEEDNUMBER) {
511 if (!av_filename_number_test(filename)) {
512 err = AVERROR_NUMEXPECTED;
513 goto fail;
514 }
515 }
516 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
517 if (err)
518 goto fail;
519 return 0;
520 fail:
521 av_freep(&pd->buf);
522 if (file_opened)
523 url_fclose(pb);
524 *ic_ptr = NULL;
525 return err;
526
527 }
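/* Minimal sketch of forcing a demuxer instead of relying on probing
 * (illustration; the format and file names are hypothetical examples):
 *
 *     AVFormatContext *ic;
 *     AVInputFormat *fmt = av_find_input_format("mpegts");
 *     if (fmt && av_open_input_file(&ic, "input.ts", fmt, 0, NULL) == 0) {
 *         // ... use ic ...
 *         av_close_input_file(ic);
 *     }
 */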
528
529 /*******************************************************/
530
531 /**
532 * Read a transport packet from a media file.
533 *
534  * This function is obsolete and should never be used.
535 * Use av_read_frame() instead.
536 *
537 * @param s media file handle
538 * @param pkt is filled
539 * @return 0 if OK. AVERROR_xxx if error.
540 */
541 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
542 {
543 return s->iformat->read_packet(s, pkt);
544 }
545
546 /**********************************************************/
547
548 /**
549  * Get the number of samples of an audio frame. Return -1 on error.
550 */
551 static int get_audio_frame_size(AVCodecContext *enc, int size)
552 {
553 int frame_size;
554
555 if (enc->frame_size <= 1) {
556 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
557
558 if (bits_per_sample) {
559 if (enc->channels == 0)
560 return -1;
561 frame_size = (size << 3) / (bits_per_sample * enc->channels);
562 } else {
563 /* used for example by ADPCM codecs */
564 if (enc->bit_rate == 0)
565 return -1;
566 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
567 }
568 } else {
569 frame_size = enc->frame_size;
570 }
571 return frame_size;
572 }
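/* Worked example of the constant-bits-per-sample branch above: 4096 bytes of
 * 16-bit stereo PCM give (4096 << 3) / (16 * 2) = 1024 samples per frame. */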
573
574
575 /**
576  * Compute the frame duration in seconds as the fraction *pnum / *pden; both are set to 0 if it is not available.
577 */
578 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
579 AVCodecParserContext *pc, AVPacket *pkt)
580 {
581 int frame_size;
582
583 *pnum = 0;
584 *pden = 0;
585 switch(st->codec->codec_type) {
586 case CODEC_TYPE_VIDEO:
587 if(st->time_base.num*1000LL > st->time_base.den){
588 *pnum = st->time_base.num;
589 *pden = st->time_base.den;
590 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
591 *pnum = st->codec->time_base.num;
592 *pden = st->codec->time_base.den;
593 if (pc && pc->repeat_pict) {
594 *pden *= 2;
595 *pnum = (*pnum) * (2 + pc->repeat_pict);
596 }
597 }
598 break;
599 case CODEC_TYPE_AUDIO:
600 frame_size = get_audio_frame_size(st->codec, pkt->size);
601 if (frame_size < 0)
602 break;
603 *pnum = frame_size;
604 *pden = st->codec->sample_rate;
605 break;
606 default:
607 break;
608 }
609 }
610
611 static int is_intra_only(AVCodecContext *enc){
612 if(enc->codec_type == CODEC_TYPE_AUDIO){
613 return 1;
614 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
615 switch(enc->codec_id){
616 case CODEC_ID_MJPEG:
617 case CODEC_ID_MJPEGB:
618 case CODEC_ID_LJPEG:
619 case CODEC_ID_RAWVIDEO:
620 case CODEC_ID_DVVIDEO:
621 case CODEC_ID_HUFFYUV:
622 case CODEC_ID_FFVHUFF:
623 case CODEC_ID_ASV1:
624 case CODEC_ID_ASV2:
625 case CODEC_ID_VCR1:
626 return 1;
627 default: break;
628 }
629 }
630 return 0;
631 }
632
633 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
634 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
635 int64_t delta= last_ts - mask/2;
636 return ((lsb - delta)&mask) + delta;
637 }
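/* Worked example: with lsb_bits == 4 the mask is 15 and, for last_ts == 14,
 * delta is 7; an incoming lsb of 1 therefore unwraps to ((1 - 7) & 15) + 7 ==
 * 17 rather than 1, i.e. the result is chosen from the window
 * [delta, delta + mask] = [7, 22] surrounding last_ts. */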
638
639 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
640 AVCodecParserContext *pc, AVPacket *pkt)
641 {
642 int num, den, presentation_delayed;
643 /* handle wrapping */
644 if(st->cur_dts != AV_NOPTS_VALUE){
645 if(pkt->pts != AV_NOPTS_VALUE)
646 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
647 if(pkt->dts != AV_NOPTS_VALUE)
648 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
649 }
650
651 if (pkt->duration == 0) {
652 compute_frame_duration(&num, &den, st, pc, pkt);
653 if (den && num) {
654 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
655 }
656 }
657
658 if(is_intra_only(st->codec))
659 pkt->flags |= PKT_FLAG_KEY;
660
661 /* do we have a video B frame ? */
662 presentation_delayed = 0;
663 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
664 /* XXX: need has_b_frame, but cannot get it if the codec is
665 not initialized */
666 if (( st->codec->codec_id == CODEC_ID_H264
667 || st->codec->has_b_frames) &&
668 pc && pc->pict_type != FF_B_TYPE)
669 presentation_delayed = 1;
670 /* this may be redundant, but it shouldn't hurt */
671 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
672 presentation_delayed = 1;
673 }
674
675 if(st->cur_dts == AV_NOPTS_VALUE){
676 if(presentation_delayed) st->cur_dts = -pkt->duration;
677 else st->cur_dts = 0;
678 }
679
680 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
681 /* interpolate PTS and DTS if they are not present */
682 if (presentation_delayed) {
683 /* DTS = decompression time stamp */
684 /* PTS = presentation time stamp */
685 if (pkt->dts == AV_NOPTS_VALUE) {
686 /* if we know the last pts, use it */
687 if(st->last_IP_pts != AV_NOPTS_VALUE)
688 st->cur_dts = pkt->dts = st->last_IP_pts;
689 else
690 pkt->dts = st->cur_dts;
691 } else {
692 st->cur_dts = pkt->dts;
693 }
694 /* this is tricky: the dts must be incremented by the duration
695 of the frame we are displaying, i.e. the last I or P frame */
696 if (st->last_IP_duration == 0)
697 st->cur_dts += pkt->duration;
698 else
699 st->cur_dts += st->last_IP_duration;
700 st->last_IP_duration = pkt->duration;
701 st->last_IP_pts= pkt->pts;
702 /* cannot compute PTS if not present (we can compute it only
703 by knowing the future) */
704 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
705 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
706 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
707 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
708 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
709 pkt->pts += pkt->duration;
710 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
711 }
712 }
713
714 /* presentation is not delayed : PTS and DTS are the same */
715 if (pkt->pts == AV_NOPTS_VALUE) {
716 if (pkt->dts == AV_NOPTS_VALUE) {
717 pkt->pts = st->cur_dts;
718 pkt->dts = st->cur_dts;
719 }
720 else {
721 st->cur_dts = pkt->dts;
722 pkt->pts = pkt->dts;
723 }
724 } else {
725 st->cur_dts = pkt->pts;
726 pkt->dts = pkt->pts;
727 }
728 st->cur_dts += pkt->duration;
729 }
730 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
731
732 /* update flags */
733 if (pc) {
734 pkt->flags = 0;
735 /* key frame computation */
736 switch(st->codec->codec_type) {
737 case CODEC_TYPE_VIDEO:
738 if (pc->pict_type == FF_I_TYPE)
739 pkt->flags |= PKT_FLAG_KEY;
740 break;
741 case CODEC_TYPE_AUDIO:
742 pkt->flags |= PKT_FLAG_KEY;
743 break;
744 default:
745 break;
746 }
747 }
748 }
749
750 void av_destruct_packet_nofree(AVPacket *pkt)
751 {
752 pkt->data = NULL; pkt->size = 0;
753 }
754
755 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
756 {
757 AVStream *st;
758 int len, ret, i;
759
760 for(;;) {
761 /* select current input stream component */
762 st = s->cur_st;
763 if (st) {
764 if (!st->need_parsing || !st->parser) {
765 /* no parsing needed: we just output the packet as is */
766 /* raw data support */
767 *pkt = s->cur_pkt;
768 compute_pkt_fields(s, st, NULL, pkt);
769 s->cur_st = NULL;
770 break;
771 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
772 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
773 s->cur_ptr, s->cur_len,
774 s->cur_pkt.pts, s->cur_pkt.dts);
775 s->cur_pkt.pts = AV_NOPTS_VALUE;
776 s->cur_pkt.dts = AV_NOPTS_VALUE;
777 /* increment read pointer */
778 s->cur_ptr += len;
779 s->cur_len -= len;
780
781 /* return packet if any */
782 if (pkt->size) {
783 got_packet:
784 pkt->duration = 0;
785 pkt->stream_index = st->index;
786 pkt->pts = st->parser->pts;
787 pkt->dts = st->parser->dts;
788 pkt->destruct = av_destruct_packet_nofree;
789 compute_pkt_fields(s, st, st->parser, pkt);
790 break;
791 }
792 } else {
793 /* free packet */
794 av_free_packet(&s->cur_pkt);
795 s->cur_st = NULL;
796 }
797 } else {
798 /* read next packet */
799 ret = av_read_packet(s, &s->cur_pkt);
800 if (ret < 0) {
801 if (ret == -EAGAIN)
802 return ret;
803 /* return the last frames, if any */
804 for(i = 0; i < s->nb_streams; i++) {
805 st = s->streams[i];
806 if (st->parser && st->need_parsing) {
807 av_parser_parse(st->parser, st->codec,
808 &pkt->data, &pkt->size,
809 NULL, 0,
810 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
811 if (pkt->size)
812 goto got_packet;
813 }
814 }
815 /* no more packets: really terminates parsing */
816 return ret;
817 }
818
819 st = s->streams[s->cur_pkt.stream_index];
820 if(st->codec->debug & FF_DEBUG_PTS)
821 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
822 s->cur_pkt.stream_index,
823 s->cur_pkt.pts,
824 s->cur_pkt.dts,
825 s->cur_pkt.size);
826
827 s->cur_st = st;
828 s->cur_ptr = s->cur_pkt.data;
829 s->cur_len = s->cur_pkt.size;
830 if (st->need_parsing && !st->parser) {
831 st->parser = av_parser_init(st->codec->codec_id);
832 if (!st->parser) {
833 /* no parser available : just output the raw packets */
834 st->need_parsing = 0;
835 }else if(st->need_parsing == 2){
836 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
837 }
838 }
839 }
840 }
841 if(st->codec->debug & FF_DEBUG_PTS)
842 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
843 pkt->stream_index,
844 pkt->pts,
845 pkt->dts,
846 pkt->size);
847
848 return 0;
849 }
850
851 /**
852 * Return the next frame of a stream.
853 *
854 * The returned packet is valid
855 * until the next av_read_frame() or until av_close_input_file() and
856 * must be freed with av_free_packet. For video, the packet contains
857 * exactly one frame. For audio, it contains an integer number of
858 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
859 * data). If the audio frames have a variable size (e.g. MPEG audio),
860 * then it contains one frame.
861 *
862 * pkt->pts, pkt->dts and pkt->duration are always set to correct
863  * values in AV_TIME_BASE units (and guessed if the format cannot
864  * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
865 * has B frames, so it is better to rely on pkt->dts if you do not
866 * decompress the payload.
867 *
868 * @return 0 if OK, < 0 if error or end of file.
869 */
870 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
871 {
872 AVPacketList *pktl;
873 int eof=0;
874 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
875
876 for(;;){
877 pktl = s->packet_buffer;
878 if (pktl) {
879 AVPacket *next_pkt= &pktl->pkt;
880
881 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
882 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
883 if( pktl->pkt.stream_index == next_pkt->stream_index
884 && next_pkt->dts < pktl->pkt.dts
885 && pktl->pkt.pts != pktl->pkt.dts //not b frame
886 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
887 next_pkt->pts= pktl->pkt.dts;
888 }
889 pktl= pktl->next;
890 }
891 pktl = s->packet_buffer;
892 }
893
894 if( next_pkt->pts != AV_NOPTS_VALUE
895 || next_pkt->dts == AV_NOPTS_VALUE
896 || !genpts || eof){
897 /* read packet from packet buffer, if there is data */
898 *pkt = *next_pkt;
899 s->packet_buffer = pktl->next;
900 av_free(pktl);
901 return 0;
902 }
903 }
904 if(genpts){
905 AVPacketList **plast_pktl= &s->packet_buffer;
906 int ret= av_read_frame_internal(s, pkt);
907 if(ret<0){
908 if(pktl && ret != -EAGAIN){
909 eof=1;
910 continue;
911 }else
912 return ret;
913 }
914
915 /* duplicate the packet */
916 if (av_dup_packet(pkt) < 0)
917 return AVERROR_NOMEM;
918
919 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
920
921 pktl = av_mallocz(sizeof(AVPacketList));
922 if (!pktl)
923 return AVERROR_NOMEM;
924
925 /* add the packet in the buffered packet list */
926 *plast_pktl = pktl;
927 pktl->pkt= *pkt;
928 }else{
929 assert(!s->packet_buffer);
930 return av_read_frame_internal(s, pkt);
931 }
932 }
933 }
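/* Minimal demuxing-loop sketch (illustration only; error handling and the
 * actual use of each packet are omitted):
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // pkt.stream_index identifies the stream this packet belongs to
 *         av_free_packet(&pkt);
 *     }
 */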
934
935 /* XXX: suppress the packet queue */
936 static void flush_packet_queue(AVFormatContext *s)
937 {
938 AVPacketList *pktl;
939
940 for(;;) {
941 pktl = s->packet_buffer;
942 if (!pktl)
943 break;
944 s->packet_buffer = pktl->next;
945 av_free_packet(&pktl->pkt);
946 av_free(pktl);
947 }
948 }
949
950 /*******************************************************/
951 /* seek support */
952
953 int av_find_default_stream_index(AVFormatContext *s)
954 {
955 int i;
956 AVStream *st;
957
958 if (s->nb_streams <= 0)
959 return -1;
960 for(i = 0; i < s->nb_streams; i++) {
961 st = s->streams[i];
962 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
963 return i;
964 }
965 }
966 return 0;
967 }
968
969 /**
970 * Flush the frame reader.
971 */
972 static void av_read_frame_flush(AVFormatContext *s)
973 {
974 AVStream *st;
975 int i;
976
977 flush_packet_queue(s);
978
979 /* free previous packet */
980 if (s->cur_st) {
981 if (s->cur_st->parser)
982 av_free_packet(&s->cur_pkt);
983 s->cur_st = NULL;
984 }
985 /* fail safe */
986 s->cur_ptr = NULL;
987 s->cur_len = 0;
988
989 /* for each stream, reset read state */
990 for(i = 0; i < s->nb_streams; i++) {
991 st = s->streams[i];
992
993 if (st->parser) {
994 av_parser_close(st->parser);
995 st->parser = NULL;
996 }
997 st->last_IP_pts = AV_NOPTS_VALUE;
998 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
999 }
1000 }
1001
1002 /**
1003 * Updates cur_dts of all streams based on given timestamp and AVStream.
1004 *
1005  * Stream ref_st is left unchanged; all other streams get cur_dts set in their native time base.
1006  * Only needed for timestamp wrapping or if (dts is not set and pts != dts).
1007 * @param timestamp new dts expressed in time_base of param ref_st
1008 * @param ref_st reference stream giving time_base of param timestamp
1009 */
1010 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1011 int i;
1012
1013 for(i = 0; i < s->nb_streams; i++) {
1014 AVStream *st = s->streams[i];
1015
1016 st->cur_dts = av_rescale(timestamp,
1017 st->time_base.den * (int64_t)ref_st->time_base.num,
1018 st->time_base.num * (int64_t)ref_st->time_base.den);
1019 }
1020 }
1021
1022 /**
1023  * Add an index entry into a sorted list, updating it if it is already there.
1024 *
1025 * @param timestamp timestamp in the timebase of the given stream
1026 */
1027 int av_add_index_entry(AVStream *st,
1028 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1029 {
1030 AVIndexEntry *entries, *ie;
1031 int index;
1032
1033 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1034 return -1;
1035
1036 entries = av_fast_realloc(st->index_entries,
1037 &st->index_entries_allocated_size,
1038 (st->nb_index_entries + 1) *
1039 sizeof(AVIndexEntry));
1040 if(!entries)
1041 return -1;
1042
1043 st->index_entries= entries;
1044
1045 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1046
1047 if(index<0){
1048 index= st->nb_index_entries++;
1049 ie= &entries[index];
1050 assert(index==0 || ie[-1].timestamp < timestamp);
1051 }else{
1052 ie= &entries[index];
1053 if(ie->timestamp != timestamp){
1054 if(ie->timestamp <= timestamp)
1055 return -1;
1056 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1057 st->nb_index_entries++;
1058 }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1059 distance= ie->min_distance;
1060 }
1061
1062 ie->pos = pos;
1063 ie->timestamp = timestamp;
1064 ie->min_distance= distance;
1065 ie->size= size;
1066 ie->flags = flags;
1067
1068 return index;
1069 }
1070
1071 /**
1072 * build an index for raw streams using a parser.
1073 */
1074 static void av_build_index_raw(AVFormatContext *s)
1075 {
1076 AVPacket pkt1, *pkt = &pkt1;
1077 int ret;
1078 AVStream *st;
1079
1080 st = s->streams[0];
1081 av_read_frame_flush(s);
1082 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1083
1084 for(;;) {
1085 ret = av_read_frame(s, pkt);
1086 if (ret < 0)
1087 break;
1088 if (pkt->stream_index == 0 && st->parser &&
1089 (pkt->flags & PKT_FLAG_KEY)) {
1090 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1091 0, 0, AVINDEX_KEYFRAME);
1092 }
1093 av_free_packet(pkt);
1094 }
1095 }
1096
1097 /**
1098 * Returns TRUE if we deal with a raw stream.
1099 *
1100 * Raw codec data and parsing needed.
1101 */
1102 static int is_raw_stream(AVFormatContext *s)
1103 {
1104 AVStream *st;
1105
1106 if (s->nb_streams != 1)
1107 return 0;
1108 st = s->streams[0];
1109 if (!st->need_parsing)
1110 return 0;
1111 return 1;
1112 }
1113
1114 /**
1115 * Gets the index for a specific timestamp.
1116 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
1117 * the timestamp which is <= the requested one, if backward is 0
1118 * then it will be >=
1119 * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
1120 * @return < 0 if no such timestamp could be found
1121 */
1122 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1123 int flags)
1124 {
1125 AVIndexEntry *entries= st->index_entries;
1126 int nb_entries= st->nb_index_entries;
1127 int a, b, m;
1128 int64_t timestamp;
1129
1130 a = - 1;
1131 b = nb_entries;
1132
1133 while (b - a > 1) {
1134 m = (a + b) >> 1;
1135 timestamp = entries[m].timestamp;
1136 if(timestamp >= wanted_timestamp)
1137 b = m;
1138 if(timestamp <= wanted_timestamp)
1139 a = m;
1140 }
1141 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1142
1143 if(!(flags & AVSEEK_FLAG_ANY)){
1144 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1145 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1146 }
1147 }
1148
1149 if(m == nb_entries)
1150 return -1;
1151 return m;
1152 }
1153
1154 #define DEBUG_SEEK
1155
1156 /**
1157  * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
1158  * This is not supposed to be called directly by a user application, but by demuxers.
1159 * @param target_ts target timestamp in the time base of the given stream
1160 * @param stream_index stream number
1161 */
1162 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1163 AVInputFormat *avif= s->iformat;
1164 int64_t pos_min, pos_max, pos, pos_limit;
1165 int64_t ts_min, ts_max, ts;
1166 int index;
1167 AVStream *st;
1168
1169 if (stream_index < 0)
1170 return -1;
1171
1172 #ifdef DEBUG_SEEK
1173 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1174 #endif
1175
1176 ts_max=
1177 ts_min= AV_NOPTS_VALUE;
1178 pos_limit= -1; //gcc falsely says it may be uninitialized
1179
1180 st= s->streams[stream_index];
1181 if(st->index_entries){
1182 AVIndexEntry *e;
1183
1184 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1185 index= FFMAX(index, 0);
1186 e= &st->index_entries[index];
1187
1188 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1189 pos_min= e->pos;
1190 ts_min= e->timestamp;
1191 #ifdef DEBUG_SEEK
1192 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1193 pos_min,ts_min);
1194 #endif
1195 }else{
1196 assert(index==0);
1197 }
1198
1199 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1200 assert(index < st->nb_index_entries);
1201 if(index >= 0){
1202 e= &st->index_entries[index];
1203 assert(e->timestamp >= target_ts);
1204 pos_max= e->pos;
1205 ts_max= e->timestamp;
1206 pos_limit= pos_max - e->min_distance;
1207 #ifdef DEBUG_SEEK
1208 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1209 pos_max,pos_limit, ts_max);
1210 #endif
1211 }
1212 }
1213
1214 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1215 if(pos<0)
1216 return -1;
1217
1218 /* do the seek */
1219 url_fseek(&s->pb, pos, SEEK_SET);
1220
1221 av_update_cur_dts(s, st, ts);
1222
1223 return 0;
1224 }
1225
1226 /**
1227 * Does a binary search using read_timestamp().
1228 * this isnt supposed to be called directly by a user application, but by demuxers
1229 * @param target_ts target timestamp in the time base of the given stream
1230 * @param stream_index stream number
1231 */
1232 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1233 int64_t pos, ts;
1234 int64_t start_pos, filesize;
1235 int no_change;
1236
1237 #ifdef DEBUG_SEEK
1238 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1239 #endif
1240
1241 if(ts_min == AV_NOPTS_VALUE){
1242 pos_min = s->data_offset;
1243 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1244 if (ts_min == AV_NOPTS_VALUE)
1245 return -1;
1246 }
1247
1248 if(ts_max == AV_NOPTS_VALUE){
1249 int step= 1024;
1250 filesize = url_fsize(&s->pb);
1251 pos_max = filesize - 1;
1252 do{
1253 pos_max -= step;
1254 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1255 step += step;
1256 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1257 if (ts_max == AV_NOPTS_VALUE)
1258 return -1;
1259
1260 for(;;){
1261 int64_t tmp_pos= pos_max + 1;
1262 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1263 if(tmp_ts == AV_NOPTS_VALUE)
1264 break;
1265 ts_max= tmp_ts;
1266 pos_max= tmp_pos;
1267 if(tmp_pos >= filesize)
1268 break;
1269 }
1270 pos_limit= pos_max;
1271 }
1272
1273 if(ts_min > ts_max){
1274 return -1;
1275 }else if(ts_min == ts_max){
1276 pos_limit= pos_min;
1277 }
1278
1279 no_change=0;
1280 while (pos_min < pos_limit) {
1281 #ifdef DEBUG_SEEK
1282 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1283 pos_min, pos_max,
1284 ts_min, ts_max);
1285 #endif
1286 assert(pos_limit <= pos_max);
1287
1288 if(no_change==0){
1289 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1290 // interpolate position (better than dichotomy)
1291 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1292 + pos_min - approximate_keyframe_distance;
1293 }else if(no_change==1){
1294 // bisection, if interpolation failed to change min or max pos last time
1295 pos = (pos_min + pos_limit)>>1;
1296 }else{
1297 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1298 pos=pos_min;
1299 }
1300 if(pos <= pos_min)
1301 pos= pos_min + 1;
1302 else if(pos > pos_limit)
1303 pos= pos_limit;
1304 start_pos= pos;
1305
1306 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1307 if(pos == pos_max)
1308 no_change++;
1309 else
1310 no_change=0;
1311 #ifdef DEBUG_SEEK
1312 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1313 #endif
1314 assert(ts != AV_NOPTS_VALUE);
1315 if (target_ts <= ts) {
1316 pos_limit = start_pos - 1;
1317 pos_max = pos;
1318 ts_max = ts;
1319 }
1320 if (target_ts >= ts) {
1321 pos_min = pos;
1322 ts_min = ts;
1323 }
1324 }
1325
1326 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1327 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1328 #ifdef DEBUG_SEEK
1329 pos_min = pos;
1330 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1331 pos_min++;
1332 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1333 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1334 pos, ts_min, target_ts, ts_max);
1335 #endif
1336 *ts_ret= ts;
1337 return pos;
1338 }
1339
1340 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1341 int64_t pos_min, pos_max;
1342 #if 0
1343 AVStream *st;
1344
1345 if (stream_index < 0)
1346 return -1;
1347
1348 st= s->streams[stream_index];
1349 #endif
1350
1351 pos_min = s->data_offset;
1352 pos_max = url_fsize(&s->pb) - 1;
1353
1354 if (pos < pos_min) pos= pos_min;
1355 else if(pos > pos_max) pos= pos_max;
1356
1357 url_fseek(&s->pb, pos, SEEK_SET);
1358
1359 #if 0
1360 av_update_cur_dts(s, st, ts);
1361 #endif
1362 return 0;
1363 }
1364
1365 static int av_seek_frame_generic(AVFormatContext *s,
1366 int stream_index, int64_t timestamp, int flags)
1367 {
1368 int index;
1369 AVStream *st;
1370 AVIndexEntry *ie;
1371
1372 if (!s->index_built) {
1373 if (is_raw_stream(s)) {
1374 av_build_index_raw(s);
1375 } else {
1376 return -1;
1377 }
1378 s->index_built = 1;
1379 }
1380
1381 st = s->streams[stream_index];
1382 index = av_index_search_timestamp(st, timestamp, flags);
1383 if (index < 0)
1384 return -1;
1385
1386 /* now we have found the index, we can seek */
1387 ie = &st->index_entries[index];
1388 av_read_frame_flush(s);
1389 url_fseek(&s->pb, ie->pos, SEEK_SET);
1390
1391 av_update_cur_dts(s, st, ie->timestamp);
1392
1393 return 0;
1394 }
1395
1396 /**
1397  * Seek to the keyframe at 'timestamp' in the stream with index
1398  * 'stream_index'.
1399 * @param stream_index If stream_index is (-1), a default
1400 * stream is selected, and timestamp is automatically converted
1401 * from AV_TIME_BASE units to the stream specific time_base.
1402 * @param timestamp timestamp in AVStream.time_base units
1403 * or if there is no stream specified then in AV_TIME_BASE units
1404 * @param flags flags which select direction and seeking mode
1405 * @return >= 0 on success
1406 */
1407 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1408 {
1409 int ret;
1410 AVStream *st;
1411
1412 av_read_frame_flush(s);
1413
1414 if(flags & AVSEEK_FLAG_BYTE)
1415 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1416
1417 if(stream_index < 0){
1418 stream_index= av_find_default_stream_index(s);
1419 if(stream_index < 0)
1420 return -1;
1421
1422 st= s->streams[stream_index];
1423 /* timestamp for default must be expressed in AV_TIME_BASE units */
1424 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1425 }
1426 st= s->streams[stream_index];
1427
1428 /* first, we try the format specific seek */
1429 if (s->iformat->read_seek)
1430 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1431 else
1432 ret = -1;
1433 if (ret >= 0) {
1434 return 0;
1435 }
1436
1437 if(s->iformat->read_timestamp)
1438 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1439 else
1440 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1441 }
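/* Minimal seek sketch (illustration): seek backwards to the keyframe at or
 * before the 10 second mark on the default stream; with stream_index == -1
 * the timestamp must be given in AV_TIME_BASE units.
 *
 *     int64_t target = 10 * (int64_t)AV_TIME_BASE;
 *     av_seek_frame(ic, -1, target, AVSEEK_FLAG_BACKWARD);
 */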
1442
1443 /*******************************************************/
1444
1445 /**
1446  * Returns TRUE if the file has accurate timings in at least one stream.
1447  *
1448  * @return TRUE if at least one component has accurate timings.
1449 */
1450 static int av_has_timings(AVFormatContext *ic)
1451 {
1452 int i;
1453 AVStream *st;
1454
1455 for(i = 0;i < ic->nb_streams; i++) {
1456 st = ic->streams[i];
1457 if (st->start_time != AV_NOPTS_VALUE &&
1458 st->duration != AV_NOPTS_VALUE)
1459 return 1;
1460 }
1461 return 0;
1462 }
1463
1464 /**
1465  * Estimate the stream timings from the ones of each component.
1466 *
1467 * Also computes the global bitrate if possible.
1468 */
1469 static void av_update_stream_timings(AVFormatContext *ic)
1470 {
1471 int64_t start_time, start_time1, end_time, end_time1;
1472 int i;
1473 AVStream *st;
1474
1475 start_time = INT64_MAX;
1476 end_time = INT64_MIN;
1477 for(i = 0;i < ic->nb_streams; i++) {
1478 st = ic->streams[i];
1479 if (st->start_time != AV_NOPTS_VALUE) {
1480 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1481 if (start_time1 < start_time)
1482 start_time = start_time1;
1483 if (st->duration != AV_NOPTS_VALUE) {
1484 end_time1 = start_time1
1485 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1486 if (end_time1 > end_time)
1487 end_time = end_time1;
1488 }
1489 }
1490 }
1491 if (start_time != INT64_MAX) {
1492 ic->start_time = start_time;
1493 if (end_time != INT64_MIN) {
1494 ic->duration = end_time - start_time;
1495 if (ic->file_size > 0) {
1496 /* compute the bit rate */
1497 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1498 (double)ic->duration;
1499 }
1500 }
1501 }
1502
1503 }
1504
1505 static void fill_all_stream_timings(AVFormatContext *ic)
1506 {
1507 int i;
1508 AVStream *st;
1509
1510 av_update_stream_timings(ic);
1511 for(i = 0;i < ic->nb_streams; i++) {
1512 st = ic->streams[i];
1513 if (st->start_time == AV_NOPTS_VALUE) {
1514 if(ic->start_time != AV_NOPTS_VALUE)
1515 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1516 if(ic->duration != AV_NOPTS_VALUE)
1517 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1518 }
1519 }
1520 }
1521
1522 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1523 {
1524 int64_t filesize, duration;
1525 int bit_rate, i;
1526 AVStream *st;
1527
1528 /* if bit_rate is already set, we believe it */
1529 if (ic->bit_rate == 0) {
1530 bit_rate = 0;
1531 for(i=0;i<ic->nb_streams;i++) {
1532 st = ic->streams[i];
1533 bit_rate += st->codec->bit_rate;
1534 }
1535 ic->bit_rate = bit_rate;
1536 }
1537
1538 /* if duration is already set, we believe it */
1539 if (ic->duration == AV_NOPTS_VALUE &&
1540 ic->bit_rate != 0 &&
1541 ic->file_size != 0) {
1542 filesize = ic->file_size;
1543 if (filesize > 0) {
1544 for(i = 0; i < ic->nb_streams; i++) {
1545 st = ic->streams[i];
1546 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1547 if (st->start_time == AV_NOPTS_VALUE ||
1548 st->duration == AV_NOPTS_VALUE) {
1549 st->start_time = 0;
1550 st->duration = duration;
1551 }
1552 }
1553 }
1554 }
1555 }
1556
1557 #define DURATION_MAX_READ_SIZE 250000
1558
1559 /* only usable for MPEG-PS streams */
1560 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1561 {
1562 AVPacket pkt1, *pkt = &pkt1;
1563 AVStream *st;
1564 int read_size, i, ret;
1565 int64_t end_time;
1566 int64_t filesize, offset, duration;
1567
1568 /* free previous packet */
1569 if (ic->cur_st && ic->cur_st->parser)
1570 av_free_packet(&ic->cur_pkt);
1571 ic->cur_st = NULL;
1572
1573 /* flush packet queue */
1574 flush_packet_queue(ic);
1575
1576 for(i=0;i<ic->nb_streams;i++) {
1577 st = ic->streams[i];
1578 if (st->parser) {
1579 av_parser_close(st->parser);
1580 st->parser= NULL;
1581 }
1582 }
1583
1584 /* we read the first packets to get the first PTS (not fully
1585 accurate, but it is enough now) */
1586 url_fseek(&ic->pb, 0, SEEK_SET);
1587 read_size = 0;
1588 for(;;) {
1589 if (read_size >= DURATION_MAX_READ_SIZE)
1590 break;
1591 /* if all info is available, we can stop */
1592 for(i = 0;i < ic->nb_streams; i++) {
1593 st = ic->streams[i];
1594 if (st->start_time == AV_NOPTS_VALUE)
1595 break;
1596 }
1597 if (i == ic->nb_streams)
1598 break;
1599
1600 ret = av_read_packet(ic, pkt);
1601 if (ret != 0)
1602 break;
1603 read_size += pkt->size;
1604 st = ic->streams[pkt->stream_index];
1605 if (pkt->pts != AV_NOPTS_VALUE) {
1606 if (st->start_time == AV_NOPTS_VALUE)
1607 st->start_time = pkt->pts;
1608 }
1609 av_free_packet(pkt);
1610 }
1611
1612 /* estimate the end time (duration) */
1613 /* XXX: may need to support wrapping */
1614 filesize = ic->file_size;
1615 offset = filesize - DURATION_MAX_READ_SIZE;
1616 if (offset < 0)
1617 offset = 0;
1618
1619 url_fseek(&ic->pb, offset, SEEK_SET);
1620 read_size = 0;
1621 for(;;) {
1622 if (read_size >= DURATION_MAX_READ_SIZE)
1623 break;
1624 /* if all info is available, we can stop */
1625 for(i = 0;i < ic->nb_streams; i++) {
1626 st = ic->streams[i];
1627 if (st->duration == AV_NOPTS_VALUE)
1628 break;
1629 }
1630 if (i == ic->nb_streams)
1631 break;
1632
1633 ret = av_read_packet(ic, pkt);
1634 if (ret != 0)
1635 break;
1636 read_size += pkt->size;
1637 st = ic->streams[pkt->stream_index];
1638 if (pkt->pts != AV_NOPTS_VALUE) {
1639 end_time = pkt->pts;
1640 duration = end_time - st->start_time;
1641 if (duration > 0) {
1642 if (st->duration == AV_NOPTS_VALUE ||
1643 st->duration < duration)
1644 st->duration = duration;
1645 }
1646 }
1647 av_free_packet(pkt);
1648 }
1649
1650 fill_all_stream_timings(ic);
1651
1652 url_fseek(&ic->pb, 0, SEEK_SET);
1653 }
1654
1655 static void av_estimate_timings(AVFormatContext *ic)
1656 {
1657 int64_t file_size;
1658
1659 /* get the file size, if possible */
1660 if (ic->iformat->flags & AVFMT_NOFILE) {
1661 file_size = 0;
1662 } else {
1663 file_size = url_fsize(&ic->pb);
1664 if (file_size < 0)
1665 file_size = 0;
1666 }
1667 ic->file_size = file_size;
1668
1669 if ((!strcmp(ic->iformat->name, "mpeg") ||
1670 !strcmp(ic->iformat->name, "mpegts")) &&
1671 file_size && !ic->pb.is_streamed) {
1672 /* get accurate estimate from the PTSes */
1673 av_estimate_timings_from_pts(ic);
1674 } else if (av_has_timings(ic)) {
1675 /* at least one component has timings - we use them for all
1676 the components */
1677 fill_all_stream_timings(ic);
1678 } else {
1679 /* less precise: use bit rate info */
1680 av_estimate_timings_from_bit_rate(ic);
1681 }
1682 av_update_stream_timings(ic);
1683
1684 #if 0
1685 {
1686 int i;
1687 AVStream *st;
1688 for(i = 0;i < ic->nb_streams; i++) {
1689 st = ic->streams[i];
1690 printf("%d: start_time: %0.3f duration: %0.3f\n",
1691 i, (double)st->start_time / AV_TIME_BASE,
1692 (double)st->duration / AV_TIME_BASE);
1693 }
1694 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1695 (double)ic->start_time / AV_TIME_BASE,
1696 (double)ic->duration / AV_TIME_BASE,
1697 ic->bit_rate / 1000);
1698 }
1699 #endif
1700 }
1701
1702 static int has_codec_parameters(AVCodecContext *enc)
1703 {
1704 int val;
1705 switch(enc->codec_type) {
1706 case CODEC_TYPE_AUDIO:
1707 val = enc->sample_rate;
1708 break;
1709 case CODEC_TYPE_VIDEO:
1710 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1711 break;
1712 default:
1713 val = 1;
1714 break;
1715 }
1716 return (val != 0);
1717 }
1718
1719 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1720 {
1721 int16_t *samples;
1722 AVCodec *codec;
1723 int got_picture, ret=0;
1724 AVFrame picture;
1725
1726 if(!st->codec->codec){
1727 codec = avcodec_find_decoder(st->codec->codec_id);
1728 if (!codec)
1729 return -1;
1730 ret = avcodec_open(st->codec, codec);
1731 if (ret < 0)
1732 return ret;
1733 }
1734
1735 if(!has_codec_parameters(st->codec)){
1736 switch(st->codec->codec_type) {
1737 case CODEC_TYPE_VIDEO:
1738 ret = avcodec_decode_video(st->codec, &picture,
1739 &got_picture, (uint8_t *)data, size);
1740 break;
1741 case CODEC_TYPE_AUDIO:
1742 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1743 if (!samples)
1744 goto fail;
1745 ret = avcodec_decode_audio(st->codec, samples,
1746 &got_picture, (uint8_t *)data, size);
1747 av_free(samples);
1748 break;
1749 default:
1750 break;
1751 }
1752 }
1753 fail:
1754 return ret;
1755 }
1756
1757 /* absolute maximum size we read until we abort */
1758 #define MAX_READ_SIZE 5000000
1759
1760 /* maximum duration until we stop analysing the stream */
1761 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 3.0))
1762
1763 #define MAX_STD_TIMEBASES (60*12+5)
1764 static int get_std_framerate(int i){
1765 if(i<60*12) return i*1001;
1766 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1767 }
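/* The values returned above are frame rates in units of 1/(1001*12) Hz:
 * index 288 (= 288*1001) is exactly 24 fps, index 360 is 30 fps, and index
 * 60*12, the first of the extra entries, is 24000/1001 (about 23.976) fps. */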
1768
1769 /**
1770 * Read the beginning of a media file to get stream information. This
1771 * is useful for file formats with no headers such as MPEG. This
1772  * function also computes the real frame rate in case of MPEG-2 repeat
1773 * frame mode.
1774 *
1775 * @param ic media file handle
1776 * @return >=0 if OK. AVERROR_xxx if error.
1777  * @todo let the user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
1778 */
1779 int av_find_stream_info(AVFormatContext *ic)
1780 {
1781 int i, count, ret, read_size, j;
1782 AVStream *st;
1783 AVPacket pkt1, *pkt;
1784 AVPacketList *pktl=NULL, **ppktl;
1785 int64_t last_dts[MAX_STREAMS];
1786 int duration_count[MAX_STREAMS]={0};
1787 double duration_error[MAX_STREAMS][MAX_STD_TIMEBASES]={{0}}; //FIXME malloc()?
1788
1789 for(i=0;i<ic->nb_streams;i++) {
1790 st = ic->streams[i];
1791 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1792 /* if(!st->time_base.num)
1793 st->time_base= */
1794 if(!st->codec->time_base.num)
1795 st->codec->time_base= st->time_base;
1796 }
1797 //only for the split stuff
1798 if (!st->parser) {
1799 st->parser = av_parser_init(st->codec->codec_id);
1800 if(st->need_parsing == 2 && st->parser){
1801 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1802 }
1803 }
1804 }
1805
1806 for(i=0;i<MAX_STREAMS;i++){
1807 last_dts[i]= AV_NOPTS_VALUE;
1808 }
1809
1810 count = 0;
1811 read_size = 0;
1812 ppktl = &ic->packet_buffer;
1813 for(;;) {
1814 /* check if one codec still needs to be handled */
1815 for(i=0;i<ic->nb_streams;i++) {
1816 st = ic->streams[i];
1817 if (!has_codec_parameters(st->codec))
1818 break;
1819 /* variable fps and no guess at the real fps */
1820 if( st->codec->time_base.den >= 101LL*st->codec->time_base.num
1821 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1822 break;
1823 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1824 break;
1825 }
1826 if (i == ic->nb_streams) {
1827 /* NOTE: if the format has no header, then we need to read
1828 some packets to get most of the streams, so we cannot
1829 stop here */
1830 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1831 /* if we found the info for all the codecs, we can stop */
1832 ret = count;
1833 break;
1834 }
1835 }
1836 /* we did not get all the codec info, but we read too much data */
1837 if (read_size >= MAX_READ_SIZE) {
1838 ret = count;
1839 break;
1840 }
1841
1842 /* NOTE: a new stream can be added here if there is no header in the file
1843 (AVFMTCTX_NOHEADER) */
1844 ret = av_read_frame_internal(ic, &pkt1);
1845 if (ret < 0) {
1846 /* EOF or error */
1847 ret = -1; /* we could not have all the codec parameters before EOF */
1848 for(i=0;i<ic->nb_streams;i++) {
1849 st = ic->streams[i];
1850 if (!has_codec_parameters(st->codec)){
1851 char buf[256];
1852 avcodec_string(buf, sizeof(buf), st->codec, 0);
1853 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1854 } else {
1855 ret = 0;
1856 }
1857 }
1858 break;
1859 }
1860
1861 pktl = av_mallocz(sizeof(AVPacketList));
1862 if (!pktl) {
1863 ret = AVERROR_NOMEM;
1864 break;
1865 }
1866
1867 /* add the packet in the buffered packet list */
1868 *ppktl = pktl;
1869 ppktl = &pktl->next;
1870
1871 pkt = &pktl->pkt;
1872 *pkt = pkt1;
1873
1874 /* duplicate the packet */
1875 if (av_dup_packet(pkt) < 0) {
1876 ret = AVERROR_NOMEM;
1877 break;
1878 }
1879
1880 read_size += pkt->size;
1881
1882 st = ic->streams[pkt->stream_index];
1883 if(st->codec_info_nb_frames>1) //FIXME move codec_info_nb_frames and codec_info_duration from AVStream into this func
1884 st->codec_info_duration += pkt->duration;
1885 if (pkt->duration != 0)
1886 st->codec_info_nb_frames++;
1887
1888 {
1889 int index= pkt->stream_index;
1890 int64_t last= last_dts[index];
1891 int64_t duration= pkt->dts - last;
1892
1893 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1894 double dur= duration * av_q2d(st->time_base);
1895
1896 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1897 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
1898 if(duration_count[index] < 2)
1899 memset(duration_error, 0, sizeof(duration_error));
1900 for(i=1; i<MAX_STD_TIMEBASES; i++){
1901 int framerate= get_std_framerate(i);
1902 int ticks= lrintf(dur*framerate/(1001*12));
1903 double error= dur - ticks*1001*12/(double)framerate;
1904 duration_error[index][i] += error*error;
1905 }
1906 duration_count[index]++;
1907
1908 if(st->codec_info_nb_frames == 0 && 0)
1909 st->codec_info_duration += duration;
1910 }
1911 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
1912 last_dts[pkt->stream_index]= pkt->dts;
1913 }
1914 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1915 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1916 if(i){
1917 st->codec->extradata_size= i;
1918 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1919 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1920 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1921 }
1922 }
1923
1924 /* if still no information, we try to open the codec and to
1925 decompress the frame. We try to avoid that in most cases as
1926 it takes longer and uses more memory. For MPEG-4, we need to
1927 decompress for QuickTime. */
1928 if (!has_codec_parameters(st->codec) /*&&
1929 (st->codec->codec_id == CODEC_ID_FLV1 ||
1930 st->codec->codec_id == CODEC_ID_H264 ||
1931 st->codec->codec_id == CODEC_ID_H263 ||
1932 st->codec->codec_id == CODEC_ID_H261 ||
1933 st->codec->codec_id == CODEC_ID_VORBIS ||
1934 st->codec->codec_id == CODEC_ID_MJPEG ||
1935 st->codec->codec_id == CODEC_ID_PNG ||
1936 st->codec->codec_id == CODEC_ID_PAM ||
1937 st->codec->codec_id == CODEC_ID_PGM ||
1938 st->codec->codec_id == CODEC_ID_PGMYUV ||
1939 st->codec->codec_id == CODEC_ID_PBM ||
1940 st->codec->codec_id == CODEC_ID_PPM ||
1941 st->codec->codec_id == CODEC_ID_SHORTEN ||
1942 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1943 try_decode_frame(st, pkt->data, pkt->size);
1944
1945 if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
1946 break;
1947 }
1948 count++;
1949 }
1950
1951 // close codecs which were opened in try_decode_frame()
1952 for(i=0;i<ic->nb_streams;i++) {
1953 st = ic->streams[i];
1954 if(st->codec->codec)
1955 avcodec_close(st->codec);
1956 }
1957 for(i=0;i<ic->nb_streams;i++) {
1958 st = ic->streams[i];
1959 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1960 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
1961 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
1962
1963 if(duration_count[i]
1964 && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) /*&&
1965 //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
1966 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
1967 double best_error= 2*av_q2d(st->time_base);
1968 best_error= best_error*best_error*duration_count[i]*1000*12*30;
1969
1970 for(j=1; j<MAX_STD_TIMEBASES; j++){
1971 double error= duration_error[i][j] * get_std_framerate(j);
1972 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1973 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
1974 if(error < best_error){
1975 best_error= error;
1976 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
1977 }
1978 }
1979 }
1980
1981 if (!st->r_frame_rate.num){
1982 if( st->codec->time_base.den * (int64_t)st->time_base.num
1983 <= st->codec->time_base.num * (int64_t)st->time_base.den){
1984 st->r_frame_rate.num = st->codec->time_base.den;
1985 st->r_frame_rate.den = st->codec->time_base.num;
1986 }else{
1987 st->r_frame_rate.num = st->time_base.den;
1988 st->r_frame_rate.den = st->time_base.num;
1989 }
1990 }
1991 }
1992 }
1993
1994 av_estimate_timings(ic);
1995 #if 0
1996 /* correct DTS for b frame streams with no timestamps */
1997 for(i=0;i<ic->nb_streams;i++) {
1998 st = ic->streams[i];
1999 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2000 if(b-frames){
2001 ppktl = &ic->packet_buffer;
2002 while(ppkt1){
2003 if(ppkt1->stream_index != i)
2004 continue;
2005 if(ppkt1->pkt->dts < 0)
2006 break;
2007 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2008 break;
2009 ppkt1->pkt->dts -= delta;
2010 ppkt1= ppkt1->next;
2011 }
2012 if(ppkt1)
2013 continue;
2014 st->cur_dts -= delta;
2015 }
2016 }
2017 }
2018 #endif
2019 return ret;
2020 }
2021
2022 /*******************************************************/
2023
2024 /**
2025 * Start playing a network-based stream (e.g. an RTSP stream) at the
2026 * current position.
2027 */
2028 int av_read_play(AVFormatContext *s)
2029 {
2030 if (!s->iformat->read_play)
2031 return AVERROR_NOTSUPP;
2032 return s->iformat->read_play(s);
2033 }
2034
2035 /**
2036 * Pause a network-based stream (e.g. an RTSP stream).
2037 *
2038 * Use av_read_play() to resume it.
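 *
 * A minimal sketch (illustrative only; assumes 's' is an open network
 * stream such as RTSP):
 * @code
 * if (av_read_pause(s) == 0) {
 *     // ... later, resume playback:
 *     av_read_play(s);
 * }
 * @endcode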
2039 */
2040 int av_read_pause(AVFormatContext *s)
2041 {
2042 if (!s->iformat->read_pause)
2043 return AVERROR_NOTSUPP;
2044 return s->iformat->read_pause(s);
2045 }
2046
2047 /**
2048 * Close a media file (but not its codecs).
2049 *
2050 * @param s media file handle
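 *
 * Typical demuxing teardown (sketch; the file name is a placeholder and
 * error handling is reduced to a minimum):
 * @code
 * AVFormatContext *ic;
 * if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) == 0) {
 *     // ... av_read_frame() loop ...
 *     av_close_input_file(ic);
 * }
 * @endcode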
2051 */
2052 void av_close_input_file(AVFormatContext *s)
2053 {
2054 int i, must_open_file;
2055 AVStream *st;
2056
2057 /* free previous packet */
2058 if (s->cur_st && s->cur_st->parser)
2059 av_free_packet(&s->cur_pkt);
2060
2061 if (s->iformat->read_close)
2062 s->iformat->read_close(s);
2063 for(i=0;i<s->nb_streams;i++) {
2064 /* free all data in a stream component */
2065 st = s->streams[i];
2066 if (st->parser) {
2067 av_parser_close(st->parser);
2068 }
2069 av_free(st->index_entries);
2070 av_free(st->codec->extradata);
2071 av_free(st->codec);
2072 av_free(st);
2073 }
2074 flush_packet_queue(s);
2075 must_open_file = 1;
2076 if (s->iformat->flags & AVFMT_NOFILE) {
2077 must_open_file = 0;
2078 }
2079 if (must_open_file) {
2080 url_fclose(&s->pb);
2081 }
2082 av_freep(&s->priv_data);
2083 av_free(s);
2084 }
2085
2086 /**
2087 * Add a new stream to a media file.
2088 *
2089 * Can only be called in the read_header() function. If the flag
2090 * AVFMTCTX_NOHEADER is set in the format context, then new streams
2091 * can be added in read_packet() too.
2092 *
2093 * @param s media file handle
2094 * @param id file format dependent stream id
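 *
 * Sketch of use inside a demuxer's read_header() (illustrative; the
 * codec values are placeholders):
 * @code
 * AVStream *st = av_new_stream(s, 0);
 * if (!st)
 *     return AVERROR_NOMEM;
 * st->codec->codec_type  = CODEC_TYPE_AUDIO;
 * st->codec->codec_id    = CODEC_ID_PCM_S16LE;
 * st->codec->sample_rate = 44100;
 * st->codec->channels    = 2;
 * @endcode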
2095 */
2096 AVStream *av_new_stream(AVFormatContext *s, int id)
2097 {
2098 AVStream *st;
2099 int i;
2100
2101 if (s->nb_streams >= MAX_STREAMS)
2102 return NULL;
2103
2104 st = av_mallocz(sizeof(AVStream));
2105 if (!st)
2106 return NULL;
2107
2108 st->codec= avcodec_alloc_context();
2109 if (s->iformat) {
2110 /* no default bitrate if decoding */
2111 st->codec->bit_rate = 0;
2112 }
2113 st->index = s->nb_streams;
2114 st->id = id;
2115 st->start_time = AV_NOPTS_VALUE;
2116 st->duration = AV_NOPTS_VALUE;
2117 st->cur_dts = AV_NOPTS_VALUE;
2118
2119 /* default pts settings are MPEG-like */
2120 av_set_pts_info(st, 33, 1, 90000);
2121 st->last_IP_pts = AV_NOPTS_VALUE;
2122 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2123 st->pts_buffer[i]= AV_NOPTS_VALUE;
2124
2125 s->streams[s->nb_streams++] = st;
2126 return st;
2127 }
2128
2129 /************************************************************/
2130 /* output media file */
2131
2132 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2133 {
2134 int ret;
2135
2136 if (s->oformat->priv_data_size > 0) {
2137 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2138 if (!s->priv_data)
2139 return AVERROR_NOMEM;
2140 } else
2141 s->priv_data = NULL;
2142
2143 if (s->oformat->set_parameters) {
2144 ret = s->oformat->set_parameters(s, ap);
2145 if (ret < 0)
2146 return ret;
2147 }
2148 return 0;
2149 }
2150
2151 /**
2152 * Allocate the stream private data and write the stream header to an
2153 * output media file
2154 *
2155 * @param s media file handle
2156 * @return 0 if OK. AVERROR_xxx if error.
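 *
 * A minimal muxing sketch (illustrative; assumes the caller has already
 * set s->oformat, added streams with av_new_stream() and opened s->pb):
 * @code
 * if (av_write_header(s) < 0)
 *     return -1;
 * // ... av_interleaved_write_frame() for each packet ...
 * if (av_write_trailer(s) < 0)
 *     return -1;
 * @endcode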
2157 */
2158 int av_write_header(AVFormatContext *s)
2159 {
2160 int ret, i;
2161 AVStream *st;
2162
2163 // some sanity checks
2164 for(i=0;i<s->nb_streams;i++) {
2165 st = s->streams[i];
2166
2167 switch (st->codec->codec_type) {
2168 case CODEC_TYPE_AUDIO:
2169 if(st->codec->sample_rate<=0){
2170 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2171 return -1;
2172 }
2173 break;
2174 case CODEC_TYPE_VIDEO:
2175 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2176 av_log(s, AV_LOG_ERROR, "time base not set\n");
2177 return -1;
2178 }
2179 if(st->codec->width<=0 || st->codec->height<=0){
2180 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2181 return -1;
2182 }
2183 break;
2184 }
2185
2186 if(s->oformat->codec_tag){
2187 if(st->codec->codec_tag){
2188 //FIXME
2189 //check that tag + id is in the table
2190 //if neither is in the table -> ok
2191 //if tag is in the table with another id -> FAIL
2192 //if id is in the table with another tag -> FAIL unless strict < ?
2193 }else
2194 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2195 }
2196 }
2197
2198 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2199 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2200 if (!s->priv_data)
2201 return AVERROR_NOMEM;
2202 }
2203
2204 if(s->oformat->write_header){
2205 ret = s->oformat->write_header(s);
2206 if (ret < 0)
2207 return ret;
2208 }
2209
2210 /* init PTS generation */
2211 for(i=0;i<s->nb_streams;i++) {
2212 int64_t den = AV_NOPTS_VALUE;
2213 st = s->streams[i];
2214
2215 switch (st->codec->codec_type) {
2216 case CODEC_TYPE_AUDIO:
2217 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2218 break;
2219 case CODEC_TYPE_VIDEO:
2220 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2221 break;
2222 default:
2223 break;
2224 }
2225 if (den != AV_NOPTS_VALUE) {
2226 if (den <= 0)
2227 return AVERROR_INVALIDDATA;
2228 av_frac_init(&st->pts, 0, 0, den);
2229 }
2230 }
2231 return 0;
2232 }
2233
2234 //FIXME merge with compute_pkt_fields
2235 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2236 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2237 int num, den, frame_size, i;
2238
2239 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2240
2241 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2242 return -1;*/
2243
2244 /* duration field */
2245 if (pkt->duration == 0) {
2246 compute_frame_duration(&num, &den, st, NULL, pkt);
2247 if (den && num) {
2248 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2249 }
2250 }
2251
2252 //XXX/FIXME this is a temporary hack until all encoders output pts
2253 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2254 pkt->dts=
2255 // pkt->pts= st->cur_dts;
2256 pkt->pts= st->pts.val;
2257 }
2258
2259 //calculate dts from pts
2260 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2261 st->pts_buffer[0]= pkt->pts;
2262 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2263 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2264 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2265 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2266
2267 pkt->dts= st->pts_buffer[0];
2268 }
2269
2270 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2271 av_log(NULL, AV_LOG_ERROR, "error, non-monotonic timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2272 return -1;
2273 }
2274 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2275 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2276 return -1;
2277 }
2278
2279 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2280 st->cur_dts= pkt->dts;
2281 st->pts.val= pkt->dts;
2282
2283 /* update pts */
2284 switch (st->codec->codec_type) {
2285 case CODEC_TYPE_AUDIO:
2286 frame_size = get_audio_frame_size(st->codec, pkt->size);
2287
2288 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2289 but it would be better if we had the real timestamps from the encoder */
2290 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2291 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2292 }
2293 break;
2294 case CODEC_TYPE_VIDEO:
2295 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2296 break;
2297 default:
2298 break;
2299 }
2300 return 0;
2301 }
2302
2303 static void truncate_ts(AVStream *st, AVPacket *pkt){
2304 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2305
2306 // if(pkt->dts < 0)
2307 // pkt->dts= 0; // this happens for low_delay=0 and B-frames; FIXME, needs further investigation about what we should do here
2308
2309 if (pkt->pts != AV_NOPTS_VALUE)
2310 pkt->pts &= pts_mask;
2311 if (pkt->dts != AV_NOPTS_VALUE)
2312 pkt->dts &= pts_mask;
2313 }
2314
2315 /**
2316 * Write a packet to an output media file.
2317 *
2318 * The packet shall contain one audio or video frame.
2319 *
2320 * @param s media file handle
2321 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2322 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
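 *
 * A minimal sketch (illustrative; 'data', 'size' and 'stream_index' stand
 * for the caller's already encoded frame, and pts/dts should be set in the
 * stream's time_base whenever the encoder provides them):
 * @code
 * AVPacket pkt;
 * av_init_packet(&pkt);
 * pkt.stream_index = stream_index;
 * pkt.data         = data;
 * pkt.size         = size;
 * if (av_write_frame(s, &pkt) < 0)
 *     return -1;
 * @endcode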
2323 */
2324 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2325 {
2326 int ret;
2327
2328 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2329 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2330 return ret;
2331
2332 truncate_ts(s->streams[pkt->stream_index], pkt);
2333
2334 ret= s->oformat->write_packet(s, pkt);
2335 if(!ret)
2336 ret= url_ferror(&s->pb);
2337 return ret;
2338 }
2339
2340 /**
2341 * Interleave a packet per DTS in an output media file.
2342 *
2343 * Packets with pkt->destruct == av_destruct_packet will be freed inside this function,
2344 * so they cannot be used after it; note that calling av_free_packet() on them is still safe.
2345 *
2346 * @param s media file handle
2347 * @param out the interleaved packet will be output here
2348 * @param pkt the input packet
2349 * @param flush 1 if no further packets are available as input and all
2350 * remaining packets should be output
2351 * @return 1 if a packet was output, 0 if no packet could be output,
2352 * < 0 if an error occurred
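 *
 * A flush-at-end-of-stream sketch (illustrative; roughly what
 * av_write_trailer() does, minus timestamp truncation and error handling):
 * @code
 * AVPacket opkt;
 * while (av_interleave_packet_per_dts(s, &opkt, NULL, 1) > 0) {
 *     s->oformat->write_packet(s, &opkt);
 *     av_free_packet(&opkt);
 * }
 * @endcode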
2353 */
2354 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2355 AVPacketList *pktl, **next_point, *this_pktl;
2356 int stream_count=0;
2357 int streams[MAX_STREAMS];
2358
2359 if(pkt){
2360 AVStream *st= s->streams[ pkt->stream_index];
2361
2362 // assert(pkt->destruct != av_destruct_packet); //FIXME
2363
2364 this_pktl = av_mallocz(sizeof(AVPacketList));
2365 this_pktl->pkt= *pkt;
2366 if(pkt->destruct == av_destruct_packet)
2367 pkt->destruct= NULL; // non shared -> must keep original from being freed
2368 else
2369 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2370
2371 next_point = &s->packet_buffer;
2372 while(*next_point){
2373 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2374 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2375 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2376 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2377 break;
2378 next_point= &(*next_point)->next;
2379 }
2380 this_pktl->next= *next_point;
2381 *next_point= this_pktl;
2382 }
2383
2384 memset(streams, 0, sizeof(streams));
2385 pktl= s->packet_buffer;
2386 while(pktl){
2387 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2388 if(streams[ pktl->pkt.stream_index ] == 0)
2389 stream_count++;
2390 streams[ pktl->pkt.stream_index ]++;
2391 pktl= pktl->next;
2392 }
2393
2394 if(s->nb_streams == stream_count || (flush && stream_count)){
2395 pktl= s->packet_buffer;
2396 *out= pktl->pkt;
2397
2398 s->packet_buffer= pktl->next;
2399 av_freep(&pktl);
2400 return 1;
2401 }else{
2402 av_init_packet(out);
2403 return 0;
2404 }
2405 }
2406
2407 /**
2408 * Interleaves an AVPacket correctly so it can be muxed.
2409 * @param out the interleaved packet will be output here
2410 * @param in the input packet
2411 * @param flush 1 if no further packets are available as input and all
2412 * remaining packets should be output
2413 * @return 1 if a packet was output, 0 if no packet could be output,
2414 * < 0 if an error occurred
2415 */
2416 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2417 if(s->oformat->interleave_packet)
2418 return s->oformat->interleave_packet(s, out, in, flush);
2419 else
2420 return av_interleave_packet_per_dts(s, out, in, flush);
2421 }
2422
2423 /**
2424 * Writes a packet to an output media file ensuring correct interleaving.
2425 *
2426 * The packet must contain one audio or video frame.
2427 * If the packets are already correctly interleaved, the application should
2428 * call av_write_frame() instead, as it is slightly faster. It is also important
2429 * to keep in mind that completely non-interleaved input will need huge amounts
2430 * of memory to interleave with this, so it is preferable to interleave at the
2431 * demuxer level.
2432 *
2433 * @param s media file handle
2434 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2435 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
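 *
 * Typical muxing loop (sketch; get_next_packet() is a hypothetical source
 * of already encoded packets):
 * @code
 * AVPacket pkt;
 * while (get_next_packet(&pkt) == 0) {
 *     if (av_interleaved_write_frame(s, &pkt) < 0)
 *         break;
 * }
 * av_write_trailer(s);
 * @endcode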
2436 */
2437 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2438 AVStream *st= s->streams[ pkt->stream_index];
2439
2440 //FIXME/XXX/HACK drop zero sized packets
2441 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2442 return 0;
2443
2444 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2445 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2446 return -1;
2447
2448 if(pkt->dts == AV_NOPTS_VALUE)
2449 return -1;
2450
2451 for(;;){
2452 AVPacket opkt;
2453 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2454 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2455 return ret;
2456
2457 truncate_ts(s->streams[opkt.stream_index], &opkt);
2458 ret= s->oformat->write_packet(s, &opkt);
2459
2460 av_free_packet(&opkt);
2461 pkt= NULL;
2462
2463 if(ret<0)
2464 return ret;
2465 if(url_ferror(&s->pb))
2466 return url_ferror(&s->pb);
2467 }
2468 }
2469
2470 /**
2471 * @brief Write the stream trailer to an output media file and
2472 * free the file private data.
2473 *
2474 * @param s media file handle
2475 * @return 0 if OK. AVERROR_xxx if error.
2476 */
2477 int av_write_trailer(AVFormatContext *s)
2478 {
2479 int ret, i;
2480
2481 for(;;){
2482 AVPacket pkt;
2483 ret= av_interleave_packet(s, &pkt, NULL, 1);
2484 if(ret<0) //FIXME cleanup needed for ret<0 ?
2485 goto fail;
2486 if(!ret)
2487 break;
2488
2489 truncate_ts(s->streams[pkt.stream_index], &pkt);
2490 ret= s->oformat->write_packet(s, &pkt);
2491
2492 av_free_packet(&pkt);
2493
2494 if(ret<0)
2495 goto fail;
2496 if(url_ferror(&s->pb))
2497 goto fail;
2498 }
2499
2500 if(s->oformat->write_trailer)
2501 ret = s->oformat->write_trailer(s);
2502 fail:
2503 if(ret == 0)
2504 ret=url_ferror(&s->pb);
2505 for(i=0;i<s->nb_streams;i++)
2506 av_freep(&s->streams[i]->priv_data);
2507 av_freep(&s->priv_data);
2508 return ret;
2509 }
2510
2511 /* "user interface" functions */
2512
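/**
 * Print human-readable information about the format context to the log:
 * for input files the duration, start time and bit rate, plus a one-line
 * codec summary per stream (descriptive comment added for clarity).
 *
 * @param ic the format context to describe
 * @param index index printed with the context (e.g. the file number)
 * @param url the file name or URL being read or written
 * @param is_output 1 if 'ic' is an output (muxing) context, 0 for input
 */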
2513 void dump_format(AVFormatContext *ic,
2514 int index,
2515 const char *url,
2516 int is_output)
2517 {
2518 int i, flags;
2519 char buf[256];
2520
2521 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2522 is_output ? "Output" : "Input",
2523 index,
2524 is_output ? ic->oformat->name : ic->iformat->name,
2525 is_output ? "to" : "from", url);
2526 if (!is_output) {
2527 av_log(NULL, AV_LOG_INFO, " Duration: ");
2528 if (ic->duration != AV_NOPTS_VALUE) {
2529 int hours, mins, secs, us;
2530 secs = ic->duration / AV_TIME_BASE;
2531 us = ic->duration % AV_TIME_BASE;
2532 mins = secs / 60;
2533 secs %= 60;
2534 hours = mins / 60;
2535 mins %= 60;
2536 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2537 (10 * us) / AV_TIME_BASE);
2538 } else {
2539 av_log(NULL, AV_LOG_INFO, "N/A");
2540 }
2541 if (ic->start_time != AV_NOPTS_VALUE) {
2542 int secs, us;
2543 av_log(NULL, AV_LOG_INFO, ", start: ");
2544 secs = ic->start_time / AV_TIME_BASE;
2545 us = ic->start_time % AV_TIME_BASE;
2546 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2547 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2548 }
2549 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2550 if (ic->bit_rate) {
2551 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2552 } else {
2553 av_log(NULL, AV_LOG_INFO, "N/A");
2554 }
2555 av_log(NULL, AV_LOG_INFO, "\n");
2556 }
2557 for(i=0;i<ic->nb_streams;i++) {
2558 AVStream *st = ic->streams[i];
2559 int g= ff_gcd(st->time_base.num, st->time_base.den);
2560 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2561 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2562 /* the pid is important information, so we display it */
2563 /* XXX: add a generic system */
2564 if (is_output)
2565 flags = ic->oformat->flags;
2566 else
2567 flags = ic->iformat->flags;
2568 if (flags & AVFMT_SHOW_IDS) {
2569 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2570 }
2571 if (strlen(st->language) > 0) {
2572 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2573 }
2574 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2575 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2576 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2577 if(st->r_frame_rate.den && st->r_frame_rate.num)
2578 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
2579 /* else if(st->time_base.den && st->time_base.num)
2580 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
2581 else
2582 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
2583 }
2584 av_log(NULL, AV_LOG_INFO, "\n");
2585 }
2586 }
2587
2588 typedef struct {
2589 const char *abv;
2590 int width, height;
2591 int frame_rate, frame_rate_base;
2592 } AbvEntry;
2593
2594 static AbvEntry frame_abvs[] = {
2595 { "ntsc", 720, 480, 30000, 1001 },
2596 { "pal", 720, 576, 25, 1 },
2597 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2598 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2599 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2600 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2601 { "film", 352, 240, 24, 1 },
2602 { "ntsc-film", 352, 240, 24000, 1001 },
2603 { "sqcif", 128, 96, 0, 0 },
2604 { "qcif", 176, 144, 0, 0 },
2605 { "cif", 352, 288, 0, 0 },
2606 { "4cif", 704, 576, 0, 0 },
2607 };
2608
2609 /**
2610 * Parses width and height out of the string str.
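 *
 * Accepts either an abbreviation from the table above or an explicit
 * "<width>x<height>" string, e.g. (illustrative):
 * @code
 * int w, h;
 * if (parse_image_size(&w, &h, "640x480") < 0)  // "pal" would give 720x576
 *     return -1;
 * @endcode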
2611 */
2612 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2613 {
2614 int i;
2615 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2616 const char *p;
2617 int frame_width = 0, frame_height = 0;
2618
2619 for(i=0;i<n;i++) {
2620 if (!strcmp(frame_abvs[i].abv, str)) {
2621 frame_width = frame_abvs[i].width;
2622 frame_height = frame_abvs[i].height;
2623 break;
2624 }
2625 }
2626 if (i == n) {
2627 p = str;
2628 frame_width = strtol(p, (char **)&p, 10);
2629 if (*p)
2630 p++;
2631 frame_height = strtol(p, (char **)&p, 10);
2632 }
2633 if (frame_width <= 0 || frame_height <= 0)
2634 return -1;
2635 *width_ptr = frame_width;
2636 *height_ptr = frame_height;
2637 return 0;
2638 }
2639
2640 /**
2641 * Converts a frame rate from a string to a fraction.
2642 *
2643 * First, we try to get an exact integer or fractional frame rate.
2644 * If this fails, we convert the frame rate to a double and return
2645 * an approximate fraction using DEFAULT_FRAME_RATE_BASE.
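 *
 * For example (illustrative):
 * @code
 * int rate, base;
 * parse_frame_rate(&rate, &base, "30000/1001"); // rate=30000, base=1001
 * parse_frame_rate(&rate, &base, "ntsc-film");  // rate=24000, base=1001
 * @endcode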
2646 */
2647 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2648 {
2649 int i;
2650 char* cp;
2651
2652 /* First, we check our abbreviation table */
2653 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2654 if (!strcmp(frame_abvs[i].abv, arg)) {
2655 *frame_rate = frame_abvs[i].frame_rate;
2656 *frame_rate_base = frame_abvs[i].frame_rate_base;
2657 return 0;
2658 }
2659
2660 /* Then, we try to parse it as fraction */
2661 cp = strchr(arg, '/');
2662 if (!cp)
2663 cp = strchr(arg, ':');
2664 if (cp) {
2665 char* cpp;
2666 *frame_rate = strtol(arg, &cpp, 10);
2667 if (cpp != arg || cpp == cp)
2668 *frame_rate_base = strtol(cp+1, &cpp, 10);
2669 else
2670 *frame_rate = 0;
2671 }
2672 else {
2673 /* Finally we give up and parse it as double */
2674 AVRational time_base = av_d2q(strtod(arg, 0), DEFAULT_FRAME_RATE_BASE);
2675 *frame_rate_base = time_base.den;
2676 *frame_rate = time_base.num;
2677 }
2678 if (!*frame_rate || !*frame_rate_base)
2679 return -1;
2680 else
2681 return 0;
2682 }
2683
2684 /**
2685 * Converts a date string to the number of microseconds since Jan 1st, 1970.
2686 *
2687 * @code
2688 * Syntax:
2689 * - If not a duration:
2690 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2691 * Time is local time unless Z is suffixed to the end, in which case it is UTC.
2692 * Returns the date in microseconds since 1970.
2693 *
2694 * - If a duration:
2695 * HH[:MM[:SS[.m...]]]
2696 * S+[.m...]
2697 * @endcode
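 *
 * For example (illustrative; the absolute form depends on the local time
 * zone unless 'Z' is appended):
 * @code
 * int64_t when = parse_date("2006-01-01T12:00:00Z", 0); // absolute date, in microseconds
 * int64_t dur  = parse_date("00:00:01.5", 1);           // duration of 1500000 microseconds
 * @endcode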
2698 */
2699 #ifndef CONFIG_WINCE
2700 int64_t parse_date(const char *datestr, int duration)
2701 {
2702 const char *p;
2703 int64_t t;
2704 struct tm dt;
2705 int i;
2706 static const char *date_fmt[] = {
2707 "%Y-%m-%d",
2708 "%Y%m%d",
2709 };
2710 static const char *time_fmt[] = {
2711 "%H:%M:%S",
2712 "%H%M%S",
2713 };
2714 const char *q;
2715 int is_utc, len;
2716 char lastch;
2717 int negative = 0;
2718
2719 #undef time
2720 time_t now = time(0);
2721
2722 len = strlen(datestr);
2723 if (len > 0)
2724 lastch = datestr[len - 1];
2725 else
2726 lastch = '\0';
2727 is_utc = (lastch == 'z' || lastch == 'Z');
2728
2729 memset(&dt, 0, sizeof(dt));
2730
2731 p = datestr;
2732 q = NULL;
2733 if (!duration) {
2734 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2735 q = small_strptime(p, date_fmt[i], &dt);
2736 if (q) {
2737 break;
2738 }
2739 }
2740
2741 if (!q) {
2742 if (is_utc) {
2743 dt = *gmtime(&now);
2744 } else {
2745 dt = *localtime(&now);
2746 }
2747 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2748 } else {
2749 p = q;
2750 }
2751
2752 if (*p == 'T' || *p == 't' || *p == ' ')
2753 p++;
2754
2755 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2756 q = small_strptime(p, time_fmt[i], &dt);
2757 if (q) {
2758 break;
2759 }
2760 }
2761 } else {
2762 if (p[0] == '-') {
2763 negative = 1;
2764 ++p;
2765 }
2766 q = small_strptime(p, time_fmt[0], &dt);
2767 if (!q) {
2768 dt.tm_sec = strtol(p, (char **)&q, 10);
2769 dt.tm_min = 0;
2770 dt.tm_hour = 0;
2771 }
2772 }
2773
2774 /* Now we have all the fields that we can get */
2775 if (!q) {
2776 if (duration)
2777 return 0;
2778 else
2779 return now * INT64_C(1000000);
2780 }
2781
2782 if (duration) {
2783 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2784 } else {
2785 dt.tm_isdst = -1; /* unknown */
2786 if (is_utc) {
2787 t = mktimegm(&dt);
2788 } else {
2789 t = mktime(&dt);
2790 }
2791 }
2792
2793 t *= 1000000;
2794
2795 if (*q == '.') {
2796 int val, n;
2797 q++;
2798 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2799 if (!isdigit(*q))
2800 break;
2801 val += n * (*q - '0');
2802 }
2803 t += val;
2804 }
2805 return negative ? -t : t;
2806 }
2807 #endif /* CONFIG_WINCE */
2808
2809 /**
2810 * Attempts to find a specific tag in a URL.
2811 *
2812 * Syntax: '?tag1=val1&tag2=val2...'. Only minimal URL decoding is done
2813 * ('+' is converted to a space). Returns 1 if the tag was found.
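 *
 * For example (illustrative):
 * @code
 * char buf[64];
 * if (find_info_tag(buf, sizeof(buf), "user", "?user=joe&pass=x")) {
 *     // buf now contains "joe"
 * }
 * @endcode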
2814 */
2815 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2816 {
2817 const char *p;
2818 char tag[128], *q;
2819
2820 p = info;
2821 if (*p == '?')
2822 p++;
2823 for(;;) {
2824 q = tag;
2825 while (*p != '\0' && *p != '=' && *p != '&') {
2826 if ((q - tag) < sizeof(tag) - 1)
2827 *q++ = *p;
2828 p++;
2829 }
2830 *q = '\0';
2831 q = arg;
2832 if (*p == '=') {
2833 p++;
2834 while (*p != '&' && *p != '\0') {
2835 if ((q - arg) < arg_size - 1) {
2836 if (*p == '+')
2837 *q++ = ' ';
2838 else
2839 *q++ = *p;
2840 }
2841 p++;
2842 }
2843 *q = '\0';
2844 }
2845 if (!strcmp(tag, tag1))
2846 return 1;
2847 if (*p != '&')
2848 break;
2849 p++;
2850 }
2851 return 0;
2852 }
2853
2854 /**
2855 * Returns in 'buf' the path with '%d' replaced by a number.
2856 *
2857 * Also handles the '%0nd' format where 'n' is the total number
2858 * of digits, and '%%'.
2859 *
2860 * @param buf destination buffer
2861 * @param buf_size destination buffer size
2862 * @param path numbered sequence string
2863 * @param number frame number
2864 * @return 0 if OK, -1 if format error.
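 *
 * For example (illustrative):
 * @code
 * char name[1024];
 * if (av_get_frame_filename(name, sizeof(name), "img%03d.png", 7) == 0) {
 *     // name now contains "img007.png"
 * }
 * @endcode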
2865 */
2866 int av_get_frame_filename(char *buf, int buf_size,
2867 const char *path, int number)
2868 {
2869 const char *p;
2870 char *q, buf1[20], c;
2871 int nd, len, percentd_found;
2872
2873 q = buf;
2874 p = path;
2875 percentd_found = 0;
2876 for(;;) {
2877 c = *p++;
2878 if (c == '\0')
2879 break;
2880 if (c == '%') {
2881 do {
2882 nd = 0;
2883 while (isdigit(*p)) {
2884 nd = nd * 10 + *p++ - '0';
2885 }
2886 c = *p++;
2887 } while (isdigit(c));
2888
2889 switch(c) {
2890 case '%':
2891 goto addchar;
2892 case 'd':
2893 if (percentd_found)
2894 goto fail;
2895 percentd_found = 1;
2896 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2897 len = strlen(buf1);
2898 if ((q - buf + len) > buf_size - 1)
2899 goto fail;
2900 memcpy(q, buf1, len);
2901 q += len;
2902 break;
2903 default:
2904 goto fail;
2905 }
2906 } else {
2907 addchar:
2908 if ((q - buf) < buf_size - 1)
2909 *q++ = c;
2910 }
2911 }
2912 if (!percentd_found)
2913 goto fail;
2914 *q = '\0';
2915 return 0;
2916 fail:
2917 *q = '\0';
2918 return -1;
2919 }
2920
2921 /**
2922 * Print a nice hex dump of a buffer
2923 * @param f stream for output
2924 * @param buf buffer
2925 * @param size buffer size
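 *
 * For example (illustrative):
 * @code
 * av_hex_dump(stdout, pkt->data, pkt->size);
 * @endcode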
2926 */
2927 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2928 {
2929 int len, i, j, c;
2930
2931 for(i=0;i<size;i+=16) {
2932 len = size - i;
2933 if (len > 16)
2934 len = 16;
2935 fprintf(f, "%08x ", i);
2936 for(j=0;j<16;j++) {
2937 if (j < len)
2938 fprintf(f, " %02x", buf[i+j]);
2939 else
2940 fprintf(f, " ");
2941 }
2942 fprintf(f, " ");
2943 for(j=0;j<len;j++) {
2944 c = buf[i+j];
2945 if (c < ' ' || c > '~')
2946 c = '.';
2947 fprintf(f, "%c", c);
2948 }
2949 fprintf(f, "\n");
2950 }
2951 }
2952
2953 /**
2954 * Print a nice dump of a packet on 'f'
2955 * @param f stream for output
2956 * @param pkt packet to dump
2957 * @param dump_payload true if the payload must be displayed too
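 *
 * For example, to dump every demuxed packet (illustrative sketch):
 * @code
 * AVPacket pkt;
 * while (av_read_frame(ic, &pkt) >= 0) {
 *     av_pkt_dump(stdout, &pkt, 0);
 *     av_free_packet(&pkt);
 * }
 * @endcode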
2958 */
2959 //FIXME needs to know the time_base
2960 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2961 {
2962 fprintf(f, "stream #%d:\n", pkt->stream_index);
2963 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2964 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2965 /* DTS is _always_ valid after av_read_frame() */
2966 fprintf(f, " dts=");
2967 if (pkt->dts == AV_NOPTS_VALUE)
2968 fprintf(f, "N/A");
2969 else
2970 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
2971 /* PTS may not be known if B-frames are present */
2972 fprintf(f, " pts=");
2973 if (pkt->pts == AV_NOPTS_VALUE)
2974 fprintf(f, "N/A");
2975 else
2976 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
2977 fprintf(f, "\n");
2978 fprintf(f, " size=%d\n", pkt->size);
2979 if (dump_payload)
2980 av_hex_dump(f, pkt->data, pkt->size);
2981 }
2982
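/**
 * Split a URL of the form proto://authorization@hostname:port/path into its
 * components (descriptive comment added for clarity). Components that are
 * absent in the URL are returned as empty strings; a missing port is
 * returned as -1.
 */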
2983 void url_split(char *proto, int proto_size,
2984 char *authorization, int authorization_size,
2985 char *hostname, int hostname_size,
2986 int *port_ptr,
2987 char *path, int path_size,
2988 const char *url)
2989 {
2990 const char *p;
2991 char *q;
2992 int port;
2993
2994 port = -1;
2995
2996 p = url;
2997 q = proto;
2998 while (*p != ':' && *p != '\0') {
2999 if ((q - proto) < proto_size - 1)
3000 *q++ = *p;
3001 p++;
3002 }
3003 if (proto_size > 0)
3004 *q = '\0';
3005 if (authorization_size > 0)
3006 authorization[0] = '\0';
3007 if (*p == '\0') {
3008 if (proto_size > 0)
3009 proto[0] = '\0';
3010 if (hostname_size > 0)
3011 hostname[0] = '\0';
3012 p = url;
3013 } else {
3014 char *at,*slash; // PETR: position of '@' character and '/' character
3015
3016 p++;
3017 if (*p == '/')
3018 p++;
3019 if (*p == '/')
3020 p++;
3021 at = strchr(p,'@'); // PETR: get the position of '@'
3022 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
3023 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
3024
3025 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
3026
3027 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
3028 if (*p == '@') { // PETR: passed '@'
3029 if (authorization_size > 0)
3030 *q = '\0';
3031 q = hostname;
3032 at = NULL;
3033 } else if (!at) { // PETR: hostname
3034 if ((q - hostname) < hostname_size - 1)
3035 *q++ = *p;
3036 } else {
3037 if ((q - authorization) < authorization_size - 1)
3038 *q++ = *p;
3039 }
3040 p++;
3041 }
3042 if (hostname_size > 0)
3043 *q = '\0';
3044 if (*p == ':') {
3045 p++;
3046 port = strtoul(p, (char **)&p, 10);
3047 }
3048 }
3049 if (port_ptr)
3050 *port_ptr = port;
3051 pstrcpy(path, path_size, p);
3052 }
3053
3054 /**
3055 * Set the pts for a given stream.
3056 *
3057 * @param s stream
3058 * @param pts_wrap_bits number of bits effectively used by the pts
3059 * (used for wrap control, 33 is the value for MPEG)
3060 * @param pts_num numerator to convert to seconds (MPEG: 1)
3061 * @param pts_den denominator to convert to seconds (MPEG: 90000)
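 *
 * For example, a demuxer whose timestamps are in milliseconds might call
 * (illustrative values):
 * @code
 * av_set_pts_info(st, 64, 1, 1000);
 * @endcode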
3062 */
3063 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3064 int pts_num, int pts_den)
3065 {
3066 s->pts_wrap_bits = pts_wrap_bits;
3067 s->time_base.num = pts_num;
3068 s->time_base.den = pts_den;
3069 }
3070
3071 /* fraction handling */
3072
3073 /**
3074 * f = val + (num / den) + 0.5.
3075 *
3076 * 'num' is normalized so that 0 <= num < den.
3077 *
3078 * @param f fractional number
3079 * @param val integer value
3080 * @param num must be >= 0
3081 * @param den must be >= 1
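 *
 * For example (illustrative): av_frac_init(&f, 0, 0, 48000) yields
 * val = 0 and num = 24000 (the 0.5 rounding offset), and a subsequent
 * av_frac_add(&f, 48000) then advances f->val by exactly 1.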
3082 */
3083 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3084 {
3085 num += (den >> 1);
3086 if (num >= den) {
3087 val += num / den;
3088 num = num % den;
3089 }
3090 f->val = val;
3091 f->num = num;
3092 f->den = den;
3093 }
3094
3095 /**
3096 * Fractional addition to f: f = f + (incr / f->den).
3097 *
3098 * @param f fractional number
3099 * @param incr increment, can be positive or negative
3100 */
3101 static void av_frac_add(AVFrac *f, int64_t incr)
3102 {
3103 int64_t num, den;
3104
3105 num = f->num + incr;
3106 den = f->den;
3107 if (num < 0) {
3108 f->val += num / den;
3109 num = num % den;
3110 if (num < 0) {
3111 num += den;
3112 f->val--;
3113 }
3114 } else if (num >= den) {
3115 f->val += num / den;
3116 num = num % den;
3117 }
3118 f->num = num;
3119 }