1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "opt.h"
23 #include "avstring.h"
24 #include "riff.h"
25 #include <sys/time.h>
26 #include <time.h>
27
28 #undef NDEBUG
29 #include <assert.h>
30
31 /**
32 * @file libavformat/utils.c
33 * various utility functions for use within FFmpeg
34 */
35
36 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
37 static void av_frac_add(AVFrac *f, int64_t incr);
38
39 /** head of registered input format linked list */
40 AVInputFormat *first_iformat = NULL;
41 /** head of registered output format linked list */
42 AVOutputFormat *first_oformat = NULL;
43
44 AVInputFormat *av_iformat_next(AVInputFormat *f)
45 {
46 if(f) return f->next;
47 else return first_iformat;
48 }
49
50 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
51 {
52 if(f) return f->next;
53 else return first_oformat;
54 }
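/* Illustrative use of the two iterators above: passing NULL returns the head
   of the corresponding list and passing the last element returns NULL, so all
   registered input formats can be listed with, e.g.:

       AVInputFormat *ifmt = NULL;
       while ((ifmt = av_iformat_next(ifmt)))
           av_log(NULL, AV_LOG_INFO, "%s\n", ifmt->name);
*/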
55
56 void av_register_input_format(AVInputFormat *format)
57 {
58 AVInputFormat **p;
59 p = &first_iformat;
60 while (*p != NULL) p = &(*p)->next;
61 *p = format;
62 format->next = NULL;
63 }
64
65 void av_register_output_format(AVOutputFormat *format)
66 {
67 AVOutputFormat **p;
68 p = &first_oformat;
69 while (*p != NULL) p = &(*p)->next;
70 *p = format;
71 format->next = NULL;
72 }
73
74 int match_ext(const char *filename, const char *extensions)
75 {
76 const char *ext, *p;
77 char ext1[32], *q;
78
79 if(!filename)
80 return 0;
81
82 ext = strrchr(filename, '.');
83 if (ext) {
84 ext++;
85 p = extensions;
86 for(;;) {
87 q = ext1;
88 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
89 *q++ = *p++;
90 *q = '\0';
91 if (!strcasecmp(ext1, ext))
92 return 1;
93 if (*p == '\0')
94 break;
95 p++;
96 }
97 }
98 return 0;
99 }
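/* Example: the extension list is a comma-separated string and the comparison
   is case-insensitive, so match_ext("clip.MP4", "mp4,m4v") returns 1 while
   match_ext("clip.mkv", "mp4,m4v") returns 0. */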
100
101 AVOutputFormat *guess_format(const char *short_name, const char *filename,
102 const char *mime_type)
103 {
104 AVOutputFormat *fmt, *fmt_found;
105 int score_max, score;
106
107 /* specific test for image sequences */
108 #ifdef CONFIG_IMAGE2_MUXER
109 if (!short_name && filename &&
110 av_filename_number_test(filename) &&
111 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
112 return guess_format("image2", NULL, NULL);
113 }
114 #endif
115 /* Find the proper file type. */
116 fmt_found = NULL;
117 score_max = 0;
118 fmt = first_oformat;
119 while (fmt != NULL) {
120 score = 0;
121 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
122 score += 100;
123 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
124 score += 10;
125 if (filename && fmt->extensions &&
126 match_ext(filename, fmt->extensions)) {
127 score += 5;
128 }
129 if (score > score_max) {
130 score_max = score;
131 fmt_found = fmt;
132 }
133 fmt = fmt->next;
134 }
135 return fmt_found;
136 }
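/* The scoring above prefers an exact short name match (100) over a MIME type
   match (10) over a file extension match (5). A minimal usage sketch, assuming
   the corresponding muxers are registered:

       AVOutputFormat *ofmt;
       ofmt = guess_format(NULL, "out.avi", NULL); // picked by the ".avi" extension
       ofmt = guess_format("mpeg", NULL, NULL);    // picked by its short name
*/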
137
138 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
139 const char *mime_type)
140 {
141 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
142
143 if (fmt) {
144 AVOutputFormat *stream_fmt;
145 char stream_format_name[64];
146
147 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
148 stream_fmt = guess_format(stream_format_name, NULL, NULL);
149
150 if (stream_fmt)
151 fmt = stream_fmt;
152 }
153
154 return fmt;
155 }
156
157 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
158 const char *filename, const char *mime_type, enum CodecType type){
159 if(type == CODEC_TYPE_VIDEO){
160 enum CodecID codec_id= CODEC_ID_NONE;
161
162 #ifdef CONFIG_IMAGE2_MUXER
163 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
164 codec_id= av_guess_image2_codec(filename);
165 }
166 #endif
167 if(codec_id == CODEC_ID_NONE)
168 codec_id= fmt->video_codec;
169 return codec_id;
170 }else if(type == CODEC_TYPE_AUDIO)
171 return fmt->audio_codec;
172 else
173 return CODEC_ID_NONE;
174 }
175
176 AVInputFormat *av_find_input_format(const char *short_name)
177 {
178 AVInputFormat *fmt;
179 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
180 if (!strcmp(fmt->name, short_name))
181 return fmt;
182 }
183 return NULL;
184 }
185
186 /* memory handling */
187
188 void av_destruct_packet(AVPacket *pkt)
189 {
190 av_free(pkt->data);
191 pkt->data = NULL; pkt->size = 0;
192 }
193
194 void av_init_packet(AVPacket *pkt)
195 {
196 pkt->pts = AV_NOPTS_VALUE;
197 pkt->dts = AV_NOPTS_VALUE;
198 pkt->pos = -1;
199 pkt->duration = 0;
200 pkt->flags = 0;
201 pkt->stream_index = 0;
202 pkt->destruct= av_destruct_packet_nofree;
203 }
204
205 int av_new_packet(AVPacket *pkt, int size)
206 {
207 uint8_t *data;
208 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
209 return AVERROR(ENOMEM);
210 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
211 if (!data)
212 return AVERROR(ENOMEM);
213 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
214
215 av_init_packet(pkt);
216 pkt->data = data;
217 pkt->size = size;
218 pkt->destruct = av_destruct_packet;
219 return 0;
220 }
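/* Note: av_new_packet() allocates FF_INPUT_BUFFER_PADDING_SIZE bytes beyond
   'size' and zeroes them, so parsers that read slightly past the end of the
   payload never see uninitialized data. The preceding check rejects sizes for
   which adding the padding would overflow. */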
221
222 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
223 {
224 int ret= av_new_packet(pkt, size);
225
226 if(ret<0)
227 return ret;
228
229 pkt->pos= url_ftell(s);
230
231 ret= get_buffer(s, pkt->data, size);
232 if(ret<=0)
233 av_free_packet(pkt);
234 else
235 pkt->size= ret;
236
237 return ret;
238 }
239
240 int av_dup_packet(AVPacket *pkt)
241 {
242 if (pkt->destruct != av_destruct_packet) {
243 uint8_t *data;
244 /* Duplicate the packet data, adding the required padding again. */
245 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
246 return AVERROR(ENOMEM);
247 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
248 if (!data) {
249 return AVERROR(ENOMEM);
250 }
251 memcpy(data, pkt->data, pkt->size);
252 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
253 pkt->data = data;
254 pkt->destruct = av_destruct_packet;
255 }
256 return 0;
257 }
258
259 int av_filename_number_test(const char *filename)
260 {
261 char buf[1024];
262 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
263 }
264
265 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
266 {
267 AVInputFormat *fmt1, *fmt;
268 int score;
269
270 fmt = NULL;
271 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
272 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
273 continue;
274 score = 0;
275 if (fmt1->read_probe) {
276 score = fmt1->read_probe(pd);
277 } else if (fmt1->extensions) {
278 if (match_ext(pd->filename, fmt1->extensions)) {
279 score = 50;
280 }
281 }
282 if (score > *score_max) {
283 *score_max = score;
284 fmt = fmt1;
285 }else if (score == *score_max)
286 fmt = NULL;
287 }
288 return fmt;
289 }
290
291 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
292 int score=0;
293 return av_probe_input_format2(pd, is_opened, &score);
294 }
295
296 /************************************************************/
297 /* input media file */
298
299 /**
300 * Return the name of the input or output format of a context; used by the generic logging code (AVClass).
301 */
302 static const char* format_to_name(void* ptr)
303 {
304 AVFormatContext* fc = (AVFormatContext*) ptr;
305 if(fc->iformat) return fc->iformat->name;
306 else if(fc->oformat) return fc->oformat->name;
307 else return "NULL";
308 }
309
310 #define OFFSET(x) offsetof(AVFormatContext,x)
311 #define DEFAULT 0 //should be NAN, but NAN is not a compile-time constant in glibc as required by ANSI/ISO C
312 //these names are too long to be readable
313 #define E AV_OPT_FLAG_ENCODING_PARAM
314 #define D AV_OPT_FLAG_DECODING_PARAM
315
316 static const AVOption options[]={
317 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
318 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
319 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
320 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
321 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
322 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
323 {"track", "set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
324 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
325 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
326 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
327 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
328 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
329 {NULL},
330 };
331
332 #undef E
333 #undef D
334 #undef DEFAULT
335
336 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
337
338 static void avformat_get_context_defaults(AVFormatContext *s)
339 {
340 memset(s, 0, sizeof(AVFormatContext));
341
342 s->av_class = &av_format_context_class;
343
344 av_opt_set_defaults(s);
345 }
346
347 AVFormatContext *av_alloc_format_context(void)
348 {
349 AVFormatContext *ic;
350 ic = av_malloc(sizeof(AVFormatContext));
351 if (!ic) return ic;
352 avformat_get_context_defaults(ic);
353 ic->av_class = &av_format_context_class;
354 return ic;
355 }
356
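/**
 * Open a media file from an already opened ByteIOContext.
 * 'fmt' must be specified; no probing is performed here.
 */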
357 int av_open_input_stream(AVFormatContext **ic_ptr,
358 ByteIOContext *pb, const char *filename,
359 AVInputFormat *fmt, AVFormatParameters *ap)
360 {
361 int err;
362 AVFormatContext *ic;
363 AVFormatParameters default_ap;
364
365 if(!ap){
366 ap=&default_ap;
367 memset(ap, 0, sizeof(default_ap));
368 }
369
370 if(!ap->prealloced_context)
371 ic = av_alloc_format_context();
372 else
373 ic = *ic_ptr;
374 if (!ic) {
375 err = AVERROR(ENOMEM);
376 goto fail;
377 }
378 ic->iformat = fmt;
379 ic->pb = pb;
380 ic->duration = AV_NOPTS_VALUE;
381 ic->start_time = AV_NOPTS_VALUE;
382 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
383
384 /* allocate private data */
385 if (fmt->priv_data_size > 0) {
386 ic->priv_data = av_mallocz(fmt->priv_data_size);
387 if (!ic->priv_data) {
388 err = AVERROR(ENOMEM);
389 goto fail;
390 }
391 } else {
392 ic->priv_data = NULL;
393 }
394
395 err = ic->iformat->read_header(ic, ap);
396 if (err < 0)
397 goto fail;
398
399 if (pb && !ic->data_offset)
400 ic->data_offset = url_ftell(ic->pb);
401
402 *ic_ptr = ic;
403 return 0;
404 fail:
405 if (ic) {
406 av_freep(&ic->priv_data);
407 }
408 av_free(ic);
409 *ic_ptr = NULL;
410 return err;
411 }
412
413 /** size of probe buffer, for guessing file type from file contents */
414 #define PROBE_BUF_MIN 2048
415 #define PROBE_BUF_MAX (1<<20)
416
417 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
418 AVInputFormat *fmt,
419 int buf_size,
420 AVFormatParameters *ap)
421 {
422 int err, probe_size;
423 AVProbeData probe_data, *pd = &probe_data;
424 ByteIOContext *pb = NULL;
425
426 pd->filename = "";
427 if (filename)
428 pd->filename = filename;
429 pd->buf = NULL;
430 pd->buf_size = 0;
431
432 if (!fmt) {
433 /* guess format if no file can be opened */
434 fmt = av_probe_input_format(pd, 0);
435 }
436
437 /* Do not open file if the format does not need it. XXX: specific
438 hack needed to handle RTSP/TCP */
439 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
440 /* the format may need a file, so try to open it */
441 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
442 goto fail;
443 }
444 if (buf_size > 0) {
445 url_setbufsize(pb, buf_size);
446 }
447
448 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
449 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
450 /* read probe data */
451 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
452 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
453 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
454 if (url_fseek(pb, 0, SEEK_SET) < 0) {
455 url_fclose(pb);
456 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
457 pb = NULL;
458 err = AVERROR(EIO);
459 goto fail;
460 }
461 }
462 /* guess file format */
463 fmt = av_probe_input_format2(pd, 1, &score);
464 }
465 av_freep(&pd->buf);
466 }
467
468 /* if still no format found, error */
469 if (!fmt) {
470 err = AVERROR_NOFMT;
471 goto fail;
472 }
473
474 /* check filename in case an image number is expected */
475 if (fmt->flags & AVFMT_NEEDNUMBER) {
476 if (!av_filename_number_test(filename)) {
477 err = AVERROR_NUMEXPECTED;
478 goto fail;
479 }
480 }
481 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
482 if (err)
483 goto fail;
484 return 0;
485 fail:
486 av_freep(&pd->buf);
487 if (pb)
488 url_fclose(pb);
489 *ic_ptr = NULL;
490 return err;
491
492 }
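/* A minimal demuxing setup using the function above (the file name is only a
   placeholder):

       AVFormatContext *ic;
       if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) < 0)
           return -1;                   // could not open or recognize the file
       if (av_find_stream_info(ic) < 0) // fill in stream parameters (see below)
           return -1;
*/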
493
494 /*******************************************************/
495
496 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
497 {
498 int ret;
499 AVStream *st;
500 av_init_packet(pkt);
501 ret= s->iformat->read_packet(s, pkt);
502 if (ret < 0)
503 return ret;
504 st= s->streams[pkt->stream_index];
505
506 switch(st->codec->codec_type){
507 case CODEC_TYPE_VIDEO:
508 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
509 break;
510 case CODEC_TYPE_AUDIO:
511 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
512 break;
513 case CODEC_TYPE_SUBTITLE:
514 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
515 break;
516 }
517
518 return ret;
519 }
520
521 /**********************************************************/
522
523 /**
524 * Return the number of samples in an audio frame, or -1 on error.
525 */
526 static int get_audio_frame_size(AVCodecContext *enc, int size)
527 {
528 int frame_size;
529
530 if (enc->frame_size <= 1) {
531 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
532
533 if (bits_per_sample) {
534 if (enc->channels == 0)
535 return -1;
536 frame_size = (size << 3) / (bits_per_sample * enc->channels);
537 } else {
538 /* used for example by ADPCM codecs */
539 if (enc->bit_rate == 0)
540 return -1;
541 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
542 }
543 } else {
544 frame_size = enc->frame_size;
545 }
546 return frame_size;
547 }
548
549
550 /**
551 * Compute the frame duration as a fraction: on return, *pnum / *pden is the duration in seconds. Both are set to 0 if it is not available.
552 */
553 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
554 AVCodecParserContext *pc, AVPacket *pkt)
555 {
556 int frame_size;
557
558 *pnum = 0;
559 *pden = 0;
560 switch(st->codec->codec_type) {
561 case CODEC_TYPE_VIDEO:
562 if(st->time_base.num*1000LL > st->time_base.den){
563 *pnum = st->time_base.num;
564 *pden = st->time_base.den;
565 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
566 *pnum = st->codec->time_base.num;
567 *pden = st->codec->time_base.den;
568 if (pc && pc->repeat_pict) {
569 *pden *= 2;
570 *pnum = (*pnum) * (2 + pc->repeat_pict);
571 }
572 }
573 break;
574 case CODEC_TYPE_AUDIO:
575 frame_size = get_audio_frame_size(st->codec, pkt->size);
576 if (frame_size < 0)
577 break;
578 *pnum = frame_size;
579 *pden = st->codec->sample_rate;
580 break;
581 default:
582 break;
583 }
584 }
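/* Worked example for the audio case: a 4096-byte packet of 16-bit stereo PCM
   gives frame_size = (4096 * 8) / (16 * 2) = 1024 samples, so the duration is
   returned as the fraction 1024 / sample_rate of a second (about 23 ms at
   44100 Hz). */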
585
586 static int is_intra_only(AVCodecContext *enc){
587 if(enc->codec_type == CODEC_TYPE_AUDIO){
588 return 1;
589 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
590 switch(enc->codec_id){
591 case CODEC_ID_MJPEG:
592 case CODEC_ID_MJPEGB:
593 case CODEC_ID_LJPEG:
594 case CODEC_ID_RAWVIDEO:
595 case CODEC_ID_DVVIDEO:
596 case CODEC_ID_HUFFYUV:
597 case CODEC_ID_FFVHUFF:
598 case CODEC_ID_ASV1:
599 case CODEC_ID_ASV2:
600 case CODEC_ID_VCR1:
601 return 1;
602 default: break;
603 }
604 }
605 return 0;
606 }
607
608 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
609 int64_t dts, int64_t pts)
610 {
611 AVStream *st= s->streams[stream_index];
612 AVPacketList *pktl= s->packet_buffer;
613
614 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE)
615 return;
616
617 st->first_dts= dts - st->cur_dts;
618 st->cur_dts= dts;
619
620 for(; pktl; pktl= pktl->next){
621 if(pktl->pkt.stream_index != stream_index)
622 continue;
623 //FIXME think more about this check
624 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
625 pktl->pkt.pts += st->first_dts;
626
627 if(pktl->pkt.dts != AV_NOPTS_VALUE)
628 pktl->pkt.dts += st->first_dts;
629
630 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
631 st->start_time= pktl->pkt.pts;
632 }
633 if (st->start_time == AV_NOPTS_VALUE)
634 st->start_time = pts;
635 }
636
637 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
638 {
639 AVPacketList *pktl= s->packet_buffer;
640
641 assert(pkt->duration && !st->cur_dts);
642
643 for(; pktl; pktl= pktl->next){
644 if(pktl->pkt.stream_index != pkt->stream_index)
645 continue;
646 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
647 && !pktl->pkt.duration){
648 pktl->pkt.pts= pktl->pkt.dts= st->cur_dts;
649 st->cur_dts += pkt->duration;
650 pktl->pkt.duration= pkt->duration;
651 }else
652 break;
653 }
654 }
655
656 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
657 AVCodecParserContext *pc, AVPacket *pkt)
658 {
659 int num, den, presentation_delayed, delay, i;
660 int64_t offset;
661
662 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
663 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
664 pkt->dts -= 1LL<<st->pts_wrap_bits;
665 }
666
667 if (pkt->duration == 0) {
668 compute_frame_duration(&num, &den, st, pc, pkt);
669 if (den && num) {
670 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
671
672 if(st->cur_dts == 0 && pkt->duration != 0)
673 update_initial_durations(s, st, pkt);
674 }
675 }
676
677 /* correct timestamps with byte offset if demuxers only have timestamps
678 on packet boundaries */
679 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
680 /* this will estimate bitrate based on this frame's duration and size */
681 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
682 if(pkt->pts != AV_NOPTS_VALUE)
683 pkt->pts += offset;
684 if(pkt->dts != AV_NOPTS_VALUE)
685 pkt->dts += offset;
686 }
687
688 /* do we have a video B-frame? */
689 delay= st->codec->has_b_frames;
690 presentation_delayed = 0;
691 /* XXX: need has_b_frame, but cannot get it if the codec is
692 not initialized */
693 if (delay &&
694 pc && pc->pict_type != FF_B_TYPE)
695 presentation_delayed = 1;
696 /* This may be redundant, but it should not hurt. */
697 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
698 presentation_delayed = 1;
699
700 if(st->cur_dts == AV_NOPTS_VALUE){
701 st->cur_dts = 0; //FIXME maybe set it to 0 during init
702 }
703
704 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
705 /* interpolate PTS and DTS if they are not present */
706 if(delay <=1){
707 if (presentation_delayed) {
708 /* DTS = decompression timestamp */
709 /* PTS = presentation timestamp */
710 if (pkt->dts == AV_NOPTS_VALUE)
711 pkt->dts = st->last_IP_pts;
712 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
713 if (pkt->dts == AV_NOPTS_VALUE)
714 pkt->dts = st->cur_dts;
715
716 /* this is tricky: the dts must be incremented by the duration
717 of the frame we are displaying, i.e. the last I- or P-frame */
718 if (st->last_IP_duration == 0)
719 st->last_IP_duration = pkt->duration;
720 st->cur_dts = pkt->dts + st->last_IP_duration;
721 st->last_IP_duration = pkt->duration;
722 st->last_IP_pts= pkt->pts;
723 /* We cannot compute the PTS if it is not present (it could only
724 be derived by knowing future packets). */
725 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
726 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
727 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
728 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
729 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
730 pkt->pts += pkt->duration;
731 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
732 }
733 }
734
735 /* presentation is not delayed : PTS and DTS are the same */
736 if(pkt->pts == AV_NOPTS_VALUE)
737 pkt->pts = pkt->dts;
738 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
739 if(pkt->pts == AV_NOPTS_VALUE)
740 pkt->pts = st->cur_dts;
741 pkt->dts = pkt->pts;
742 st->cur_dts = pkt->pts + pkt->duration;
743 }
744 }
745
746 if(pkt->pts != AV_NOPTS_VALUE){
747 st->pts_buffer[0]= pkt->pts;
748 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
749 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
750 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
751 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
752 if(pkt->dts == AV_NOPTS_VALUE)
753 pkt->dts= st->pts_buffer[0];
754 if(delay>1){
755 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
756 }
757 if(pkt->dts > st->cur_dts)
758 st->cur_dts = pkt->dts;
759 }
760
761 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
762
763 /* update flags */
764 if(is_intra_only(st->codec))
765 pkt->flags |= PKT_FLAG_KEY;
766 else if (pc) {
767 pkt->flags = 0;
768 /* keyframe computation */
769 if (pc->pict_type == FF_I_TYPE)
770 pkt->flags |= PKT_FLAG_KEY;
771 }
772 }
773
774 void av_destruct_packet_nofree(AVPacket *pkt)
775 {
776 pkt->data = NULL; pkt->size = 0;
777 }
778
779 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
780 {
781 AVStream *st;
782 int len, ret, i;
783
784 av_init_packet(pkt);
785
786 for(;;) {
787 /* select current input stream component */
788 st = s->cur_st;
789 if (st) {
790 if (!st->need_parsing || !st->parser) {
791 /* no parsing needed: we just output the packet as is */
792 /* raw data support */
793 *pkt = s->cur_pkt;
794 compute_pkt_fields(s, st, NULL, pkt);
795 s->cur_st = NULL;
796 break;
797 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
798 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
799 s->cur_ptr, s->cur_len,
800 s->cur_pkt.pts, s->cur_pkt.dts);
801 s->cur_pkt.pts = AV_NOPTS_VALUE;
802 s->cur_pkt.dts = AV_NOPTS_VALUE;
803 /* increment read pointer */
804 s->cur_ptr += len;
805 s->cur_len -= len;
806
807 /* return packet if any */
808 if (pkt->size) {
809 got_packet:
810 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
811 pkt->duration = 0;
812 pkt->stream_index = st->index;
813 pkt->pts = st->parser->pts;
814 pkt->dts = st->parser->dts;
815 pkt->destruct = av_destruct_packet_nofree;
816 compute_pkt_fields(s, st, st->parser, pkt);
817
818 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
819 ff_reduce_index(s, st->index);
820 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
821 0, 0, AVINDEX_KEYFRAME);
822 }
823
824 break;
825 }
826 } else {
827 /* free packet */
828 av_free_packet(&s->cur_pkt);
829 s->cur_st = NULL;
830 }
831 } else {
832 /* read next packet */
833 ret = av_read_packet(s, &s->cur_pkt);
834 if (ret < 0) {
835 if (ret == AVERROR(EAGAIN))
836 return ret;
837 /* return the last frames, if any */
838 for(i = 0; i < s->nb_streams; i++) {
839 st = s->streams[i];
840 if (st->parser && st->need_parsing) {
841 av_parser_parse(st->parser, st->codec,
842 &pkt->data, &pkt->size,
843 NULL, 0,
844 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
845 if (pkt->size)
846 goto got_packet;
847 }
848 }
849 /* no more packets: really terminate parsing */
850 return ret;
851 }
852
853 st = s->streams[s->cur_pkt.stream_index];
854 if(st->codec->debug & FF_DEBUG_PTS)
855 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
856 s->cur_pkt.stream_index,
857 s->cur_pkt.pts,
858 s->cur_pkt.dts,
859 s->cur_pkt.size);
860
861 s->cur_st = st;
862 s->cur_ptr = s->cur_pkt.data;
863 s->cur_len = s->cur_pkt.size;
864 if (st->need_parsing && !st->parser) {
865 st->parser = av_parser_init(st->codec->codec_id);
866 if (!st->parser) {
867 /* no parser available: just output the raw packets */
868 st->need_parsing = AVSTREAM_PARSE_NONE;
869 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
870 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
871 }
872 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
873 st->parser->last_frame_offset=
874 st->parser->cur_offset= s->cur_pkt.pos;
875 }
876 }
877 }
878 }
879 if(st->codec->debug & FF_DEBUG_PTS)
880 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
881 pkt->stream_index,
882 pkt->pts,
883 pkt->dts,
884 pkt->size);
885
886 return 0;
887 }
888
889 static AVPacket *add_to_pktbuf(AVFormatContext *s, AVPacket *pkt){
890 AVPacketList *pktl= s->packet_buffer;
891 AVPacketList **plast_pktl= &s->packet_buffer;
892
893 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
894
895 pktl = av_mallocz(sizeof(AVPacketList));
896 if (!pktl)
897 return NULL;
898
899 /* add the packet in the buffered packet list */
900 *plast_pktl = pktl;
901 pktl->pkt= *pkt;
902 return &pktl->pkt;
903 }
904
905 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
906 {
907 AVPacketList *pktl;
908 int eof=0;
909 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
910
911 for(;;){
912 pktl = s->packet_buffer;
913 if (pktl) {
914 AVPacket *next_pkt= &pktl->pkt;
915
916 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
917 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
918 if( pktl->pkt.stream_index == next_pkt->stream_index
919 && next_pkt->dts < pktl->pkt.dts
920 && pktl->pkt.pts != pktl->pkt.dts //not b frame
921 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
922 next_pkt->pts= pktl->pkt.dts;
923 }
924 pktl= pktl->next;
925 }
926 pktl = s->packet_buffer;
927 }
928
929 if( next_pkt->pts != AV_NOPTS_VALUE
930 || next_pkt->dts == AV_NOPTS_VALUE
931 || !genpts || eof){
932 /* read packet from packet buffer, if there is data */
933 *pkt = *next_pkt;
934 s->packet_buffer = pktl->next;
935 av_free(pktl);
936 return 0;
937 }
938 }
939 if(genpts){
940 int ret= av_read_frame_internal(s, pkt);
941 if(ret<0){
942 if(pktl && ret != AVERROR(EAGAIN)){
943 eof=1;
944 continue;
945 }else
946 return ret;
947 }
948
949 if(av_dup_packet(add_to_pktbuf(s, pkt)) < 0)
950 return AVERROR(ENOMEM);
951 }else{
952 assert(!s->packet_buffer);
953 return av_read_frame_internal(s, pkt);
954 }
955 }
956 }
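/* Sketch of a typical consumer of av_read_frame(); the timestamps in the
   returned packet are expressed in the time base of the packet's stream:

       AVPacket pkt;
       while (av_read_frame(ic, &pkt) >= 0) {
           AVStream *st = ic->streams[pkt.stream_index];
           // pkt.pts and pkt.dts are in st->time_base units
           av_free_packet(&pkt);
       }
*/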
957
958 /* XXX: empty and free the whole packet queue */
959 static void flush_packet_queue(AVFormatContext *s)
960 {
961 AVPacketList *pktl;
962
963 for(;;) {
964 pktl = s->packet_buffer;
965 if (!pktl)
966 break;
967 s->packet_buffer = pktl->next;
968 av_free_packet(&pktl->pkt);
969 av_free(pktl);
970 }
971 }
972
973 /*******************************************************/
974 /* seek support */
975
976 int av_find_default_stream_index(AVFormatContext *s)
977 {
978 int i;
979 AVStream *st;
980
981 if (s->nb_streams <= 0)
982 return -1;
983 for(i = 0; i < s->nb_streams; i++) {
984 st = s->streams[i];
985 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
986 return i;
987 }
988 }
989 return 0;
990 }
991
992 /**
993 * Flush the frame reader.
994 */
995 static void av_read_frame_flush(AVFormatContext *s)
996 {
997 AVStream *st;
998 int i;
999
1000 flush_packet_queue(s);
1001
1002 /* free previous packet */
1003 if (s->cur_st) {
1004 if (s->cur_st->parser)
1005 av_free_packet(&s->cur_pkt);
1006 s->cur_st = NULL;
1007 }
1008 /* fail safe */
1009 s->cur_ptr = NULL;
1010 s->cur_len = 0;
1011
1012 /* for each stream, reset read state */
1013 for(i = 0; i < s->nb_streams; i++) {
1014 st = s->streams[i];
1015
1016 if (st->parser) {
1017 av_parser_close(st->parser);
1018 st->parser = NULL;
1019 }
1020 st->last_IP_pts = AV_NOPTS_VALUE;
1021 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1022 }
1023 }
1024
1025 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1026 int i;
1027
1028 for(i = 0; i < s->nb_streams; i++) {
1029 AVStream *st = s->streams[i];
1030
1031 st->cur_dts = av_rescale(timestamp,
1032 st->time_base.den * (int64_t)ref_st->time_base.num,
1033 st->time_base.num * (int64_t)ref_st->time_base.den);
1034 }
1035 }
1036
1037 void ff_reduce_index(AVFormatContext *s, int stream_index)
1038 {
1039 AVStream *st= s->streams[stream_index];
1040 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1041
1042 if((unsigned)st->nb_index_entries >= max_entries){
1043 int i;
1044 for(i=0; 2*i<st->nb_index_entries; i++)
1045 st->index_entries[i]= st->index_entries[2*i];
1046 st->nb_index_entries= i;
1047 }
1048 }
1049
1050 int av_add_index_entry(AVStream *st,
1051 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1052 {
1053 AVIndexEntry *entries, *ie;
1054 int index;
1055
1056 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1057 return -1;
1058
1059 entries = av_fast_realloc(st->index_entries,
1060 &st->index_entries_allocated_size,
1061 (st->nb_index_entries + 1) *
1062 sizeof(AVIndexEntry));
1063 if(!entries)
1064 return -1;
1065
1066 st->index_entries= entries;
1067
1068 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1069
1070 if(index<0){
1071 index= st->nb_index_entries++;
1072 ie= &entries[index];
1073 assert(index==0 || ie[-1].timestamp < timestamp);
1074 }else{
1075 ie= &entries[index];
1076 if(ie->timestamp != timestamp){
1077 if(ie->timestamp <= timestamp)
1078 return -1;
1079 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1080 st->nb_index_entries++;
1081 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1082 distance= ie->min_distance;
1083 }
1084
1085 ie->pos = pos;
1086 ie->timestamp = timestamp;
1087 ie->min_distance= distance;
1088 ie->size= size;
1089 ie->flags = flags;
1090
1091 return index;
1092 }
1093
1094 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1095 int flags)
1096 {
1097 AVIndexEntry *entries= st->index_entries;
1098 int nb_entries= st->nb_index_entries;
1099 int a, b, m;
1100 int64_t timestamp;
1101
1102 a = -1;
1103 b = nb_entries;
1104
1105 while (b - a > 1) {
1106 m = (a + b) >> 1;
1107 timestamp = entries[m].timestamp;
1108 if(timestamp >= wanted_timestamp)
1109 b = m;
1110 if(timestamp <= wanted_timestamp)
1111 a = m;
1112 }
1113 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1114
1115 if(!(flags & AVSEEK_FLAG_ANY)){
1116 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1117 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1118 }
1119 }
1120
1121 if(m == nb_entries)
1122 return -1;
1123 return m;
1124 }
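/* Example of the search semantics above: with index entries at timestamps
   {0, 100, 200}, a request for 150 returns the entry at 100 when
   AVSEEK_FLAG_BACKWARD is set and the entry at 200 otherwise; unless
   AVSEEK_FLAG_ANY is set, the result is then moved to the nearest keyframe
   entry in the search direction. */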
1125
1126 #define DEBUG_SEEK
1127
1128 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1129 AVInputFormat *avif= s->iformat;
1130 int64_t pos_min, pos_max, pos, pos_limit;
1131 int64_t ts_min, ts_max, ts;
1132 int index;
1133 AVStream *st;
1134
1135 if (stream_index < 0)
1136 return -1;
1137
1138 #ifdef DEBUG_SEEK
1139 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1140 #endif
1141
1142 ts_max=
1143 ts_min= AV_NOPTS_VALUE;
1144 pos_limit= -1; //gcc falsely says it may be uninitialized
1145
1146 st= s->streams[stream_index];
1147 if(st->index_entries){
1148 AVIndexEntry *e;
1149
1150 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1151 index= FFMAX(index, 0);
1152 e= &st->index_entries[index];
1153
1154 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1155 pos_min= e->pos;
1156 ts_min= e->timestamp;
1157 #ifdef DEBUG_SEEK
1158 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1159 pos_min,ts_min);
1160 #endif
1161 }else{
1162 assert(index==0);
1163 }
1164
1165 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1166 assert(index < st->nb_index_entries);
1167 if(index >= 0){
1168 e= &st->index_entries[index];
1169 assert(e->timestamp >= target_ts);
1170 pos_max= e->pos;
1171 ts_max= e->timestamp;
1172 pos_limit= pos_max - e->min_distance;
1173 #ifdef DEBUG_SEEK
1174 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1175 pos_max,pos_limit, ts_max);
1176 #endif
1177 }
1178 }
1179
1180 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1181 if(pos<0)
1182 return -1;
1183
1184 /* do the seek */
1185 url_fseek(s->pb, pos, SEEK_SET);
1186
1187 av_update_cur_dts(s, st, ts);
1188
1189 return 0;
1190 }
1191
1192 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1193 int64_t pos, ts;
1194 int64_t start_pos, filesize;
1195 int no_change;
1196
1197 #ifdef DEBUG_SEEK
1198 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1199 #endif
1200
1201 if(ts_min == AV_NOPTS_VALUE){
1202 pos_min = s->data_offset;
1203 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1204 if (ts_min == AV_NOPTS_VALUE)
1205 return -1;
1206 }
1207
1208 if(ts_max == AV_NOPTS_VALUE){
1209 int step= 1024;
1210 filesize = url_fsize(s->pb);
1211 pos_max = filesize - 1;
1212 do{
1213 pos_max -= step;
1214 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1215 step += step;
1216 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1217 if (ts_max == AV_NOPTS_VALUE)
1218 return -1;
1219
1220 for(;;){
1221 int64_t tmp_pos= pos_max + 1;
1222 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1223 if(tmp_ts == AV_NOPTS_VALUE)
1224 break;
1225 ts_max= tmp_ts;
1226 pos_max= tmp_pos;
1227 if(tmp_pos >= filesize)
1228 break;
1229 }
1230 pos_limit= pos_max;
1231 }
1232
1233 if(ts_min > ts_max){
1234 return -1;
1235 }else if(ts_min == ts_max){
1236 pos_limit= pos_min;
1237 }
1238
1239 no_change=0;
1240 while (pos_min < pos_limit) {
1241 #ifdef DEBUG_SEEK
1242 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1243 pos_min, pos_max,
1244 ts_min, ts_max);
1245 #endif
1246 assert(pos_limit <= pos_max);
1247
1248 if(no_change==0){
1249 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1250 // interpolate the position (converges faster than plain bisection)
1251 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1252 + pos_min - approximate_keyframe_distance;
1253 }else if(no_change==1){
1254 // bisection, if interpolation failed to change min or max pos last time
1255 pos = (pos_min + pos_limit)>>1;
1256 }else{
1257 /* linear search if bisection failed, can only happen if there
1258 are very few or no keyframes between min/max */
1259 pos=pos_min;
1260 }
1261 if(pos <= pos_min)
1262 pos= pos_min + 1;
1263 else if(pos > pos_limit)
1264 pos= pos_limit;
1265 start_pos= pos;
1266
1267 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1268 if(pos == pos_max)
1269 no_change++;
1270 else
1271 no_change=0;
1272 #ifdef DEBUG_SEEK
1273 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1274 #endif
1275 if(ts == AV_NOPTS_VALUE){
1276 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1277 return -1;
1278 }
1279 assert(ts != AV_NOPTS_VALUE);
1280 if (target_ts <= ts) {
1281 pos_limit = start_pos - 1;
1282 pos_max = pos;
1283 ts_max = ts;
1284 }
1285 if (target_ts >= ts) {
1286 pos_min = pos;
1287 ts_min = ts;
1288 }
1289 }
1290
1291 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1292 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1293 #ifdef DEBUG_SEEK
1294 pos_min = pos;
1295 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1296 pos_min++;
1297 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1298 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1299 pos, ts_min, target_ts, ts_max);
1300 #endif
1301 *ts_ret= ts;
1302 return pos;
1303 }
1304
1305 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1306 int64_t pos_min, pos_max;
1307 #if 0
1308 AVStream *st;
1309
1310 if (stream_index < 0)
1311 return -1;
1312
1313 st= s->streams[stream_index];
1314 #endif
1315
1316 pos_min = s->data_offset;
1317 pos_max = url_fsize(s->pb) - 1;
1318
1319 if (pos < pos_min) pos= pos_min;
1320 else if(pos > pos_max) pos= pos_max;
1321
1322 url_fseek(s->pb, pos, SEEK_SET);
1323
1324 #if 0
1325 av_update_cur_dts(s, st, ts);
1326 #endif
1327 return 0;
1328 }
1329
1330 static int av_seek_frame_generic(AVFormatContext *s,
1331 int stream_index, int64_t timestamp, int flags)
1332 {
1333 int index;
1334 AVStream *st;
1335 AVIndexEntry *ie;
1336
1337 st = s->streams[stream_index];
1338
1339 index = av_index_search_timestamp(st, timestamp, flags);
1340
1341 if(index < 0 || index==st->nb_index_entries-1){
1342 int i;
1343 AVPacket pkt;
1344
1345 if(st->index_entries && st->nb_index_entries){
1346 ie= &st->index_entries[st->nb_index_entries-1];
1347 url_fseek(s->pb, ie->pos, SEEK_SET);
1348 av_update_cur_dts(s, st, ie->timestamp);
1349 }else
1350 url_fseek(s->pb, 0, SEEK_SET);
1351
1352 for(i=0;; i++) {
1353 int ret = av_read_frame(s, &pkt);
1354 if(ret<0)
1355 break;
1356 av_free_packet(&pkt);
1357 if(stream_index == pkt.stream_index){
1358 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1359 break;
1360 }
1361 }
1362 index = av_index_search_timestamp(st, timestamp, flags);
1363 }
1364 if (index < 0)
1365 return -1;
1366
1367 av_read_frame_flush(s);
1368 if (s->iformat->read_seek){
1369 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1370 return 0;
1371 }
1372 ie = &st->index_entries[index];
1373 url_fseek(s->pb, ie->pos, SEEK_SET);
1374
1375 av_update_cur_dts(s, st, ie->timestamp);
1376
1377 return 0;
1378 }
1379
1380 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1381 {
1382 int ret;
1383 AVStream *st;
1384
1385 av_read_frame_flush(s);
1386
1387 if(flags & AVSEEK_FLAG_BYTE)
1388 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1389
1390 if(stream_index < 0){
1391 stream_index= av_find_default_stream_index(s);
1392 if(stream_index < 0)
1393 return -1;
1394
1395 st= s->streams[stream_index];
1396 /* timestamp for default must be expressed in AV_TIME_BASE units */
1397 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1398 }
1399 st= s->streams[stream_index];
1400
1401 /* first, we try the format specific seek */
1402 if (s->iformat->read_seek)
1403 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1404 else
1405 ret = -1;
1406 if (ret >= 0) {
1407 return 0;
1408 }
1409
1410 if(s->iformat->read_timestamp)
1411 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1412 else
1413 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1414 }
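/* Usage sketch: with stream_index == -1 the timestamp is interpreted in
   AV_TIME_BASE units, so seeking to the keyframe at or before the 10 second
   mark can be written as:

       av_seek_frame(ic, -1, 10 * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
*/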
1415
1416 /*******************************************************/
1417
1418 /**
1419 * Return TRUE if the context has an accurate duration for at least one stream.
1420 *
1421 * @return TRUE if at least one stream of 'ic' has a known duration.
1422 */
1423 static int av_has_duration(AVFormatContext *ic)
1424 {
1425 int i;
1426 AVStream *st;
1427
1428 for(i = 0;i < ic->nb_streams; i++) {
1429 st = ic->streams[i];
1430 if (st->duration != AV_NOPTS_VALUE)
1431 return 1;
1432 }
1433 return 0;
1434 }
1435
1436 /**
1437 * Estimate the global stream timings from the timings of each component stream.
1438 *
1439 * Also compute the total bitrate if possible.
1440 */
1441 static void av_update_stream_timings(AVFormatContext *ic)
1442 {
1443 int64_t start_time, start_time1, end_time, end_time1;
1444 int64_t duration, duration1;
1445 int i;
1446 AVStream *st;
1447
1448 start_time = INT64_MAX;
1449 end_time = INT64_MIN;
1450 duration = INT64_MIN;
1451 for(i = 0;i < ic->nb_streams; i++) {
1452 st = ic->streams[i];
1453 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1454 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1455 if (start_time1 < start_time)
1456 start_time = start_time1;
1457 if (st->duration != AV_NOPTS_VALUE) {
1458 end_time1 = start_time1
1459 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1460 if (end_time1 > end_time)
1461 end_time = end_time1;
1462 }
1463 }
1464 if (st->duration != AV_NOPTS_VALUE) {
1465 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1466 if (duration1 > duration)
1467 duration = duration1;
1468 }
1469 }
1470 if (start_time != INT64_MAX) {
1471 ic->start_time = start_time;
1472 if (end_time != INT64_MIN) {
1473 if (end_time - start_time > duration)
1474 duration = end_time - start_time;
1475 }
1476 }
1477 if (duration != INT64_MIN) {
1478 ic->duration = duration;
1479 if (ic->file_size > 0) {
1480 /* compute the bitrate */
1481 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1482 (double)ic->duration;
1483 }
1484 }
1485 }
1486
1487 static void fill_all_stream_timings(AVFormatContext *ic)
1488 {
1489 int i;
1490 AVStream *st;
1491
1492 av_update_stream_timings(ic);
1493 for(i = 0;i < ic->nb_streams; i++) {
1494 st = ic->streams[i];
1495 if (st->start_time == AV_NOPTS_VALUE) {
1496 if(ic->start_time != AV_NOPTS_VALUE)
1497 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1498 if(ic->duration != AV_NOPTS_VALUE)
1499 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1500 }
1501 }
1502 }
1503
1504 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1505 {
1506 int64_t filesize, duration;
1507 int bit_rate, i;
1508 AVStream *st;
1509
1510 /* if bit_rate is already set, we believe it */
1511 if (ic->bit_rate == 0) {
1512 bit_rate = 0;
1513 for(i=0;i<ic->nb_streams;i++) {
1514 st = ic->streams[i];
1515 bit_rate += st->codec->bit_rate;
1516 }
1517 ic->bit_rate = bit_rate;
1518 }
1519
1520 /* if duration is already set, we believe it */
1521 if (ic->duration == AV_NOPTS_VALUE &&
1522 ic->bit_rate != 0 &&
1523 ic->file_size != 0) {
1524 filesize = ic->file_size;
1525 if (filesize > 0) {
1526 for(i = 0; i < ic->nb_streams; i++) {
1527 st = ic->streams[i];
1528 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1529 if (st->duration == AV_NOPTS_VALUE)
1530 st->duration = duration;
1531 }
1532 }
1533 }
1534 }
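/* The fallback above simply divides the file size by the total bit rate:
   duration = 8 * file_size / bit_rate, rescaled into each stream's time base.
   For example, a 10 000 000 byte file at an overall 1 Mbit/s is estimated at
   8 * 10000000 / 1000000 = 80 seconds. */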
1535
1536 #define DURATION_MAX_READ_SIZE 250000
1537
1538 /* only used for MPEG-PS and MPEG-TS streams (see av_estimate_timings()) */
1539 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1540 {
1541 AVPacket pkt1, *pkt = &pkt1;
1542 AVStream *st;
1543 int read_size, i, ret;
1544 int64_t end_time;
1545 int64_t filesize, offset, duration;
1546
1547 /* free previous packet */
1548 if (ic->cur_st && ic->cur_st->parser)
1549 av_free_packet(&ic->cur_pkt);
1550 ic->cur_st = NULL;
1551
1552 /* flush packet queue */
1553 flush_packet_queue(ic);
1554
1555 for(i=0;i<ic->nb_streams;i++) {
1556 st = ic->streams[i];
1557 if (st->parser) {
1558 av_parser_close(st->parser);
1559 st->parser= NULL;
1560 }
1561 }
1562
1563 /* read the first packets to get the first PTS (not fully
1564 accurate, but it is enough for now) */
1565 url_fseek(ic->pb, 0, SEEK_SET);
1566 read_size = 0;
1567 for(;;) {
1568 if (read_size >= DURATION_MAX_READ_SIZE)
1569 break;
1570 /* if all info is available, we can stop */
1571 for(i = 0;i < ic->nb_streams; i++) {
1572 st = ic->streams[i];
1573 if (st->start_time == AV_NOPTS_VALUE)
1574 break;
1575 }
1576 if (i == ic->nb_streams)
1577 break;
1578
1579 ret = av_read_packet(ic, pkt);
1580 if (ret != 0)
1581 break;
1582 read_size += pkt->size;
1583 st = ic->streams[pkt->stream_index];
1584 if (pkt->pts != AV_NOPTS_VALUE) {
1585 if (st->start_time == AV_NOPTS_VALUE)
1586 st->start_time = pkt->pts;
1587 }
1588 av_free_packet(pkt);
1589 }
1590
1591 /* estimate the end time (duration) */
1592 /* XXX: may need to support wrapping */
1593 filesize = ic->file_size;
1594 offset = filesize - DURATION_MAX_READ_SIZE;
1595 if (offset < 0)
1596 offset = 0;
1597
1598 url_fseek(ic->pb, offset, SEEK_SET);
1599 read_size = 0;
1600 for(;;) {
1601 if (read_size >= DURATION_MAX_READ_SIZE)
1602 break;
1603
1604 ret = av_read_packet(ic, pkt);
1605 if (ret != 0)
1606 break;
1607 read_size += pkt->size;
1608 st = ic->streams[pkt->stream_index];
1609 if (pkt->pts != AV_NOPTS_VALUE &&
1610 st->start_time != AV_NOPTS_VALUE) {
1611 end_time = pkt->pts;
1612 duration = end_time - st->start_time;
1613 if (duration > 0) {
1614 if (st->duration == AV_NOPTS_VALUE ||
1615 st->duration < duration)
1616 st->duration = duration;
1617 }
1618 }
1619 av_free_packet(pkt);
1620 }
1621
1622 fill_all_stream_timings(ic);
1623
1624 url_fseek(ic->pb, old_offset, SEEK_SET);
1625 for(i=0; i<ic->nb_streams; i++){
1626 st= ic->streams[i];
1627 st->cur_dts= st->first_dts;
1628 st->last_IP_pts = AV_NOPTS_VALUE;
1629 }
1630 }
1631
1632 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1633 {
1634 int64_t file_size;
1635
1636 /* get the file size, if possible */
1637 if (ic->iformat->flags & AVFMT_NOFILE) {
1638 file_size = 0;
1639 } else {
1640 file_size = url_fsize(ic->pb);
1641 if (file_size < 0)
1642 file_size = 0;
1643 }
1644 ic->file_size = file_size;
1645
1646 if ((!strcmp(ic->iformat->name, "mpeg") ||
1647 !strcmp(ic->iformat->name, "mpegts")) &&
1648 file_size && !url_is_streamed(ic->pb)) {
1649 /* get accurate estimate from the PTSes */
1650 av_estimate_timings_from_pts(ic, old_offset);
1651 } else if (av_has_duration(ic)) {
1652 /* at least one component has timings - we use them for all
1653 the components */
1654 fill_all_stream_timings(ic);
1655 } else {
1656 /* less precise: use bitrate info */
1657 av_estimate_timings_from_bit_rate(ic);
1658 }
1659 av_update_stream_timings(ic);
1660
1661 #if 0
1662 {
1663 int i;
1664 AVStream *st;
1665 for(i = 0;i < ic->nb_streams; i++) {
1666 st = ic->streams[i];
1667 printf("%d: start_time: %0.3f duration: %0.3f\n",
1668 i, (double)st->start_time / AV_TIME_BASE,
1669 (double)st->duration / AV_TIME_BASE);
1670 }
1671 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1672 (double)ic->start_time / AV_TIME_BASE,
1673 (double)ic->duration / AV_TIME_BASE,
1674 ic->bit_rate / 1000);
1675 }
1676 #endif
1677 }
1678
1679 static int has_codec_parameters(AVCodecContext *enc)
1680 {
1681 int val;
1682 switch(enc->codec_type) {
1683 case CODEC_TYPE_AUDIO:
1684 val = enc->sample_rate;
1685 break;
1686 case CODEC_TYPE_VIDEO:
1687 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1688 break;
1689 default:
1690 val = 1;
1691 break;
1692 }
1693 return (enc->codec_id != CODEC_ID_NONE && val != 0);
1694 }
1695
1696 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1697 {
1698 int16_t *samples;
1699 AVCodec *codec;
1700 int got_picture, data_size, ret=0;
1701 AVFrame picture;
1702
1703 if(!st->codec->codec){
1704 codec = avcodec_find_decoder(st->codec->codec_id);
1705 if (!codec)
1706 return -1;
1707 ret = avcodec_open(st->codec, codec);
1708 if (ret < 0)
1709 return ret;
1710 }
1711
1712 if(!has_codec_parameters(st->codec)){
1713 switch(st->codec->codec_type) {
1714 case CODEC_TYPE_VIDEO:
1715 ret = avcodec_decode_video(st->codec, &picture,
1716 &got_picture, data, size);
1717 break;
1718 case CODEC_TYPE_AUDIO:
1719 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1720 samples = av_malloc(data_size);
1721 if (!samples)
1722 goto fail;
1723 ret = avcodec_decode_audio2(st->codec, samples,
1724 &data_size, data, size);
1725 av_free(samples);
1726 break;
1727 default:
1728 break;
1729 }
1730 }
1731 fail:
1732 return ret;
1733 }
1734
1735 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
1736 {
1737 AVInputFormat *fmt;
1738 fmt = av_probe_input_format2(pd, 1, &score);
1739
1740 if (fmt) {
1741 if (strncmp(fmt->name, "mp3", 3) == 0)
1742 st->codec->codec_id = CODEC_ID_MP3;
1743 else if (strncmp(fmt->name, "ac3", 3) == 0)
1744 st->codec->codec_id = CODEC_ID_AC3;
1745 }
1746 return !!fmt;
1747 }
1748
1749 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1750 {
1751 while (tags->id != CODEC_ID_NONE) {
1752 if (tags->id == id)
1753 return tags->tag;
1754 tags++;
1755 }
1756 return 0;
1757 }
1758
1759 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1760 {
1761 int i;
1762 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1763 if(tag == tags[i].tag)
1764 return tags[i].id;
1765 }
1766 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1767 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1768 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1769 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1770 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1771 return tags[i].id;
1772 }
1773 return CODEC_ID_NONE;
1774 }
1775
1776 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1777 {
1778 int i;
1779 for(i=0; tags && tags[i]; i++){
1780 int tag= codec_get_tag(tags[i], id);
1781 if(tag) return tag;
1782 }
1783 return 0;
1784 }
1785
1786 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1787 {
1788 int i;
1789 for(i=0; tags && tags[i]; i++){
1790 enum CodecID id= codec_get_id(tags[i], tag);
1791 if(id!=CODEC_ID_NONE) return id;
1792 }
1793 return CODEC_ID_NONE;
1794 }
1795
1796 /* absolute maximum size we read until we abort */
1797 #define MAX_READ_SIZE 5000000
1798
1799 #define MAX_STD_TIMEBASES (60*12+5)
1800 static int get_std_framerate(int i){
1801 if(i<60*12) return i*1001;
1802 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1803 }
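/* get_std_framerate() enumerates candidate frame rates scaled by 12*1001:
   the first 60*12 entries cover every multiple of 1/12 fps up to 60 fps
   (entry i corresponds to i/12 fps), and the last five entries add the
   NTSC-style rates 23.976, 29.97, 59.94, 11.988 and 14.985 fps. */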
1804
1805 /*
1806 * Is the time base unreliable?
1807 * This is a heuristic to balance between quick acceptance of the values in
1808 * the headers vs. some extra checks.
1809 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1810 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1811 * And there are "variable" fps files this needs to detect as well.
1812 */
1813 static int tb_unreliable(AVCodecContext *c){
1814 if( c->time_base.den >= 101L*c->time_base.num
1815 || c->time_base.den < 5L*c->time_base.num
1816 /* || c->codec_tag == ff_get_fourcc("DIVX")
1817 || c->codec_tag == ff_get_fourcc("XVID")*/
1818 || c->codec_id == CODEC_ID_MPEG2VIDEO)
1819 return 1;
1820 return 0;
1821 }
1822
1823 int av_find_stream_info(AVFormatContext *ic)
1824 {
1825 int i, count, ret, read_size, j;
1826 AVStream *st;
1827 AVPacket pkt1, *pkt;
1828 int64_t last_dts[MAX_STREAMS];
1829 int duration_count[MAX_STREAMS]={0};
1830 double (*duration_error)[MAX_STD_TIMEBASES];
1831 offset_t old_offset = url_ftell(ic->pb);
1832 int64_t codec_info_duration[MAX_STREAMS]={0};
1833 int codec_info_nb_frames[MAX_STREAMS]={0};
1834 AVProbeData probe_data[MAX_STREAMS];
1835 int codec_identified[MAX_STREAMS]={0};
1836
1837 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1838 if (!duration_error) return AVERROR(ENOMEM);
1839
1840 for(i=0;i<ic->nb_streams;i++) {
1841 st = ic->streams[i];
1842 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1843 /* if(!st->time_base.num)
1844 st->time_base= */
1845 if(!st->codec->time_base.num)
1846 st->codec->time_base= st->time_base;
1847 }
1848 //only for the split stuff
1849 if (!st->parser) {
1850 st->parser = av_parser_init(st->codec->codec_id);
1851 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
1852 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1853 }
1854 }
1855 }
1856
1857 for(i=0;i<MAX_STREAMS;i++){
1858 last_dts[i]= AV_NOPTS_VALUE;
1859 }
1860
1861 memset(probe_data, 0, sizeof(probe_data));
1862 count = 0;
1863 read_size = 0;
1864 for(;;) {
1865 /* check if one codec still needs to be handled */
1866 for(i=0;i<ic->nb_streams;i++) {
1867 st = ic->streams[i];
1868 if (!has_codec_parameters(st->codec))
1869 break;
1870 /* variable fps and no guess at the real fps */
1871 if( tb_unreliable(st->codec)
1872 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1873 break;
1874 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1875 break;
1876 if(st->first_dts == AV_NOPTS_VALUE)
1877 break;
1878 }
1879 if (i == ic->nb_streams) {
1880 /* NOTE: if the format has no header, then we need to read
1881 some packets to get most of the streams, so we cannot
1882 stop here */
1883 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1884 /* if we found the info for all the codecs, we can stop */
1885 ret = count;
1886 break;
1887 }
1888 }
1889 /* we did not get all the codec info, but we read too much data */
1890 if (read_size >= MAX_READ_SIZE) {
1891 ret = count;
1892 break;
1893 }
1894
1895 /* NOTE: a new stream can be added here if the file has no header
1896 (AVFMTCTX_NOHEADER). */
1897 ret = av_read_frame_internal(ic, &pkt1);
1898 if (ret < 0) {
1899 /* EOF or error */
1900 ret = -1; /* we could not get all the codec parameters before EOF */
1901 for(i=0;i<ic->nb_streams;i++) {
1902 st = ic->streams[i];
1903 if (!has_codec_parameters(st->codec)){
1904 char buf[256];
1905 avcodec_string(buf, sizeof(buf), st->codec, 0);
1906 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1907 } else {
1908 ret = 0;
1909 }
1910 }
1911 break;
1912 }
1913
1914 pkt= add_to_pktbuf(ic, &pkt1);
1915 if(av_dup_packet(pkt) < 0)
1916 return AVERROR(ENOMEM);
1917
1918 read_size += pkt->size;
1919
1920 st = ic->streams[pkt->stream_index];
1921 if(codec_info_nb_frames[st->index]>1)
1922 codec_info_duration[st->index] += pkt->duration;
1923 if (pkt->duration != 0)
1924 codec_info_nb_frames[st->index]++;
1925
1926 {
1927 int index= pkt->stream_index;
1928 int64_t last= last_dts[index];
1929 int64_t duration= pkt->dts - last;
1930
1931 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1932 double dur= duration * av_q2d(st->time_base);
1933
1934 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1935 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
1936 if(duration_count[index] < 2)
1937 memset(duration_error[index], 0, sizeof(*duration_error));
1938 for(i=1; i<MAX_STD_TIMEBASES; i++){
1939 int framerate= get_std_framerate(i);
1940 int ticks= lrintf(dur*framerate/(1001*12));
1941 double error= dur - ticks*1001*12/(double)framerate;
1942 duration_error[index][i] += error*error;
1943 }
1944 duration_count[index]++;
1945 }
1946 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
1947 last_dts[pkt->stream_index]= pkt->dts;
1948
1949 if (st->codec->codec_id == CODEC_ID_NONE) {
1950 AVProbeData *pd = &(probe_data[st->index]);
1951 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
1952 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
1953 pd->buf_size += pkt->size;
1954 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
1955 }
1956 }
1957 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1958 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1959 if(i){
1960 st->codec->extradata_size= i;
1961 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1962 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1963 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1964 }
1965 }
1966
1967 /* If we still have no information, try to open the codec and
1968 decompress the frame. We try to avoid that in most cases, as it
1969 takes longer and uses more memory. For MPEG-4, we need to
1970 decompress for QuickTime. */
1971 if (!has_codec_parameters(st->codec) /*&&
1972 (st->codec->codec_id == CODEC_ID_FLV1 ||
1973 st->codec->codec_id == CODEC_ID_H264 ||
1974 st->codec->codec_id == CODEC_ID_H263 ||
1975 st->codec->codec_id == CODEC_ID_H261 ||
1976 st->codec->codec_id == CODEC_ID_VORBIS ||
1977 st->codec->codec_id == CODEC_ID_MJPEG ||
1978 st->codec->codec_id == CODEC_ID_PNG ||
1979 st->codec->codec_id == CODEC_ID_PAM ||
1980 st->codec->codec_id == CODEC_ID_PGM ||
1981 st->codec->codec_id == CODEC_ID_PGMYUV ||
1982 st->codec->codec_id == CODEC_ID_PBM ||
1983 st->codec->codec_id == CODEC_ID_PPM ||
1984 st->codec->codec_id == CODEC_ID_SHORTEN ||
1985 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1986 try_decode_frame(st, pkt->data, pkt->size);
1987
1988 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
1989 break;
1990 }
1991 count++;
1992 }
1993
1994 // close codecs which were opened in try_decode_frame()
1995 for(i=0;i<ic->nb_streams;i++) {
1996 st = ic->streams[i];
1997 if(st->codec->codec)
1998 avcodec_close(st->codec);
1999 }
2000 for(i=0;i<ic->nb_streams;i++) {
2001 st = ic->streams[i];
2002 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2003 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2004 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2005
2006 if(duration_count[i]
2007 && tb_unreliable(st->codec) /*&&
2008 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2009 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2010 double best_error= 2*av_q2d(st->time_base);
2011 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2012
2013 for(j=1; j<MAX_STD_TIMEBASES; j++){
2014 double error= duration_error[i][j] * get_std_framerate(j);
2015 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2016 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2017 if(error < best_error){
2018 best_error= error;
2019 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2020 }
2021 }
2022 }
2023
2024 if (!st->r_frame_rate.num){
2025 if( st->codec->time_base.den * (int64_t)st->time_base.num
2026 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2027 st->r_frame_rate.num = st->codec->time_base.den;
2028 st->r_frame_rate.den = st->codec->time_base.num;
2029 }else{
2030 st->r_frame_rate.num = st->time_base.den;
2031 st->r_frame_rate.den = st->time_base.num;
2032 }
2033 }
2034 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2035 if (st->codec->codec_id == CODEC_ID_NONE && probe_data[st->index].buf_size > 0) {
2036 codec_identified[st->index] = set_codec_from_probe_data(st, &(probe_data[st->index]), 1);
2037 if (codec_identified[st->index]) {
2038 st->need_parsing = AVSTREAM_PARSE_FULL;
2039 }
2040 }
2041 if(!st->codec->bits_per_sample)
2042 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
2043 }
2044 }
2045
2046 av_estimate_timings(ic, old_offset);
2047
2048 for(i=0;i<ic->nb_streams;i++) {
2049 st = ic->streams[i];
2050 if (codec_identified[st->index])
2051 break;
2052 }
2053 //FIXME this is a mess
2054 if(i!=ic->nb_streams){
2055 av_read_frame_flush(ic);
2056 for(i=0;i<ic->nb_streams;i++) {
2057 st = ic->streams[i];
2058 if (codec_identified[st->index]) {
2059 av_seek_frame(ic, st->index, 0.0, 0);
2060 }
2061 st->cur_dts= st->first_dts;
2062 }
2063 url_fseek(ic->pb, ic->data_offset, SEEK_SET);
2064 }
2065
2066 #if 0
2067 /* correct DTS for B-frame streams with no timestamps */
2068 for(i=0;i<ic->nb_streams;i++) {
2069 st = ic->streams[i];
2070 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2071 if(b-frames){
2072 ppktl = &ic->packet_buffer;
2073 while(ppkt1){
2074 if(ppkt1->stream_index != i)
2075 continue;
2076 if(ppkt1->pkt->dts < 0)
2077 break;
2078 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2079 break;
2080 ppkt1->pkt->dts -= delta;
2081 ppkt1= ppkt1->next;
2082 }
2083 if(ppkt1)
2084 continue;
2085 st->cur_dts -= delta;
2086 }
2087 }
2088 }
2089 #endif
2090
2091 av_free(duration_error);
2092 for(i=0;i<MAX_STREAMS;i++){
2093 av_freep(&(probe_data[i].buf));
2094 }
2095
2096 return ret;
2097 }
2098
2099 /*******************************************************/
2100
2101 int av_read_play(AVFormatContext *s)
2102 {
2103 if (s->iformat->read_play)
2104 return s->iformat->read_play(s);
2105 if (s->pb)
2106 return av_url_read_fpause(s->pb, 0);
2107 return AVERROR(ENOSYS);
2108 }
2109
2110 int av_read_pause(AVFormatContext *s)
2111 {
2112 if (s->iformat->read_pause)
2113 return s->iformat->read_pause(s);
2114 if (s->pb)
2115 return av_url_read_fpause(s->pb, 1);
2116 return AVERROR(ENOSYS);
2117 }
2118
2119 void av_close_input_stream(AVFormatContext *s)
2120 {
2121 int i;
2122 AVStream *st;
2123
2124 /* free previous packet */
2125 if (s->cur_st && s->cur_st->parser)
2126 av_free_packet(&s->cur_pkt);
2127
2128 if (s->iformat->read_close)
2129 s->iformat->read_close(s);
2130 for(i=0;i<s->nb_streams;i++) {
2131 /* free all data in a stream component */
2132 st = s->streams[i];
2133 if (st->parser) {
2134 av_parser_close(st->parser);
2135 }
2136 av_free(st->index_entries);
2137 av_free(st->codec->extradata);
2138 av_free(st->codec);
2139 av_free(st->filename);
2140 av_free(st);
2141 }
2142 for(i=s->nb_programs-1; i>=0; i--) {
2143 av_freep(&s->programs[i]->provider_name);
2144 av_freep(&s->programs[i]->name);
2145 av_freep(&s->programs[i]->stream_index);
2146 av_freep(&s->programs[i]);
2147 }
2148 flush_packet_queue(s);
2149 av_freep(&s->priv_data);
2150 av_free(s);
2151 }
2152
2153 void av_close_input_file(AVFormatContext *s)
2154 {
2155 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2156 av_close_input_stream(s);
2157 if (pb)
2158 url_fclose(pb);
2159 }
2160
2161 AVStream *av_new_stream(AVFormatContext *s, int id)
2162 {
2163 AVStream *st;
2164 int i;
2165
2166 if (s->nb_streams >= MAX_STREAMS)
2167 return NULL;
2168
2169 st = av_mallocz(sizeof(AVStream));
2170 if (!st)
2171 return NULL;
2172
2173 st->codec= avcodec_alloc_context();
2174 if (s->iformat) {
2175 /* no default bitrate if decoding */
2176 st->codec->bit_rate = 0;
2177 }
2178 st->index = s->nb_streams;
2179 st->id = id;
2180 st->start_time = AV_NOPTS_VALUE;
2181 st->duration = AV_NOPTS_VALUE;
2182 st->cur_dts = AV_NOPTS_VALUE;
2183 st->first_dts = AV_NOPTS_VALUE;
2184
2185 /* default pts setting is MPEG-like */
2186 av_set_pts_info(st, 33, 1, 90000);
2187 st->last_IP_pts = AV_NOPTS_VALUE;
2188 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2189 st->pts_buffer[i]= AV_NOPTS_VALUE;
2190
2191 s->streams[s->nb_streams++] = st;
2192 return st;
2193 }
2194
2195 AVProgram *av_new_program(AVFormatContext *ac, int id)
2196 {
2197 AVProgram *program=NULL;
2198 int i;
2199
2200 #ifdef DEBUG_SI
2201 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2202 #endif
2203
2204 for(i=0; i<ac->nb_programs; i++)
2205 if(ac->programs[i]->id == id)
2206 program = ac->programs[i];
2207
2208 if(!program){
2209 program = av_mallocz(sizeof(AVProgram));
2210 if (!program)
2211 return NULL;
2212 dynarray_add(&ac->programs, &ac->nb_programs, program);
2213 program->discard = AVDISCARD_NONE;
2214 }
2215 program->id = id;
2216
2217 return program;
2218 }
2219
2220 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2221 {
2222 assert(!provider_name == !name);
2223 if(name){
2224 av_free(program->provider_name);
2225 av_free(program-> name);
2226 program->provider_name = av_strdup(provider_name);
2227 program-> name = av_strdup( name);
2228 }
2229 }
2230
2231
2232 /************************************************************/
2233 /* output media file */
2234
2235 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2236 {
2237 int ret;
2238
2239 if (s->oformat->priv_data_size > 0) {
2240 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2241 if (!s->priv_data)
2242 return AVERROR(ENOMEM);
2243 } else
2244 s->priv_data = NULL;
2245
2246 if (s->oformat->set_parameters) {
2247 ret = s->oformat->set_parameters(s, ap);
2248 if (ret < 0)
2249 return ret;
2250 }
2251 return 0;
2252 }
2253
2254 int av_write_header(AVFormatContext *s)
2255 {
2256 int ret, i;
2257 AVStream *st;
2258
2259 // some sanity checks
2260 for(i=0;i<s->nb_streams;i++) {
2261 st = s->streams[i];
2262
2263 switch (st->codec->codec_type) {
2264 case CODEC_TYPE_AUDIO:
2265 if(st->codec->sample_rate<=0){
2266 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2267 return -1;
2268 }
2269 break;
2270 case CODEC_TYPE_VIDEO:
2271 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2272 av_log(s, AV_LOG_ERROR, "time base not set\n");
2273 return -1;
2274 }
2275 if(st->codec->width<=0 || st->codec->height<=0){
2276 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2277 return -1;
2278 }
2279 break;
2280 }
2281
2282 if(s->oformat->codec_tag){
2283 if(st->codec->codec_tag){
2284 //FIXME
2285 //check that tag + id is in the table
2286 //if neither is in the table -> OK
2287 //if tag is in the table with another id -> FAIL
2288 //if id is in the table with another tag -> FAIL unless strict < ?
2289 }else
2290 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2291 }
2292 }
2293
2294 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2295 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2296 if (!s->priv_data)
2297 return AVERROR(ENOMEM);
2298 }
2299
2300 if(s->oformat->write_header){
2301 ret = s->oformat->write_header(s);
2302 if (ret < 0)
2303 return ret;
2304 }
2305
2306 /* init PTS generation */
2307 for(i=0;i<s->nb_streams;i++) {
2308 int64_t den = AV_NOPTS_VALUE;
2309 st = s->streams[i];
2310
2311 switch (st->codec->codec_type) {
2312 case CODEC_TYPE_AUDIO:
2313 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2314 break;
2315 case CODEC_TYPE_VIDEO:
2316 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2317 break;
2318 default:
2319 break;
2320 }
2321 if (den != AV_NOPTS_VALUE) {
2322 if (den <= 0)
2323 return AVERROR_INVALIDDATA;
2324 av_frac_init(&st->pts, 0, 0, den);
2325 }
2326 }
2327 return 0;
2328 }
2329
2330 //FIXME merge with compute_pkt_fields
2331 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2332 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2333 int num, den, frame_size, i;
2334
2335 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2336
2337 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2338 return -1;*/
2339
2340 /* duration field */
2341 if (pkt->duration == 0) {
2342 compute_frame_duration(&num, &den, st, NULL, pkt);
2343 if (den && num) {
2344 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2345 }
2346 }
2347
2348 //XXX/FIXME this is a temporary hack until all encoders output pts
2349 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2350 pkt->dts=
2351 // pkt->pts= st->cur_dts;
2352 pkt->pts= st->pts.val;
2353 }
2354
2355 //calculate dts from pts
2356 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2357 st->pts_buffer[0]= pkt->pts;
2358 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2359 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2360 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2361 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2362
2363 pkt->dts= st->pts_buffer[0];
2364 }
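/* Example: with delay == 1, a constant duration d and pts 0, d, 2d, ...,
   the buffer above yields dts -d, 0, d, ...; each packet's dts is the
   previous packet's pts, so in this case dts stays monotone and never
   exceeds the corresponding pts. */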
2365
2366 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2367 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2368 return -1;
2369 }
2370 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2371 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2372 return -1;
2373 }
2374
2375 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2376 st->cur_dts= pkt->dts;
2377 st->pts.val= pkt->dts;
2378
2379 /* update pts */
2380 switch (st->codec->codec_type) {
2381 case CODEC_TYPE_AUDIO:
2382 frame_size = get_audio_frame_size(st->codec, pkt->size);
2383
2384 /* HACK/FIXME: we skip the initial zero-sized packets as they most
2385 likely correspond to the encoder delay, but it would be better if we
2386 had the real timestamps from the encoder */
2387 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2388 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2389 }
2390 break;
2391 case CODEC_TYPE_VIDEO:
2392 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2393 break;
2394 default:
2395 break;
2396 }
2397 return 0;
2398 }
2399
2400 static void truncate_ts(AVStream *st, AVPacket *pkt){
2401 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
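/* With the default 33 pts_wrap_bits set in av_new_stream() (MPEG-like
   timestamps), pts_mask is 2^33 - 1, so pts/dts below are reduced
   modulo 2^33. */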
2402
2403 // if(pkt->dts < 0)
2404 // pkt->dts= 0; // this happens for low_delay=0 and B-frames; FIXME: needs further investigation into what we should do here
2405
2406 if (pkt->pts != AV_NOPTS_VALUE)
2407 pkt->pts &= pts_mask;
2408 if (pkt->dts != AV_NOPTS_VALUE)
2409 pkt->dts &= pts_mask;
2410 }
2411
2412 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2413 {
2414 int ret;
2415
2416 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2417 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2418 return ret;
2419
2420 truncate_ts(s->streams[pkt->stream_index], pkt);
2421
2422 ret= s->oformat->write_packet(s, pkt);
2423 if(!ret)
2424 ret= url_ferror(s->pb);
2425 return ret;
2426 }
2427
2428 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2429 AVPacketList *pktl, **next_point, *this_pktl;
2430 int stream_count=0;
2431 int streams[MAX_STREAMS];
2432
2433 if(pkt){
2434 AVStream *st= s->streams[ pkt->stream_index];
2435
2436 // assert(pkt->destruct != av_destruct_packet); //FIXME
2437
2438 this_pktl = av_mallocz(sizeof(AVPacketList));
2439 this_pktl->pkt= *pkt;
2440 if(pkt->destruct == av_destruct_packet)
2441 pkt->destruct= NULL; // not shared -> must keep original from being freed
2442 else
2443 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2444
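/* Keep the buffer sorted by dts: the cross-multiplication below compares
   the two dts values in a common unit without dividing, e.g. dts 9000 in
   a 1/90000 time base (0.1 s) sorts before dts 200 in a 1/1000 one (0.2 s). */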
2445 next_point = &s->packet_buffer;
2446 while(*next_point){
2447 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2448 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2449 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2450 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2451 break;
2452 next_point= &(*next_point)->next;
2453 }
2454 this_pktl->next= *next_point;
2455 *next_point= this_pktl;
2456 }
2457
2458 memset(streams, 0, sizeof(streams));
2459 pktl= s->packet_buffer;
2460 while(pktl){
2461 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2462 if(streams[ pktl->pkt.stream_index ] == 0)
2463 stream_count++;
2464 streams[ pktl->pkt.stream_index ]++;
2465 pktl= pktl->next;
2466 }
2467
2468 if(s->nb_streams == stream_count || (flush && stream_count)){
2469 pktl= s->packet_buffer;
2470 *out= pktl->pkt;
2471
2472 s->packet_buffer= pktl->next;
2473 av_freep(&pktl);
2474 return 1;
2475 }else{
2476 av_init_packet(out);
2477 return 0;
2478 }
2479 }
2480
2481 /**
2482 * Interleaves an AVPacket correctly so it can be muxed.
2483 * @param out the interleaved packet will be output here
2484 * @param in the input packet
2485 * @param flush 1 if no further packets are available as input and all
2486 * remaining packets should be output
2487 * @return 1 if a packet was output, 0 if no packet could be output,
2488 * < 0 if an error occurred
2489 */
2490 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2491 if(s->oformat->interleave_packet)
2492 return s->oformat->interleave_packet(s, out, in, flush);
2493 else
2494 return av_interleave_packet_per_dts(s, out, in, flush);
2495 }
2496
2497 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2498 AVStream *st= s->streams[ pkt->stream_index];
2499
2500 //FIXME/XXX/HACK drop zero sized packets
2501 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2502 return 0;
2503
2504 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2505 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2506 return -1;
2507
2508 if(pkt->dts == AV_NOPTS_VALUE)
2509 return -1;
2510
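/* Hand the packet to the interleaver, then write whatever it releases.
   After the first iteration pkt is set to NULL so later calls only drain
   already buffered packets; a return of 0 means nothing is ready yet. */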
2511 for(;;){
2512 AVPacket opkt;
2513 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2514 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2515 return ret;
2516
2517 truncate_ts(s->streams[opkt.stream_index], &opkt);
2518 ret= s->oformat->write_packet(s, &opkt);
2519
2520 av_free_packet(&opkt);
2521 pkt= NULL;
2522
2523 if(ret<0)
2524 return ret;
2525 if(url_ferror(s->pb))
2526 return url_ferror(s->pb);
2527 }
2528 }
2529
2530 int av_write_trailer(AVFormatContext *s)
2531 {
2532 int ret, i;
2533
2534 for(;;){
2535 AVPacket pkt;
2536 ret= av_interleave_packet(s, &pkt, NULL, 1);
2537 if(ret<0) //FIXME cleanup needed for ret<0 ?
2538 goto fail;
2539 if(!ret)
2540 break;
2541
2542 truncate_ts(s->streams[pkt.stream_index], &pkt);
2543 ret= s->oformat->write_packet(s, &pkt);
2544
2545 av_free_packet(&pkt);
2546
2547 if(ret<0)
2548 goto fail;
2549 if(url_ferror(s->pb))
2550 goto fail;
2551 }
2552
2553 if(s->oformat->write_trailer)
2554 ret = s->oformat->write_trailer(s);
2555 fail:
2556 if(ret == 0)
2557 ret=url_ferror(s->pb);
2558 for(i=0;i<s->nb_streams;i++)
2559 av_freep(&s->streams[i]->priv_data);
2560 av_freep(&s->priv_data);
2561 return ret;
2562 }
2563
2564 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2565 {
2566 int i, j;
2567 AVProgram *program=NULL;
2568 void *tmp;
2569
2570 for(i=0; i<ac->nb_programs; i++){
2571 if(ac->programs[i]->id != progid)
2572 continue;
2573 program = ac->programs[i];
2574 for(j=0; j<program->nb_stream_indexes; j++)
2575 if(program->stream_index[j] == idx)
2576 return;
2577
2578 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2579 if(!tmp)
2580 return;
2581 program->stream_index = tmp;
2582 program->stream_index[program->nb_stream_indexes++] = idx;
2583 return;
2584 }
2585 }
2586
2587 /* "user interface" functions */
2588 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2589 {
2590 char buf[256];
2591 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2592 AVStream *st = ic->streams[i];
2593 int g = ff_gcd(st->time_base.num, st->time_base.den);
2594 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2595 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2596 /* the PID is an important piece of information, so we display it */
2597 /* XXX: add a generic system */
2598 if (flags & AVFMT_SHOW_IDS)
2599 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2600 if (strlen(st->language) > 0)
2601 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2602 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2603 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2604 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2605 if(st->r_frame_rate.den && st->r_frame_rate.num)
2606 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2607 /* else if(st->time_base.den && st->time_base.num)
2608 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2609 else
2610 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2611 }
2612 av_log(NULL, AV_LOG_INFO, "\n");
2613 }
2614
2615 void dump_format(AVFormatContext *ic,
2616 int index,
2617 const char *url,
2618 int is_output)
2619 {
2620 int i;
2621
2622 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2623 is_output ? "Output" : "Input",
2624 index,
2625 is_output ? ic->oformat->name : ic->iformat->name,
2626 is_output ? "to" : "from", url);
2627 if (!is_output) {
2628 av_log(NULL, AV_LOG_INFO, " Duration: ");
2629 if (ic->duration != AV_NOPTS_VALUE) {
2630 int hours, mins, secs, us;
2631 secs = ic->duration / AV_TIME_BASE;
2632 us = ic->duration % AV_TIME_BASE;
2633 mins = secs / 60;
2634 secs %= 60;
2635 hours = mins / 60;
2636 mins %= 60;
2637 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2638 (100 * us) / AV_TIME_BASE);
2639 } else {
2640 av_log(NULL, AV_LOG_INFO, "N/A");
2641 }
2642 if (ic->start_time != AV_NOPTS_VALUE) {
2643 int secs, us;
2644 av_log(NULL, AV_LOG_INFO, ", start: ");
2645 secs = ic->start_time / AV_TIME_BASE;
2646 us = ic->start_time % AV_TIME_BASE;
2647 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2648 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2649 }
2650 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2651 if (ic->bit_rate) {
2652 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2653 } else {
2654 av_log(NULL, AV_LOG_INFO, "N/A");
2655 }
2656 av_log(NULL, AV_LOG_INFO, "\n");
2657 }
2658 if(ic->nb_programs) {
2659 int j, k;
2660 for(j=0; j<ic->nb_programs; j++) {
2661 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2662 ic->programs[j]->name ? ic->programs[j]->name : "");
2663 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2664 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2665 }
2666 } else
2667 for(i=0;i<ic->nb_streams;i++)
2668 dump_stream_format(ic, i, index, is_output);
2669 }
2670
2671 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2672 {
2673 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2674 }
2675
2676 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2677 {
2678 AVRational frame_rate;
2679 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2680 *frame_rate_num= frame_rate.num;
2681 *frame_rate_den= frame_rate.den;
2682 return ret;
2683 }
2684
2685 /**
2686 * Gets the current wall-clock time in microseconds since the Unix epoch.
2687 */
2688 int64_t av_gettime(void)
2689 {
2690 struct timeval tv;
2691 gettimeofday(&tv,NULL);
2692 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2693 }
2694
2695 int64_t parse_date(const char *datestr, int duration)
2696 {
2697 const char *p;
2698 int64_t t;
2699 struct tm dt;
2700 int i;
2701 static const char *date_fmt[] = {
2702 "%Y-%m-%d",
2703 "%Y%m%d",
2704 };
2705 static const char *time_fmt[] = {
2706 "%H:%M:%S",
2707 "%H%M%S",
2708 };
2709 const char *q;
2710 int is_utc, len;
2711 char lastch;
2712 int negative = 0;
2713
2714 #undef time
2715 time_t now = time(0);
2716
2717 len = strlen(datestr);
2718 if (len > 0)
2719 lastch = datestr[len - 1];
2720 else
2721 lastch = '\0';
2722 is_utc = (lastch == 'z' || lastch == 'Z');
2723
2724 memset(&dt, 0, sizeof(dt));
2725
2726 p = datestr;
2727 q = NULL;
2728 if (!duration) {
2729 /* parse the year-month-day part */
2730 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2731 q = small_strptime(p, date_fmt[i], &dt);
2732 if (q) {
2733 break;
2734 }
2735 }
2736
2737 /* if the year-month-day part is missing, then use the
2738 * current date */
2739 if (!q) {
2740 if (is_utc) {
2741 dt = *gmtime(&now);
2742 } else {
2743 dt = *localtime(&now);
2744 }
2745 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2746 } else {
2747 p = q;
2748 }
2749
2750 if (*p == 'T' || *p == 't' || *p == ' ')
2751 p++;
2752
2753 /* parse the hour-minute-second part */
2754 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2755 q = small_strptime(p, time_fmt[i], &dt);
2756 if (q) {
2757 break;
2758 }
2759 }
2760 } else {
2761 /* parse datestr as a duration */
2762 if (p[0] == '-') {
2763 negative = 1;
2764 ++p;
2765 }
2766 /* parse datestr as HH:MM:SS */
2767 q = small_strptime(p, time_fmt[0], &dt);
2768 if (!q) {
2769 /* parse datestr as S+ */
2770 dt.tm_sec = strtol(p, (char **)&q, 10);
2771 if (q == p)
2772 /* the parsing didn't succeed */
2773 return INT64_MIN;
2774 dt.tm_min = 0;
2775 dt.tm_hour = 0;
2776 }
2777 }
2778
2779 /* Now we have all the fields that we can get */
2780 if (!q) {
2781 return INT64_MIN;
2782 }
2783
2784 if (duration) {
2785 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2786 } else {
2787 dt.tm_isdst = -1; /* unknown */
2788 if (is_utc) {
2789 t = mktimegm(&dt);
2790 } else {
2791 t = mktime(&dt);
2792 }
2793 }
2794
2795 t *= 1000000;
2796
2797 /* parse the optional fractional-seconds part (up to microsecond precision) */
2798 if (*q == '.') {
2799 int val, n;
2800 q++;
2801 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2802 if (!isdigit(*q))
2803 break;
2804 val += n * (*q - '0');
2805 }
2806 t += val;
2807 }
2808 return negative ? -t : t;
2809 }
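/* Illustrative usage sketch, kept out of the build; the helper name and
 * values are made up for the example. */
#if 0
static void example_parse_date(void)
{
    /* absolute date/time, local time, in microseconds since the Unix epoch;
       a trailing 'Z' selects UTC instead */
    int64_t when = parse_date("2007-03-04T12:30:00", 0);
    /* duration: 4h 5min 6.5s -> 14706500000 microseconds */
    int64_t dur  = parse_date("04:05:06.5", 1);
    av_log(NULL, AV_LOG_INFO, "%"PRId64" %"PRId64"\n", when, dur);
}
#endif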
2810
2811 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2812 {
2813 const char *p;
2814 char tag[128], *q;
2815
2816 p = info;
2817 if (*p == '?')
2818 p++;
2819 for(;;) {
2820 q = tag;
2821 while (*p != '\0' && *p != '=' && *p != '&') {
2822 if ((q - tag) < sizeof(tag) - 1)
2823 *q++ = *p;
2824 p++;
2825 }
2826 *q = '\0';
2827 q = arg;
2828 if (*p == '=') {
2829 p++;
2830 while (*p != '&' && *p != '\0') {
2831 if ((q - arg) < arg_size - 1) {
2832 if (*p == '+')
2833 *q++ = ' ';
2834 else
2835 *q++ = *p;
2836 }
2837 p++;
2838 }
2839 *q = '\0';
2840 }
2841 if (!strcmp(tag, tag1))
2842 return 1;
2843 if (*p != '&')
2844 break;
2845 p++;
2846 }
2847 return 0;
2848 }
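/* Illustrative usage sketch, kept out of the build; the helper name and
 * values are made up for the example. */
#if 0
static void example_find_info_tag(void)
{
    char arg[32];
    /* returns 1 and copies "mp3" into arg; a '+' in a value becomes a space,
       and 0 is returned when the tag is absent */
    if (find_info_tag(arg, sizeof(arg), "codec", "?codec=mp3&rate=44100"))
        av_log(NULL, AV_LOG_INFO, "codec=%s\n", arg);
}
#endif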
2849
2850 int av_get_frame_filename(char *buf, int buf_size,
2851 const char *path, int number)
2852 {
2853 const char *p;
2854 char *q, buf1[20], c;
2855 int nd, len, percentd_found;
2856
2857 q = buf;
2858 p = path;
2859 percentd_found = 0;
2860 for(;;) {
2861 c = *p++;
2862 if (c == '\0')
2863 break;
2864 if (c == '%') {
2865 do {
2866 nd = 0;
2867 while (isdigit(*p)) {
2868 nd = nd * 10 + *p++ - '0';
2869 }
2870 c = *p++;
2871 } while (isdigit(c));
2872
2873 switch(c) {
2874 case '%':
2875 goto addchar;
2876 case 'd':
2877 if (percentd_found)
2878 goto fail;
2879 percentd_found = 1;
2880 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2881 len = strlen(buf1);
2882 if ((q - buf + len) > buf_size - 1)
2883 goto fail;
2884 memcpy(q, buf1, len);
2885 q += len;
2886 break;
2887 default:
2888 goto fail;
2889 }
2890 } else {
2891 addchar:
2892 if ((q - buf) < buf_size - 1)
2893 *q++ = c;
2894 }
2895 }
2896 if (!percentd_found)
2897 goto fail;
2898 *q = '\0';
2899 return 0;
2900 fail:
2901 *q = '\0';
2902 return -1;
2903 }
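/* Illustrative usage sketch, kept out of the build; the helper name and
 * values are made up for the example. */
#if 0
static void example_frame_filename(void)
{
    char name[64];
    /* writes "img007.jpg" and returns 0; a path without exactly one
       %d-style sequence (escape a literal '%' as "%%") fails with -1 */
    if (av_get_frame_filename(name, sizeof(name), "img%03d.jpg", 7) == 0)
        av_log(NULL, AV_LOG_INFO, "%s\n", name);
}
#endif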
2904
2905 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
2906 {
2907 int len, i, j, c;
2908 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2909
2910 for(i=0;i<size;i+=16) {
2911 len = size - i;
2912 if (len > 16)
2913 len = 16;
2914 PRINT("%08x ", i);
2915 for(j=0;j<16;j++) {
2916 if (j < len)
2917 PRINT(" %02x", buf[i+j]);
2918 else
2919 PRINT(" ");
2920 }
2921 PRINT(" ");
2922 for(j=0;j<len;j++) {
2923 c = buf[i+j];
2924 if (c < ' ' || c > '~')
2925 c = '.';
2926 PRINT("%c", c);
2927 }
2928 PRINT("\n");
2929 }
2930 #undef PRINT
2931 }
2932
2933 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2934 {
2935 hex_dump_internal(NULL, f, 0, buf, size);
2936 }
2937
2938 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
2939 {
2940 hex_dump_internal(avcl, NULL, level, buf, size);
2941 }
2942
2943 //FIXME needs to know the time_base
2944 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
2945 {
2946 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2947 PRINT("stream #%d:\n", pkt->stream_index);
2948 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2949 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2950 /* DTS is _always_ valid after av_read_frame() */
2951 PRINT(" dts=");
2952 if (pkt->dts == AV_NOPTS_VALUE)
2953 PRINT("N/A");
2954 else
2955 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
2956 /* PTS may not be known if B-frames are present. */
2957 PRINT(" pts=");
2958 if (pkt->pts == AV_NOPTS_VALUE)
2959 PRINT("N/A");
2960 else
2961 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
2962 PRINT("\n");
2963 PRINT(" size=%d\n", pkt->size);
2964 #undef PRINT
2965 if (dump_payload)
2966 av_hex_dump(f, pkt->data, pkt->size);
2967 }
2968
2969 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2970 {
2971 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
2972 }
2973
2974 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
2975 {
2976 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
2977 }
2978
2979 void url_split(char *proto, int proto_size,
2980 char *authorization, int authorization_size,
2981 char *hostname, int hostname_size,
2982 int *port_ptr,
2983 char *path, int path_size,
2984 const char *url)
2985 {
2986 const char *p, *ls, *at, *col, *brk;
2987
2988 if (port_ptr) *port_ptr = -1;
2989 if (proto_size > 0) proto[0] = 0;
2990 if (authorization_size > 0) authorization[0] = 0;
2991 if (hostname_size > 0) hostname[0] = 0;
2992 if (path_size > 0) path[0] = 0;
2993
2994 /* parse protocol */
2995 if ((p = strchr(url, ':'))) {
2996 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
2997 p++; /* skip ':' */
2998 if (*p == '/') p++;
2999 if (*p == '/') p++;
3000 } else {
3001 /* no protocol means plain filename */
3002 av_strlcpy(path, url, path_size);
3003 return;
3004 }
3005
3006 /* separate path from hostname */
3007 ls = strchr(p, '/');
3008 if(!ls)
3009 ls = strchr(p, '?');
3010 if(ls)
3011 av_strlcpy(path, ls, path_size);
3012 else
3013 ls = &p[strlen(p)]; // XXX
3014
3015 /* the rest is hostname, use that to parse auth/port */
3016 if (ls != p) {
3017 /* authorization (user[:pass]@hostname) */
3018 if ((at = strchr(p, '@')) && at < ls) {
3019 av_strlcpy(authorization, p,
3020 FFMIN(authorization_size, at + 1 - p));
3021 p = at + 1; /* skip '@' */
3022 }
3023
3024 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3025 /* [host]:port */
3026 av_strlcpy(hostname, p + 1,
3027 FFMIN(hostname_size, brk - p));
3028 if (brk[1] == ':' && port_ptr)
3029 *port_ptr = atoi(brk + 2);
3030 } else if ((col = strchr(p, ':')) && col < ls) {
3031 av_strlcpy(hostname, p,
3032 FFMIN(col + 1 - p, hostname_size));
3033 if (port_ptr) *port_ptr = atoi(col + 1);
3034 } else
3035 av_strlcpy(hostname, p,
3036 FFMIN(ls + 1 - p, hostname_size));
3037 }
3038 }
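/* Illustrative usage sketch, kept out of the build; the helper name and
 * URL are made up for the example. */
#if 0
static void example_url_split(void)
{
    char proto[16], auth[64], host[128], path[256];
    int port;
    /* yields proto="http", auth="user:pass", host="host.example.com",
       port=8080 and path="/index.html"; a string with no ':' is treated
       as a plain filename and copied entirely into path */
    url_split(proto, sizeof(proto), auth, sizeof(auth), host, sizeof(host),
              &port, path, sizeof(path),
              "http://user:pass@host.example.com:8080/index.html");
}
#endif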
3039
3040 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3041 int pts_num, int pts_den)
3042 {
3043 s->pts_wrap_bits = pts_wrap_bits;
3044 s->time_base.num = pts_num;
3045 s->time_base.den = pts_den;
3046 }
3047
3048 /* fraction handling */
3049
3050 /**
3051 * f = val + (num / den) + 0.5.
3052 *
3053 * 'num' is normalized so that 0 <= num < den.
3054 *
3055 * @param f fractional number
3056 * @param val integer value
3057 * @param num must be >= 0
3058 * @param den must be >= 1
3059 */
3060 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3061 {
3062 num += (den >> 1);
3063 if (num >= den) {
3064 val += num / den;
3065 num = num % den;
3066 }
3067 f->val = val;
3068 f->num = num;
3069 f->den = den;
3070 }
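/* Example: av_frac_init(&f, 0, 3, 4) represents 0 + 3/4 + 0.5 = 1.25 and
   stores val = 1, num = 1, den = 4, i.e. 1 + 1/4. */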
3071
3072 /**
3073 * Fractional addition to f: f = f + (incr / f->den).
3074 *
3075 * @param f fractional number
3076 * @param incr increment, can be positive or negative
3077 */
3078 static void av_frac_add(AVFrac *f, int64_t incr)
3079 {
3080 int64_t num, den;
3081
3082 num = f->num + incr;
3083 den = f->den;
3084 if (num < 0) {
3085 f->val += num / den;
3086 num = num % den;
3087 if (num < 0) {
3088 num += den;
3089 f->val--;
3090 }
3091 } else if (num >= den) {
3092 f->val += num / den;
3093 num = num % den;
3094 }
3095 f->num = num;
3096 }
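/* Example: for 44100 Hz audio in a 1/90000 time base, av_write_header()
   initializes den to 44100, and each av_frac_add() of 90000 * frame_size
   advances val by about frame_size * 90000 / 44100 ticks while carrying
   the exact remainder in num, so no rounding error accumulates across
   packets. */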