remove useless title check
[libav.git] / libavformat / utils.c
1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "libavcodec/opt.h"
23 #include "libavutil/avstring.h"
24 #include "riff.h"
25 #include <sys/time.h>
26 #include <time.h>
27
28 #undef NDEBUG
29 #include <assert.h>
30
31 /**
32 * @file libavformat/utils.c
33 * various utility functions for use within FFmpeg
34 */
35
36 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
37 static void av_frac_add(AVFrac *f, int64_t incr);
38
39 /** head of registered input format linked list */
40 AVInputFormat *first_iformat = NULL;
41 /** head of registered output format linked list */
42 AVOutputFormat *first_oformat = NULL;
43
44 AVInputFormat *av_iformat_next(AVInputFormat *f)
45 {
46 if(f) return f->next;
47 else return first_iformat;
48 }
49
50 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
51 {
52 if(f) return f->next;
53 else return first_oformat;
54 }
55
56 void av_register_input_format(AVInputFormat *format)
57 {
58 AVInputFormat **p;
59 p = &first_iformat;
60 while (*p != NULL) p = &(*p)->next;
61 *p = format;
62 format->next = NULL;
63 }
64
65 void av_register_output_format(AVOutputFormat *format)
66 {
67 AVOutputFormat **p;
68 p = &first_oformat;
69 while (*p != NULL) p = &(*p)->next;
70 *p = format;
71 format->next = NULL;
72 }
73
74 int match_ext(const char *filename, const char *extensions)
75 {
76 const char *ext, *p;
77 char ext1[32], *q;
78
79 if(!filename)
80 return 0;
81
82 ext = strrchr(filename, '.');
83 if (ext) {
84 ext++;
85 p = extensions;
86 for(;;) {
87 q = ext1;
88 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
89 *q++ = *p++;
90 *q = '\0';
91 if (!strcasecmp(ext1, ext))
92 return 1;
93 if (*p == '\0')
94 break;
95 p++;
96 }
97 }
98 return 0;
99 }
100
101 AVOutputFormat *guess_format(const char *short_name, const char *filename,
102 const char *mime_type)
103 {
104 AVOutputFormat *fmt, *fmt_found;
105 int score_max, score;
106
107 /* specific test for image sequences */
108 #ifdef CONFIG_IMAGE2_MUXER
109 if (!short_name && filename &&
110 av_filename_number_test(filename) &&
111 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
112 return guess_format("image2", NULL, NULL);
113 }
114 #endif
115 /* Find the proper file type. */
116 fmt_found = NULL;
117 score_max = 0;
118 fmt = first_oformat;
119 while (fmt != NULL) {
120 score = 0;
121 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
122 score += 100;
123 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
124 score += 10;
125 if (filename && fmt->extensions &&
126 match_ext(filename, fmt->extensions)) {
127 score += 5;
128 }
129 if (score > score_max) {
130 score_max = score;
131 fmt_found = fmt;
132 }
133 fmt = fmt->next;
134 }
135 return fmt_found;
136 }
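/**
 * Typical use of guess_format(): pick a muxer from an output filename and ask
 * for its default codecs (illustrative sketch; "out.avi" is just an example).
 * @code
 *   AVOutputFormat *ofmt = guess_format(NULL, "out.avi", NULL);
 *   if (ofmt) {
 *       enum CodecID vc = av_guess_codec(ofmt, NULL, "out.avi", NULL, CODEC_TYPE_VIDEO);
 *       enum CodecID ac = av_guess_codec(ofmt, NULL, "out.avi", NULL, CODEC_TYPE_AUDIO);
 *       // vc and ac now hold the muxer's preferred video/audio codec IDs
 *   }
 * @endcode
 */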
137
138 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
139 const char *mime_type)
140 {
141 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
142
143 if (fmt) {
144 AVOutputFormat *stream_fmt;
145 char stream_format_name[64];
146
147 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
148 stream_fmt = guess_format(stream_format_name, NULL, NULL);
149
150 if (stream_fmt)
151 fmt = stream_fmt;
152 }
153
154 return fmt;
155 }
156
157 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
158 const char *filename, const char *mime_type, enum CodecType type){
159 if(type == CODEC_TYPE_VIDEO){
160 enum CodecID codec_id= CODEC_ID_NONE;
161
162 #ifdef CONFIG_IMAGE2_MUXER
163 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
164 codec_id= av_guess_image2_codec(filename);
165 }
166 #endif
167 if(codec_id == CODEC_ID_NONE)
168 codec_id= fmt->video_codec;
169 return codec_id;
170 }else if(type == CODEC_TYPE_AUDIO)
171 return fmt->audio_codec;
172 else
173 return CODEC_ID_NONE;
174 }
175
176 AVInputFormat *av_find_input_format(const char *short_name)
177 {
178 AVInputFormat *fmt;
179 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
180 if (!strcmp(fmt->name, short_name))
181 return fmt;
182 }
183 return NULL;
184 }
185
186 /* memory handling */
187
188 void av_destruct_packet(AVPacket *pkt)
189 {
190 av_free(pkt->data);
191 pkt->data = NULL; pkt->size = 0;
192 }
193
194 void av_init_packet(AVPacket *pkt)
195 {
196 pkt->pts = AV_NOPTS_VALUE;
197 pkt->dts = AV_NOPTS_VALUE;
198 pkt->pos = -1;
199 pkt->duration = 0;
200 pkt->flags = 0;
201 pkt->stream_index = 0;
202 pkt->destruct= av_destruct_packet_nofree;
203 }
204
205 int av_new_packet(AVPacket *pkt, int size)
206 {
207 uint8_t *data;
208 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
209 return AVERROR(ENOMEM);
210 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
211 if (!data)
212 return AVERROR(ENOMEM);
213 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
214
215 av_init_packet(pkt);
216 pkt->data = data;
217 pkt->size = size;
218 pkt->destruct = av_destruct_packet;
219 return 0;
220 }
221
222 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
223 {
224 int ret= av_new_packet(pkt, size);
225
226 if(ret<0)
227 return ret;
228
229 pkt->pos= url_ftell(s);
230
231 ret= get_buffer(s, pkt->data, size);
232 if(ret<=0)
233 av_free_packet(pkt);
234 else
235 pkt->size= ret;
236
237 return ret;
238 }
239
240 int av_dup_packet(AVPacket *pkt)
241 {
242 if (pkt->destruct != av_destruct_packet) {
243 uint8_t *data;
244 /* We duplicate the packet and don't forget to add the padding again. */
245 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
246 return AVERROR(ENOMEM);
247 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
248 if (!data) {
249 return AVERROR(ENOMEM);
250 }
251 memcpy(data, pkt->data, pkt->size);
252 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
253 pkt->data = data;
254 pkt->destruct = av_destruct_packet;
255 }
256 return 0;
257 }
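/**
 * Packet memory model in short (illustrative sketch): av_new_packet() gives
 * the packet its own padded buffer, av_dup_packet() copies caller-owned data
 * into such a buffer, and av_free_packet() releases whatever the destruct
 * callback owns.
 * @code
 *   AVPacket pkt;
 *   if (av_new_packet(&pkt, 1024) == 0) {
 *       // pkt.data has 1024 usable bytes plus zeroed padding
 *       memset(pkt.data, 0, pkt.size);
 *       av_free_packet(&pkt);
 *   }
 * @endcode
 */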
258
259 int av_filename_number_test(const char *filename)
260 {
261 char buf[1024];
262 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
263 }
264
265 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
266 {
267 AVInputFormat *fmt1, *fmt;
268 int score;
269
270 fmt = NULL;
271 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
272 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
273 continue;
274 score = 0;
275 if (fmt1->read_probe) {
276 score = fmt1->read_probe(pd);
277 } else if (fmt1->extensions) {
278 if (match_ext(pd->filename, fmt1->extensions)) {
279 score = 50;
280 }
281 }
282 if (score > *score_max) {
283 *score_max = score;
284 fmt = fmt1;
285 }else if (score == *score_max)
286 fmt = NULL;
287 }
288 return fmt;
289 }
290
291 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
292 int score=0;
293 return av_probe_input_format2(pd, is_opened, &score);
294 }
295
296 /************************************************************/
297 /* input media file */
298
299 /**
300 * Open a media file from an IO stream. 'fmt' must be specified.
301 */
302 static const char* format_to_name(void* ptr)
303 {
304 AVFormatContext* fc = (AVFormatContext*) ptr;
305 if(fc->iformat) return fc->iformat->name;
306 else if(fc->oformat) return fc->oformat->name;
307 else return "NULL";
308 }
309
310 #define OFFSET(x) offsetof(AVFormatContext,x)
311 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
312 //these names are too long to be readable
313 #define E AV_OPT_FLAG_ENCODING_PARAM
314 #define D AV_OPT_FLAG_DECODING_PARAM
315
316 static const AVOption options[]={
317 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
318 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
319 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
320 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
321 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
322 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
323 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
324 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
325 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
326 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
327 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
328 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
329 {NULL},
330 };
331
332 #undef E
333 #undef D
334 #undef DEFAULT
335
336 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
337
338 static void avformat_get_context_defaults(AVFormatContext *s)
339 {
340 memset(s, 0, sizeof(AVFormatContext));
341
342 s->av_class = &av_format_context_class;
343
344 av_opt_set_defaults(s);
345 }
346
347 AVFormatContext *av_alloc_format_context(void)
348 {
349 AVFormatContext *ic;
350 ic = av_malloc(sizeof(AVFormatContext));
351 if (!ic) return ic;
352 avformat_get_context_defaults(ic);
353 ic->av_class = &av_format_context_class;
354 return ic;
355 }
356
357 int av_open_input_stream(AVFormatContext **ic_ptr,
358 ByteIOContext *pb, const char *filename,
359 AVInputFormat *fmt, AVFormatParameters *ap)
360 {
361 int err;
362 AVFormatContext *ic;
363 AVFormatParameters default_ap;
364
365 if(!ap){
366 ap=&default_ap;
367 memset(ap, 0, sizeof(default_ap));
368 }
369
370 if(!ap->prealloced_context)
371 ic = av_alloc_format_context();
372 else
373 ic = *ic_ptr;
374 if (!ic) {
375 err = AVERROR(ENOMEM);
376 goto fail;
377 }
378 ic->iformat = fmt;
379 ic->pb = pb;
380 ic->duration = AV_NOPTS_VALUE;
381 ic->start_time = AV_NOPTS_VALUE;
382 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
383
384 /* allocate private data */
385 if (fmt->priv_data_size > 0) {
386 ic->priv_data = av_mallocz(fmt->priv_data_size);
387 if (!ic->priv_data) {
388 err = AVERROR(ENOMEM);
389 goto fail;
390 }
391 } else {
392 ic->priv_data = NULL;
393 }
394
395 err = ic->iformat->read_header(ic, ap);
396 if (err < 0)
397 goto fail;
398
399 if (pb && !ic->data_offset)
400 ic->data_offset = url_ftell(ic->pb);
401
402 *ic_ptr = ic;
403 return 0;
404 fail:
405 if (ic) {
406 av_freep(&ic->priv_data);
407 }
408 av_free(ic);
409 *ic_ptr = NULL;
410 return err;
411 }
412
413 /** size of probe buffer, for guessing file type from file contents */
414 #define PROBE_BUF_MIN 2048
415 #define PROBE_BUF_MAX (1<<20)
416
417 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
418 AVInputFormat *fmt,
419 int buf_size,
420 AVFormatParameters *ap)
421 {
422 int err, probe_size;
423 AVProbeData probe_data, *pd = &probe_data;
424 ByteIOContext *pb = NULL;
425
426 pd->filename = "";
427 if (filename)
428 pd->filename = filename;
429 pd->buf = NULL;
430 pd->buf_size = 0;
431
432 if (!fmt) {
433 /* guess format if no file can be opened */
434 fmt = av_probe_input_format(pd, 0);
435 }
436
437 /* Do not open file if the format does not need it. XXX: specific
438 hack needed to handle RTSP/TCP */
439 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
440 /* if no file needed do not try to open one */
441 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
442 goto fail;
443 }
444 if (buf_size > 0) {
445 url_setbufsize(pb, buf_size);
446 }
447
448 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
449 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
450 /* read probe data */
451 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
452 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
453 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
454 if (url_fseek(pb, 0, SEEK_SET) < 0) {
455 url_fclose(pb);
456 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
457 pb = NULL;
458 err = AVERROR(EIO);
459 goto fail;
460 }
461 }
462 /* guess file format */
463 fmt = av_probe_input_format2(pd, 1, &score);
464 }
465 av_freep(&pd->buf);
466 }
467
468 /* if still no format found, error */
469 if (!fmt) {
470 err = AVERROR_NOFMT;
471 goto fail;
472 }
473
474 /* check filename in case an image number is expected */
475 if (fmt->flags & AVFMT_NEEDNUMBER) {
476 if (!av_filename_number_test(filename)) {
477 err = AVERROR_NUMEXPECTED;
478 goto fail;
479 }
480 }
481 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
482 if (err)
483 goto fail;
484 return 0;
485 fail:
486 av_freep(&pd->buf);
487 if (pb)
488 url_fclose(pb);
489 *ic_ptr = NULL;
490 return err;
491
492 }
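/**
 * Minimal demuxer setup built on the functions above (illustrative sketch;
 * assumes av_register_all() was called and ignores most errors):
 * @code
 *   AVFormatContext *ic;
 *   if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) == 0) {
 *       if (av_find_stream_info(ic) >= 0)
 *           dump_format(ic, 0, "input.avi", 0);
 *       av_close_input_file(ic);
 *   }
 * @endcode
 */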
493
494 /*******************************************************/
495
496 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
497 {
498 int ret;
499 AVStream *st;
500 av_init_packet(pkt);
501 ret= s->iformat->read_packet(s, pkt);
502 if (ret < 0)
503 return ret;
504 st= s->streams[pkt->stream_index];
505
506 switch(st->codec->codec_type){
507 case CODEC_TYPE_VIDEO:
508 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
509 break;
510 case CODEC_TYPE_AUDIO:
511 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
512 break;
513 case CODEC_TYPE_SUBTITLE:
514 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
515 break;
516 }
517
518 return ret;
519 }
520
521 /**********************************************************/
522
523 /**
524 * Get the number of samples of an audio frame. Return -1 on error.
525 */
526 static int get_audio_frame_size(AVCodecContext *enc, int size)
527 {
528 int frame_size;
529
530 if (enc->frame_size <= 1) {
531 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
532
533 if (bits_per_sample) {
534 if (enc->channels == 0)
535 return -1;
536 frame_size = (size << 3) / (bits_per_sample * enc->channels);
537 } else {
538 /* used for example by ADPCM codecs */
539 if (enc->bit_rate == 0)
540 return -1;
541 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
542 }
543 } else {
544 frame_size = enc->frame_size;
545 }
546 return frame_size;
547 }
548
549
550 /**
551 * Compute the frame duration: on return, *pnum / *pden is the duration in seconds. Both are set to 0 if it is not available.
552 */
553 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
554 AVCodecParserContext *pc, AVPacket *pkt)
555 {
556 int frame_size;
557
558 *pnum = 0;
559 *pden = 0;
560 switch(st->codec->codec_type) {
561 case CODEC_TYPE_VIDEO:
562 if(st->time_base.num*1000LL > st->time_base.den){
563 *pnum = st->time_base.num;
564 *pden = st->time_base.den;
565 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
566 *pnum = st->codec->time_base.num;
567 *pden = st->codec->time_base.den;
568 if (pc && pc->repeat_pict) {
569 *pden *= 2;
570 *pnum = (*pnum) * (2 + pc->repeat_pict);
571 }
572 }
573 break;
574 case CODEC_TYPE_AUDIO:
575 frame_size = get_audio_frame_size(st->codec, pkt->size);
576 if (frame_size < 0)
577 break;
578 *pnum = frame_size;
579 *pden = st->codec->sample_rate;
580 break;
581 default:
582 break;
583 }
584 }
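/*
 * Worked example for the audio branch above: a 4608-byte packet of 16-bit
 * stereo PCM at 48000 Hz gives frame_size = (4608*8)/(16*2) = 1152 samples,
 * so *pnum / *pden = 1152/48000 = 0.024, i.e. 24 ms per packet.
 */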
585
586 static int is_intra_only(AVCodecContext *enc){
587 if(enc->codec_type == CODEC_TYPE_AUDIO){
588 return 1;
589 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
590 switch(enc->codec_id){
591 case CODEC_ID_MJPEG:
592 case CODEC_ID_MJPEGB:
593 case CODEC_ID_LJPEG:
594 case CODEC_ID_RAWVIDEO:
595 case CODEC_ID_DVVIDEO:
596 case CODEC_ID_HUFFYUV:
597 case CODEC_ID_FFVHUFF:
598 case CODEC_ID_ASV1:
599 case CODEC_ID_ASV2:
600 case CODEC_ID_VCR1:
601 return 1;
602 default: break;
603 }
604 }
605 return 0;
606 }
607
608 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
609 int64_t dts, int64_t pts)
610 {
611 AVStream *st= s->streams[stream_index];
612 AVPacketList *pktl= s->packet_buffer;
613
614 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE)
615 return;
616
617 st->first_dts= dts - st->cur_dts;
618 st->cur_dts= dts;
619
620 for(; pktl; pktl= pktl->next){
621 if(pktl->pkt.stream_index != stream_index)
622 continue;
623 //FIXME think more about this check
624 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
625 pktl->pkt.pts += st->first_dts;
626
627 if(pktl->pkt.dts != AV_NOPTS_VALUE)
628 pktl->pkt.dts += st->first_dts;
629
630 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
631 st->start_time= pktl->pkt.pts;
632 }
633 if (st->start_time == AV_NOPTS_VALUE)
634 st->start_time = pts;
635 }
636
637 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
638 {
639 AVPacketList *pktl= s->packet_buffer;
640
641 assert(pkt->duration && !st->cur_dts);
642
643 for(; pktl; pktl= pktl->next){
644 if(pktl->pkt.stream_index != pkt->stream_index)
645 continue;
646 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
647 && !pktl->pkt.duration){
648 pktl->pkt.pts= pktl->pkt.dts= st->cur_dts;
649 st->cur_dts += pkt->duration;
650 pktl->pkt.duration= pkt->duration;
651 }else
652 break;
653 }
654 }
655
656 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
657 AVCodecParserContext *pc, AVPacket *pkt)
658 {
659 int num, den, presentation_delayed, delay, i;
660 int64_t offset;
661
662 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
663 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
664 pkt->dts -= 1LL<<st->pts_wrap_bits;
665 }
666
667 if (pkt->duration == 0) {
668 compute_frame_duration(&num, &den, st, pc, pkt);
669 if (den && num) {
670 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
671
672 if(st->cur_dts == 0 && pkt->duration != 0)
673 update_initial_durations(s, st, pkt);
674 }
675 }
676
677 /* correct timestamps with byte offset if demuxers only have timestamps
678 on packet boundaries */
679 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
680 /* this will estimate bitrate based on this frame's duration and size */
681 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
682 if(pkt->pts != AV_NOPTS_VALUE)
683 pkt->pts += offset;
684 if(pkt->dts != AV_NOPTS_VALUE)
685 pkt->dts += offset;
686 }
687
688 /* do we have a video B-frame ? */
689 delay= st->codec->has_b_frames;
690 presentation_delayed = 0;
691 /* XXX: need has_b_frame, but cannot get it if the codec is
692 not initialized */
693 if (delay &&
694 pc && pc->pict_type != FF_B_TYPE)
695 presentation_delayed = 1;
696 /* This may be redundant, but it should not hurt. */
697 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
698 presentation_delayed = 1;
699
700 if(st->cur_dts == AV_NOPTS_VALUE){
701 st->cur_dts = 0; //FIXME maybe set it to 0 during init
702 }
703
704 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
705 /* interpolate PTS and DTS if they are not present */
706 if(delay <=1){
707 if (presentation_delayed) {
708 /* DTS = decompression timestamp */
709 /* PTS = presentation timestamp */
710 if (pkt->dts == AV_NOPTS_VALUE)
711 pkt->dts = st->last_IP_pts;
712 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
713 if (pkt->dts == AV_NOPTS_VALUE)
714 pkt->dts = st->cur_dts;
715
716 /* this is tricky: the dts must be incremented by the duration
717 of the frame we are displaying, i.e. the last I- or P-frame */
718 if (st->last_IP_duration == 0)
719 st->last_IP_duration = pkt->duration;
720 st->cur_dts = pkt->dts + st->last_IP_duration;
721 st->last_IP_duration = pkt->duration;
722 st->last_IP_pts= pkt->pts;
723 /* cannot compute PTS if not present (we can compute it only
724 by knowing the future) */
725 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
726 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
727 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
728 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
729 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
730 pkt->pts += pkt->duration;
731 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
732 }
733 }
734
735 /* presentation is not delayed: PTS and DTS are the same */
736 if(pkt->pts == AV_NOPTS_VALUE)
737 pkt->pts = pkt->dts;
738 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
739 if(pkt->pts == AV_NOPTS_VALUE)
740 pkt->pts = st->cur_dts;
741 pkt->dts = pkt->pts;
742 st->cur_dts = pkt->pts + pkt->duration;
743 }
744 }
745
746 if(pkt->pts != AV_NOPTS_VALUE){
747 st->pts_buffer[0]= pkt->pts;
748 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
749 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
750 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
751 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
752 if(pkt->dts == AV_NOPTS_VALUE)
753 pkt->dts= st->pts_buffer[0];
754 if(delay>1){
755 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
756 }
757 if(pkt->dts > st->cur_dts)
758 st->cur_dts = pkt->dts;
759 }
760
761 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
762
763 /* update flags */
764 if(is_intra_only(st->codec))
765 pkt->flags |= PKT_FLAG_KEY;
766 else if (pc) {
767 pkt->flags = 0;
768 /* keyframe computation */
769 if (pc->pict_type == FF_I_TYPE)
770 pkt->flags |= PKT_FLAG_KEY;
771 }
772 }
773
774 void av_destruct_packet_nofree(AVPacket *pkt)
775 {
776 pkt->data = NULL; pkt->size = 0;
777 }
778
779 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
780 {
781 AVStream *st;
782 int len, ret, i;
783
784 av_init_packet(pkt);
785
786 for(;;) {
787 /* select current input stream component */
788 st = s->cur_st;
789 if (st) {
790 if (!st->need_parsing || !st->parser) {
791 /* no parsing needed: we just output the packet as is */
792 /* raw data support */
793 *pkt = s->cur_pkt;
794 compute_pkt_fields(s, st, NULL, pkt);
795 s->cur_st = NULL;
796 break;
797 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
798 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
799 s->cur_ptr, s->cur_len,
800 s->cur_pkt.pts, s->cur_pkt.dts);
801 s->cur_pkt.pts = AV_NOPTS_VALUE;
802 s->cur_pkt.dts = AV_NOPTS_VALUE;
803 /* increment read pointer */
804 s->cur_ptr += len;
805 s->cur_len -= len;
806
807 /* return packet if any */
808 if (pkt->size) {
809 got_packet:
810 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
811 pkt->duration = 0;
812 pkt->stream_index = st->index;
813 pkt->pts = st->parser->pts;
814 pkt->dts = st->parser->dts;
815 pkt->destruct = av_destruct_packet_nofree;
816 compute_pkt_fields(s, st, st->parser, pkt);
817
818 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
819 ff_reduce_index(s, st->index);
820 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
821 0, 0, AVINDEX_KEYFRAME);
822 }
823
824 break;
825 }
826 } else {
827 /* free packet */
828 av_free_packet(&s->cur_pkt);
829 s->cur_st = NULL;
830 }
831 } else {
832 /* read next packet */
833 ret = av_read_packet(s, &s->cur_pkt);
834 if (ret < 0) {
835 if (ret == AVERROR(EAGAIN))
836 return ret;
837 /* return the last frames, if any */
838 for(i = 0; i < s->nb_streams; i++) {
839 st = s->streams[i];
840 if (st->parser && st->need_parsing) {
841 av_parser_parse(st->parser, st->codec,
842 &pkt->data, &pkt->size,
843 NULL, 0,
844 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
845 if (pkt->size)
846 goto got_packet;
847 }
848 }
849 /* no more packets: really terminate parsing */
850 return ret;
851 }
852
853 st = s->streams[s->cur_pkt.stream_index];
854 if(st->codec->debug & FF_DEBUG_PTS)
855 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
856 s->cur_pkt.stream_index,
857 s->cur_pkt.pts,
858 s->cur_pkt.dts,
859 s->cur_pkt.size);
860
861 s->cur_st = st;
862 s->cur_ptr = s->cur_pkt.data;
863 s->cur_len = s->cur_pkt.size;
864 if (st->need_parsing && !st->parser) {
865 st->parser = av_parser_init(st->codec->codec_id);
866 if (!st->parser) {
867 /* no parser available: just output the raw packets */
868 st->need_parsing = AVSTREAM_PARSE_NONE;
869 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
870 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
871 }
872 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
873 st->parser->last_frame_offset=
874 st->parser->cur_offset= s->cur_pkt.pos;
875 }
876 }
877 }
878 }
879 if(st->codec->debug & FF_DEBUG_PTS)
880 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
881 pkt->stream_index,
882 pkt->pts,
883 pkt->dts,
884 pkt->size);
885
886 return 0;
887 }
888
889 static AVPacket *add_to_pktbuf(AVFormatContext *s, AVPacket *pkt){
890 AVPacketList *pktl= s->packet_buffer;
891 AVPacketList **plast_pktl= &s->packet_buffer;
892
893 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
894
895 pktl = av_mallocz(sizeof(AVPacketList));
896 if (!pktl)
897 return NULL;
898
899 /* add the packet in the buffered packet list */
900 *plast_pktl = pktl;
901 pktl->pkt= *pkt;
902 return &pktl->pkt;
903 }
904
905 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
906 {
907 AVPacketList *pktl;
908 int eof=0;
909 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
910
911 for(;;){
912 pktl = s->packet_buffer;
913 if (pktl) {
914 AVPacket *next_pkt= &pktl->pkt;
915
916 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
917 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
918 if( pktl->pkt.stream_index == next_pkt->stream_index
919 && next_pkt->dts < pktl->pkt.dts
920 && pktl->pkt.pts != pktl->pkt.dts //not b frame
921 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
922 next_pkt->pts= pktl->pkt.dts;
923 }
924 pktl= pktl->next;
925 }
926 pktl = s->packet_buffer;
927 }
928
929 if( next_pkt->pts != AV_NOPTS_VALUE
930 || next_pkt->dts == AV_NOPTS_VALUE
931 || !genpts || eof){
932 /* read packet from packet buffer, if there is data */
933 *pkt = *next_pkt;
934 s->packet_buffer = pktl->next;
935 av_free(pktl);
936 return 0;
937 }
938 }
939 if(genpts){
940 int ret= av_read_frame_internal(s, pkt);
941 if(ret<0){
942 if(pktl && ret != AVERROR(EAGAIN)){
943 eof=1;
944 continue;
945 }else
946 return ret;
947 }
948
949 if(av_dup_packet(add_to_pktbuf(s, pkt)) < 0)
950 return AVERROR(ENOMEM);
951 }else{
952 assert(!s->packet_buffer);
953 return av_read_frame_internal(s, pkt);
954 }
955 }
956 }
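/**
 * Typical packet reading loop on top of av_read_frame() (illustrative sketch;
 * 'ic' is an opened AVFormatContext and 'video_index' a chosen stream index):
 * @code
 *   AVPacket pkt;
 *   while (av_read_frame(ic, &pkt) >= 0) {
 *       if (pkt.stream_index == video_index) {
 *           // pkt.data/pkt.size hold one complete frame; pts and dts are in
 *           // ic->streams[video_index]->time_base units
 *       }
 *       av_free_packet(&pkt);
 *   }
 * @endcode
 */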
957
958 /* XXX: empty and free the packet queue */
959 static void flush_packet_queue(AVFormatContext *s)
960 {
961 AVPacketList *pktl;
962
963 for(;;) {
964 pktl = s->packet_buffer;
965 if (!pktl)
966 break;
967 s->packet_buffer = pktl->next;
968 av_free_packet(&pktl->pkt);
969 av_free(pktl);
970 }
971 }
972
973 /*******************************************************/
974 /* seek support */
975
976 int av_find_default_stream_index(AVFormatContext *s)
977 {
978 int i;
979 AVStream *st;
980
981 if (s->nb_streams <= 0)
982 return -1;
983 for(i = 0; i < s->nb_streams; i++) {
984 st = s->streams[i];
985 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
986 return i;
987 }
988 }
989 return 0;
990 }
991
992 /**
993 * Flush the frame reader.
994 */
995 static void av_read_frame_flush(AVFormatContext *s)
996 {
997 AVStream *st;
998 int i;
999
1000 flush_packet_queue(s);
1001
1002 /* free previous packet */
1003 if (s->cur_st) {
1004 if (s->cur_st->parser)
1005 av_free_packet(&s->cur_pkt);
1006 s->cur_st = NULL;
1007 }
1008 /* fail safe */
1009 s->cur_ptr = NULL;
1010 s->cur_len = 0;
1011
1012 /* for each stream, reset read state */
1013 for(i = 0; i < s->nb_streams; i++) {
1014 st = s->streams[i];
1015
1016 if (st->parser) {
1017 av_parser_close(st->parser);
1018 st->parser = NULL;
1019 }
1020 st->last_IP_pts = AV_NOPTS_VALUE;
1021 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1022 }
1023 }
1024
1025 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1026 int i;
1027
1028 for(i = 0; i < s->nb_streams; i++) {
1029 AVStream *st = s->streams[i];
1030
1031 st->cur_dts = av_rescale(timestamp,
1032 st->time_base.den * (int64_t)ref_st->time_base.num,
1033 st->time_base.num * (int64_t)ref_st->time_base.den);
1034 }
1035 }
1036
1037 void ff_reduce_index(AVFormatContext *s, int stream_index)
1038 {
1039 AVStream *st= s->streams[stream_index];
1040 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1041
1042 if((unsigned)st->nb_index_entries >= max_entries){
1043 int i;
1044 for(i=0; 2*i<st->nb_index_entries; i++)
1045 st->index_entries[i]= st->index_entries[2*i];
1046 st->nb_index_entries= i;
1047 }
1048 }
1049
1050 int av_add_index_entry(AVStream *st,
1051 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1052 {
1053 AVIndexEntry *entries, *ie;
1054 int index;
1055
1056 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1057 return -1;
1058
1059 entries = av_fast_realloc(st->index_entries,
1060 &st->index_entries_allocated_size,
1061 (st->nb_index_entries + 1) *
1062 sizeof(AVIndexEntry));
1063 if(!entries)
1064 return -1;
1065
1066 st->index_entries= entries;
1067
1068 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1069
1070 if(index<0){
1071 index= st->nb_index_entries++;
1072 ie= &entries[index];
1073 assert(index==0 || ie[-1].timestamp < timestamp);
1074 }else{
1075 ie= &entries[index];
1076 if(ie->timestamp != timestamp){
1077 if(ie->timestamp <= timestamp)
1078 return -1;
1079 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1080 st->nb_index_entries++;
1081 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1082 distance= ie->min_distance;
1083 }
1084
1085 ie->pos = pos;
1086 ie->timestamp = timestamp;
1087 ie->min_distance= distance;
1088 ie->size= size;
1089 ie->flags = flags;
1090
1091 return index;
1092 }
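/**
 * How a demuxer typically feeds the index (illustrative sketch): whenever a
 * keyframe is seen at file position 'pos' with timestamp 'ts' (in stream
 * time_base units), it is recorded so av_index_search_timestamp() and the
 * seeking code below can locate it later.
 * @code
 *   ff_reduce_index(s, st->index);
 *   av_add_index_entry(st, pos, ts, 0, 0, AVINDEX_KEYFRAME);
 * @endcode
 */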
1093
1094 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1095 int flags)
1096 {
1097 AVIndexEntry *entries= st->index_entries;
1098 int nb_entries= st->nb_index_entries;
1099 int a, b, m;
1100 int64_t timestamp;
1101
1102 a = - 1;
1103 b = nb_entries;
1104
1105 while (b - a > 1) {
1106 m = (a + b) >> 1;
1107 timestamp = entries[m].timestamp;
1108 if(timestamp >= wanted_timestamp)
1109 b = m;
1110 if(timestamp <= wanted_timestamp)
1111 a = m;
1112 }
1113 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1114
1115 if(!(flags & AVSEEK_FLAG_ANY)){
1116 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1117 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1118 }
1119 }
1120
1121 if(m == nb_entries)
1122 return -1;
1123 return m;
1124 }
1125
1126 #define DEBUG_SEEK
1127
1128 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1129 AVInputFormat *avif= s->iformat;
1130 int64_t pos_min, pos_max, pos, pos_limit;
1131 int64_t ts_min, ts_max, ts;
1132 int index;
1133 AVStream *st;
1134
1135 if (stream_index < 0)
1136 return -1;
1137
1138 #ifdef DEBUG_SEEK
1139 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1140 #endif
1141
1142 ts_max=
1143 ts_min= AV_NOPTS_VALUE;
1144 pos_limit= -1; //gcc falsely says it may be uninitialized
1145
1146 st= s->streams[stream_index];
1147 if(st->index_entries){
1148 AVIndexEntry *e;
1149
1150 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1151 index= FFMAX(index, 0);
1152 e= &st->index_entries[index];
1153
1154 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1155 pos_min= e->pos;
1156 ts_min= e->timestamp;
1157 #ifdef DEBUG_SEEK
1158 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1159 pos_min,ts_min);
1160 #endif
1161 }else{
1162 assert(index==0);
1163 }
1164
1165 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1166 assert(index < st->nb_index_entries);
1167 if(index >= 0){
1168 e= &st->index_entries[index];
1169 assert(e->timestamp >= target_ts);
1170 pos_max= e->pos;
1171 ts_max= e->timestamp;
1172 pos_limit= pos_max - e->min_distance;
1173 #ifdef DEBUG_SEEK
1174 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1175 pos_max,pos_limit, ts_max);
1176 #endif
1177 }
1178 }
1179
1180 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1181 if(pos<0)
1182 return -1;
1183
1184 /* do the seek */
1185 url_fseek(s->pb, pos, SEEK_SET);
1186
1187 av_update_cur_dts(s, st, ts);
1188
1189 return 0;
1190 }
1191
1192 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1193 int64_t pos, ts;
1194 int64_t start_pos, filesize;
1195 int no_change;
1196
1197 #ifdef DEBUG_SEEK
1198 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1199 #endif
1200
1201 if(ts_min == AV_NOPTS_VALUE){
1202 pos_min = s->data_offset;
1203 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1204 if (ts_min == AV_NOPTS_VALUE)
1205 return -1;
1206 }
1207
1208 if(ts_max == AV_NOPTS_VALUE){
1209 int step= 1024;
1210 filesize = url_fsize(s->pb);
1211 pos_max = filesize - 1;
1212 do{
1213 pos_max -= step;
1214 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1215 step += step;
1216 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1217 if (ts_max == AV_NOPTS_VALUE)
1218 return -1;
1219
1220 for(;;){
1221 int64_t tmp_pos= pos_max + 1;
1222 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1223 if(tmp_ts == AV_NOPTS_VALUE)
1224 break;
1225 ts_max= tmp_ts;
1226 pos_max= tmp_pos;
1227 if(tmp_pos >= filesize)
1228 break;
1229 }
1230 pos_limit= pos_max;
1231 }
1232
1233 if(ts_min > ts_max){
1234 return -1;
1235 }else if(ts_min == ts_max){
1236 pos_limit= pos_min;
1237 }
1238
1239 no_change=0;
1240 while (pos_min < pos_limit) {
1241 #ifdef DEBUG_SEEK
1242 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1243 pos_min, pos_max,
1244 ts_min, ts_max);
1245 #endif
1246 assert(pos_limit <= pos_max);
1247
1248 if(no_change==0){
1249 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1250 // interpolate position (better than dichotomy)
1251 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1252 + pos_min - approximate_keyframe_distance;
1253 }else if(no_change==1){
1254 // bisection, if interpolation failed to change min or max pos last time
1255 pos = (pos_min + pos_limit)>>1;
1256 }else{
1257 /* linear search if bisection failed, can only happen if there
1258 are very few or no keyframes between min/max */
1259 pos=pos_min;
1260 }
1261 if(pos <= pos_min)
1262 pos= pos_min + 1;
1263 else if(pos > pos_limit)
1264 pos= pos_limit;
1265 start_pos= pos;
1266
1267 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1268 if(pos == pos_max)
1269 no_change++;
1270 else
1271 no_change=0;
1272 #ifdef DEBUG_SEEK
1273 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1274 #endif
1275 if(ts == AV_NOPTS_VALUE){
1276 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1277 return -1;
1278 }
1279 assert(ts != AV_NOPTS_VALUE);
1280 if (target_ts <= ts) {
1281 pos_limit = start_pos - 1;
1282 pos_max = pos;
1283 ts_max = ts;
1284 }
1285 if (target_ts >= ts) {
1286 pos_min = pos;
1287 ts_min = ts;
1288 }
1289 }
1290
1291 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1292 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1293 #ifdef DEBUG_SEEK
1294 pos_min = pos;
1295 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1296 pos_min++;
1297 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1298 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1299 pos, ts_min, target_ts, ts_max);
1300 #endif
1301 *ts_ret= ts;
1302 return pos;
1303 }
1304
1305 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1306 int64_t pos_min, pos_max;
1307 #if 0
1308 AVStream *st;
1309
1310 if (stream_index < 0)
1311 return -1;
1312
1313 st= s->streams[stream_index];
1314 #endif
1315
1316 pos_min = s->data_offset;
1317 pos_max = url_fsize(s->pb) - 1;
1318
1319 if (pos < pos_min) pos= pos_min;
1320 else if(pos > pos_max) pos= pos_max;
1321
1322 url_fseek(s->pb, pos, SEEK_SET);
1323
1324 #if 0
1325 av_update_cur_dts(s, st, ts);
1326 #endif
1327 return 0;
1328 }
1329
1330 static int av_seek_frame_generic(AVFormatContext *s,
1331 int stream_index, int64_t timestamp, int flags)
1332 {
1333 int index;
1334 AVStream *st;
1335 AVIndexEntry *ie;
1336
1337 st = s->streams[stream_index];
1338
1339 index = av_index_search_timestamp(st, timestamp, flags);
1340
1341 if(index < 0 || index==st->nb_index_entries-1){
1342 int i;
1343 AVPacket pkt;
1344
1345 if(st->index_entries && st->nb_index_entries){
1346 ie= &st->index_entries[st->nb_index_entries-1];
1347 url_fseek(s->pb, ie->pos, SEEK_SET);
1348 av_update_cur_dts(s, st, ie->timestamp);
1349 }else
1350 url_fseek(s->pb, 0, SEEK_SET);
1351
1352 for(i=0;; i++) {
1353 int ret = av_read_frame(s, &pkt);
1354 if(ret<0)
1355 break;
1356 av_free_packet(&pkt);
1357 if(stream_index == pkt.stream_index){
1358 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1359 break;
1360 }
1361 }
1362 index = av_index_search_timestamp(st, timestamp, flags);
1363 }
1364 if (index < 0)
1365 return -1;
1366
1367 av_read_frame_flush(s);
1368 if (s->iformat->read_seek){
1369 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1370 return 0;
1371 }
1372 ie = &st->index_entries[index];
1373 url_fseek(s->pb, ie->pos, SEEK_SET);
1374
1375 av_update_cur_dts(s, st, ie->timestamp);
1376
1377 return 0;
1378 }
1379
1380 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1381 {
1382 int ret;
1383 AVStream *st;
1384
1385 av_read_frame_flush(s);
1386
1387 if(flags & AVSEEK_FLAG_BYTE)
1388 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1389
1390 if(stream_index < 0){
1391 stream_index= av_find_default_stream_index(s);
1392 if(stream_index < 0)
1393 return -1;
1394
1395 st= s->streams[stream_index];
1396 /* timestamp for default must be expressed in AV_TIME_BASE units */
1397 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1398 }
1399 st= s->streams[stream_index];
1400
1401 /* first, we try the format specific seek */
1402 if (s->iformat->read_seek)
1403 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1404 else
1405 ret = -1;
1406 if (ret >= 0) {
1407 return 0;
1408 }
1409
1410 if(s->iformat->read_timestamp)
1411 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1412 else
1413 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1414 }
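/**
 * Seeking to an absolute time with av_seek_frame() (illustrative sketch;
 * 'ic' is an opened AVFormatContext): with stream_index -1 the timestamp is
 * given in AV_TIME_BASE units and the default stream is used.
 * @code
 *   int64_t target = 10 * (int64_t)AV_TIME_BASE; // 10 seconds into the file
 *   if (av_seek_frame(ic, -1, target, AVSEEK_FLAG_BACKWARD) >= 0) {
 *       // demuxing resumes at a keyframe at or before the 10 s mark
 *   }
 * @endcode
 */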
1415
1416 /*******************************************************/
1417
1418 /**
1419 * Return TRUE if the file has an accurate duration for at least one stream.
1420 *
1421 * @return TRUE if at least one stream component has a known duration.
1422 */
1423 static int av_has_duration(AVFormatContext *ic)
1424 {
1425 int i;
1426 AVStream *st;
1427
1428 for(i = 0;i < ic->nb_streams; i++) {
1429 st = ic->streams[i];
1430 if (st->duration != AV_NOPTS_VALUE)
1431 return 1;
1432 }
1433 return 0;
1434 }
1435
1436 /**
1437 * Estimate the file timings from those of each component stream.
1438 *
1439 * Also computes the global bitrate if possible.
1440 */
1441 static void av_update_stream_timings(AVFormatContext *ic)
1442 {
1443 int64_t start_time, start_time1, end_time, end_time1;
1444 int64_t duration, duration1;
1445 int i;
1446 AVStream *st;
1447
1448 start_time = INT64_MAX;
1449 end_time = INT64_MIN;
1450 duration = INT64_MIN;
1451 for(i = 0;i < ic->nb_streams; i++) {
1452 st = ic->streams[i];
1453 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1454 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1455 if (start_time1 < start_time)
1456 start_time = start_time1;
1457 if (st->duration != AV_NOPTS_VALUE) {
1458 end_time1 = start_time1
1459 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1460 if (end_time1 > end_time)
1461 end_time = end_time1;
1462 }
1463 }
1464 if (st->duration != AV_NOPTS_VALUE) {
1465 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1466 if (duration1 > duration)
1467 duration = duration1;
1468 }
1469 }
1470 if (start_time != INT64_MAX) {
1471 ic->start_time = start_time;
1472 if (end_time != INT64_MIN) {
1473 if (end_time - start_time > duration)
1474 duration = end_time - start_time;
1475 }
1476 }
1477 if (duration != INT64_MIN) {
1478 ic->duration = duration;
1479 if (ic->file_size > 0) {
1480 /* compute the bitrate */
1481 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1482 (double)ic->duration;
1483 }
1484 }
1485 }
1486
1487 static void fill_all_stream_timings(AVFormatContext *ic)
1488 {
1489 int i;
1490 AVStream *st;
1491
1492 av_update_stream_timings(ic);
1493 for(i = 0;i < ic->nb_streams; i++) {
1494 st = ic->streams[i];
1495 if (st->start_time == AV_NOPTS_VALUE) {
1496 if(ic->start_time != AV_NOPTS_VALUE)
1497 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1498 if(ic->duration != AV_NOPTS_VALUE)
1499 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1500 }
1501 }
1502 }
1503
1504 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1505 {
1506 int64_t filesize, duration;
1507 int bit_rate, i;
1508 AVStream *st;
1509
1510 /* if bit_rate is already set, we believe it */
1511 if (ic->bit_rate == 0) {
1512 bit_rate = 0;
1513 for(i=0;i<ic->nb_streams;i++) {
1514 st = ic->streams[i];
1515 bit_rate += st->codec->bit_rate;
1516 }
1517 ic->bit_rate = bit_rate;
1518 }
1519
1520 /* if duration is already set, we believe it */
1521 if (ic->duration == AV_NOPTS_VALUE &&
1522 ic->bit_rate != 0 &&
1523 ic->file_size != 0) {
1524 filesize = ic->file_size;
1525 if (filesize > 0) {
1526 for(i = 0; i < ic->nb_streams; i++) {
1527 st = ic->streams[i];
1528 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1529 if (st->duration == AV_NOPTS_VALUE)
1530 st->duration = duration;
1531 }
1532 }
1533 }
1534 }
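/*
 * Worked example for the fallback above: a 700 MiB file with a total bit rate
 * of 1 Mbit/s gives 8*filesize/bit_rate = 8*734003200/1000000 ~= 5872 seconds,
 * which is then expressed in each stream's own time base.
 */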
1535
1536 #define DURATION_MAX_READ_SIZE 250000
1537
1538 /* only usable for MPEG-PS streams */
1539 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1540 {
1541 AVPacket pkt1, *pkt = &pkt1;
1542 AVStream *st;
1543 int read_size, i, ret;
1544 int64_t end_time;
1545 int64_t filesize, offset, duration;
1546
1547 /* free previous packet */
1548 if (ic->cur_st && ic->cur_st->parser)
1549 av_free_packet(&ic->cur_pkt);
1550 ic->cur_st = NULL;
1551
1552 /* flush packet queue */
1553 flush_packet_queue(ic);
1554
1555 for(i=0;i<ic->nb_streams;i++) {
1556 st = ic->streams[i];
1557 if (st->parser) {
1558 av_parser_close(st->parser);
1559 st->parser= NULL;
1560 }
1561 }
1562
1563 /* we read the first packets to get the first PTS (not fully
1564 accurate, but it is enough now) */
1565 url_fseek(ic->pb, 0, SEEK_SET);
1566 read_size = 0;
1567 for(;;) {
1568 if (read_size >= DURATION_MAX_READ_SIZE)
1569 break;
1570 /* if all info is available, we can stop */
1571 for(i = 0;i < ic->nb_streams; i++) {
1572 st = ic->streams[i];
1573 if (st->start_time == AV_NOPTS_VALUE)
1574 break;
1575 }
1576 if (i == ic->nb_streams)
1577 break;
1578
1579 ret = av_read_packet(ic, pkt);
1580 if (ret != 0)
1581 break;
1582 read_size += pkt->size;
1583 st = ic->streams[pkt->stream_index];
1584 if (pkt->pts != AV_NOPTS_VALUE) {
1585 if (st->start_time == AV_NOPTS_VALUE)
1586 st->start_time = pkt->pts;
1587 }
1588 av_free_packet(pkt);
1589 }
1590
1591 /* estimate the end time (duration) */
1592 /* XXX: may need to support wrapping */
1593 filesize = ic->file_size;
1594 offset = filesize - DURATION_MAX_READ_SIZE;
1595 if (offset < 0)
1596 offset = 0;
1597
1598 url_fseek(ic->pb, offset, SEEK_SET);
1599 read_size = 0;
1600 for(;;) {
1601 if (read_size >= DURATION_MAX_READ_SIZE)
1602 break;
1603
1604 ret = av_read_packet(ic, pkt);
1605 if (ret != 0)
1606 break;
1607 read_size += pkt->size;
1608 st = ic->streams[pkt->stream_index];
1609 if (pkt->pts != AV_NOPTS_VALUE &&
1610 st->start_time != AV_NOPTS_VALUE) {
1611 end_time = pkt->pts;
1612 duration = end_time - st->start_time;
1613 if (duration > 0) {
1614 if (st->duration == AV_NOPTS_VALUE ||
1615 st->duration < duration)
1616 st->duration = duration;
1617 }
1618 }
1619 av_free_packet(pkt);
1620 }
1621
1622 fill_all_stream_timings(ic);
1623
1624 url_fseek(ic->pb, old_offset, SEEK_SET);
1625 for(i=0; i<ic->nb_streams; i++){
1626 st= ic->streams[i];
1627 st->cur_dts= st->first_dts;
1628 st->last_IP_pts = AV_NOPTS_VALUE;
1629 }
1630 }
1631
1632 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1633 {
1634 int64_t file_size;
1635
1636 /* get the file size, if possible */
1637 if (ic->iformat->flags & AVFMT_NOFILE) {
1638 file_size = 0;
1639 } else {
1640 file_size = url_fsize(ic->pb);
1641 if (file_size < 0)
1642 file_size = 0;
1643 }
1644 ic->file_size = file_size;
1645
1646 if ((!strcmp(ic->iformat->name, "mpeg") ||
1647 !strcmp(ic->iformat->name, "mpegts")) &&
1648 file_size && !url_is_streamed(ic->pb)) {
1649 /* get accurate estimate from the PTSes */
1650 av_estimate_timings_from_pts(ic, old_offset);
1651 } else if (av_has_duration(ic)) {
1652 /* at least one component has timings - we use them for all
1653 the components */
1654 fill_all_stream_timings(ic);
1655 } else {
1656 /* less precise: use bitrate info */
1657 av_estimate_timings_from_bit_rate(ic);
1658 }
1659 av_update_stream_timings(ic);
1660
1661 #if 0
1662 {
1663 int i;
1664 AVStream *st;
1665 for(i = 0;i < ic->nb_streams; i++) {
1666 st = ic->streams[i];
1667 printf("%d: start_time: %0.3f duration: %0.3f\n",
1668 i, (double)st->start_time / AV_TIME_BASE,
1669 (double)st->duration / AV_TIME_BASE);
1670 }
1671 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1672 (double)ic->start_time / AV_TIME_BASE,
1673 (double)ic->duration / AV_TIME_BASE,
1674 ic->bit_rate / 1000);
1675 }
1676 #endif
1677 }
1678
1679 static int has_codec_parameters(AVCodecContext *enc)
1680 {
1681 int val;
1682 switch(enc->codec_type) {
1683 case CODEC_TYPE_AUDIO:
1684 val = enc->sample_rate;
1685 break;
1686 case CODEC_TYPE_VIDEO:
1687 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1688 break;
1689 default:
1690 val = 1;
1691 break;
1692 }
1693 return enc->codec_id != CODEC_ID_NONE && val != 0;
1694 }
1695
1696 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1697 {
1698 int16_t *samples;
1699 AVCodec *codec;
1700 int got_picture, data_size, ret=0;
1701 AVFrame picture;
1702
1703 if(!st->codec->codec){
1704 codec = avcodec_find_decoder(st->codec->codec_id);
1705 if (!codec)
1706 return -1;
1707 ret = avcodec_open(st->codec, codec);
1708 if (ret < 0)
1709 return ret;
1710 }
1711
1712 if(!has_codec_parameters(st->codec)){
1713 switch(st->codec->codec_type) {
1714 case CODEC_TYPE_VIDEO:
1715 ret = avcodec_decode_video(st->codec, &picture,
1716 &got_picture, data, size);
1717 break;
1718 case CODEC_TYPE_AUDIO:
1719 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1720 samples = av_malloc(data_size);
1721 if (!samples)
1722 goto fail;
1723 ret = avcodec_decode_audio2(st->codec, samples,
1724 &data_size, data, size);
1725 av_free(samples);
1726 break;
1727 default:
1728 break;
1729 }
1730 }
1731 fail:
1732 return ret;
1733 }
1734
1735 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
1736 {
1737 AVInputFormat *fmt;
1738 fmt = av_probe_input_format2(pd, 1, &score);
1739
1740 if (fmt) {
1741 if (strncmp(fmt->name, "mp3", 3) == 0)
1742 st->codec->codec_id = CODEC_ID_MP3;
1743 else if (strncmp(fmt->name, "ac3", 3) == 0)
1744 st->codec->codec_id = CODEC_ID_AC3;
1745 }
1746 return !!fmt;
1747 }
1748
1749 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1750 {
1751 while (tags->id != CODEC_ID_NONE) {
1752 if (tags->id == id)
1753 return tags->tag;
1754 tags++;
1755 }
1756 return 0;
1757 }
1758
1759 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1760 {
1761 int i;
1762 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1763 if(tag == tags[i].tag)
1764 return tags[i].id;
1765 }
1766 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1767 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1768 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1769 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1770 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1771 return tags[i].id;
1772 }
1773 return CODEC_ID_NONE;
1774 }
1775
1776 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1777 {
1778 int i;
1779 for(i=0; tags && tags[i]; i++){
1780 int tag= codec_get_tag(tags[i], id);
1781 if(tag) return tag;
1782 }
1783 return 0;
1784 }
1785
1786 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1787 {
1788 int i;
1789 for(i=0; tags && tags[i]; i++){
1790 enum CodecID id= codec_get_id(tags[i], tag);
1791 if(id!=CODEC_ID_NONE) return id;
1792 }
1793 return CODEC_ID_NONE;
1794 }
1795
1796 /* absolute maximum size we read until we abort */
1797 #define MAX_READ_SIZE 5000000
1798
1799 #define MAX_STD_TIMEBASES (60*12+5)
1800 static int get_std_framerate(int i){
1801 if(i<60*12) return i*1001;
1802 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1803 }
1804
1805 /*
1806 * Is the time base unreliable.
1807 * This is a heuristic to balance between quick acceptance of the values in
1808 * the headers vs. some extra checks.
1809 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1810 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1811 * And there are "variable" fps files this needs to detect as well.
1812 */
1813 static int tb_unreliable(AVCodecContext *c){
1814 if( c->time_base.den >= 101L*c->time_base.num
1815 || c->time_base.den < 5L*c->time_base.num
1816 /* || c->codec_tag == ff_get_fourcc("DIVX")
1817 || c->codec_tag == ff_get_fourcc("XVID")*/
1818 || c->codec_id == CODEC_ID_MPEG2VIDEO)
1819 return 1;
1820 return 0;
1821 }
1822
1823 int av_find_stream_info(AVFormatContext *ic)
1824 {
1825 int i, count, ret, read_size, j;
1826 AVStream *st;
1827 AVPacket pkt1, *pkt;
1828 int64_t last_dts[MAX_STREAMS];
1829 int duration_count[MAX_STREAMS]={0};
1830 double (*duration_error)[MAX_STD_TIMEBASES];
1831 offset_t old_offset = url_ftell(ic->pb);
1832 int64_t codec_info_duration[MAX_STREAMS]={0};
1833 int codec_info_nb_frames[MAX_STREAMS]={0};
1834 AVProbeData probe_data[MAX_STREAMS];
1835 int codec_identified[MAX_STREAMS]={0};
1836
1837 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1838 if (!duration_error) return AVERROR(ENOMEM);
1839
1840 for(i=0;i<ic->nb_streams;i++) {
1841 st = ic->streams[i];
1842 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1843 /* if(!st->time_base.num)
1844 st->time_base= */
1845 if(!st->codec->time_base.num)
1846 st->codec->time_base= st->time_base;
1847 }
1848 //only for the split stuff
1849 if (!st->parser) {
1850 st->parser = av_parser_init(st->codec->codec_id);
1851 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
1852 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1853 }
1854 }
1855 }
1856
1857 for(i=0;i<MAX_STREAMS;i++){
1858 last_dts[i]= AV_NOPTS_VALUE;
1859 }
1860
1861 memset(probe_data, 0, sizeof(probe_data));
1862 count = 0;
1863 read_size = 0;
1864 for(;;) {
1865 /* check if one codec still needs to be handled */
1866 for(i=0;i<ic->nb_streams;i++) {
1867 st = ic->streams[i];
1868 if (!has_codec_parameters(st->codec))
1869 break;
1870 /* variable fps and no guess at the real fps */
1871 if( tb_unreliable(st->codec)
1872 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1873 break;
1874 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1875 break;
1876 if(st->first_dts == AV_NOPTS_VALUE)
1877 break;
1878 }
1879 if (i == ic->nb_streams) {
1880 /* NOTE: if the format has no header, then we need to read
1881 some packets to get most of the streams, so we cannot
1882 stop here */
1883 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1884 /* if we found the info for all the codecs, we can stop */
1885 ret = count;
1886 break;
1887 }
1888 }
1889 /* we did not get all the codec info, but we read too much data */
1890 if (read_size >= MAX_READ_SIZE) {
1891 ret = count;
1892 break;
1893 }
1894
1895 /* NOTE: a new stream can be added here if the file has no header
1896 (AVFMTCTX_NOHEADER). */
1897 ret = av_read_frame_internal(ic, &pkt1);
1898 if (ret < 0) {
1899 /* EOF or error */
1900 ret = -1; /* we could not have all the codec parameters before EOF */
1901 for(i=0;i<ic->nb_streams;i++) {
1902 st = ic->streams[i];
1903 if (!has_codec_parameters(st->codec)){
1904 char buf[256];
1905 avcodec_string(buf, sizeof(buf), st->codec, 0);
1906 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1907 } else {
1908 ret = 0;
1909 }
1910 }
1911 break;
1912 }
1913
1914 pkt= add_to_pktbuf(ic, &pkt1);
1915 if(av_dup_packet(pkt) < 0)
1916 return AVERROR(ENOMEM);
1917
1918 read_size += pkt->size;
1919
1920 st = ic->streams[pkt->stream_index];
1921 if(codec_info_nb_frames[st->index]>1)
1922 codec_info_duration[st->index] += pkt->duration;
1923 if (pkt->duration != 0)
1924 codec_info_nb_frames[st->index]++;
1925
1926 {
1927 int index= pkt->stream_index;
1928 int64_t last= last_dts[index];
1929 int64_t duration= pkt->dts - last;
1930
1931 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1932 double dur= duration * av_q2d(st->time_base);
1933
1934 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1935 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
1936 if(duration_count[index] < 2)
1937 memset(duration_error[index], 0, sizeof(*duration_error));
1938 for(i=1; i<MAX_STD_TIMEBASES; i++){
1939 int framerate= get_std_framerate(i);
1940 int ticks= lrintf(dur*framerate/(1001*12));
1941 double error= dur - ticks*1001*12/(double)framerate;
1942 duration_error[index][i] += error*error;
1943 }
1944 duration_count[index]++;
1945 }
1946 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
1947 last_dts[pkt->stream_index]= pkt->dts;
1948
1949 if (st->codec->codec_id == CODEC_ID_NONE) {
1950 AVProbeData *pd = &(probe_data[st->index]);
1951 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
1952 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
1953 pd->buf_size += pkt->size;
1954 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
1955 }
1956 }
1957 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1958 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1959 if(i){
1960 st->codec->extradata_size= i;
1961 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1962 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1963 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1964 }
1965 }
1966
1967 /* If we still have no information, try to open the codec and
1968 decompress a frame. We try to avoid that in most cases, as it
1969 takes longer and uses more memory. For MPEG-4 in QuickTime files,
1970 decompression is needed. */
1971 if (!has_codec_parameters(st->codec) /*&&
1972 (st->codec->codec_id == CODEC_ID_FLV1 ||
1973 st->codec->codec_id == CODEC_ID_H264 ||
1974 st->codec->codec_id == CODEC_ID_H263 ||
1975 st->codec->codec_id == CODEC_ID_H261 ||
1976 st->codec->codec_id == CODEC_ID_VORBIS ||
1977 st->codec->codec_id == CODEC_ID_MJPEG ||
1978 st->codec->codec_id == CODEC_ID_PNG ||
1979 st->codec->codec_id == CODEC_ID_PAM ||
1980 st->codec->codec_id == CODEC_ID_PGM ||
1981 st->codec->codec_id == CODEC_ID_PGMYUV ||
1982 st->codec->codec_id == CODEC_ID_PBM ||
1983 st->codec->codec_id == CODEC_ID_PPM ||
1984 st->codec->codec_id == CODEC_ID_SHORTEN ||
1985 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1986 try_decode_frame(st, pkt->data, pkt->size);
1987
1988 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
1989 break;
1990 }
1991 count++;
1992 }
1993
1994 // close codecs which were opened in try_decode_frame()
1995 for(i=0;i<ic->nb_streams;i++) {
1996 st = ic->streams[i];
1997 if(st->codec->codec)
1998 avcodec_close(st->codec);
1999 }
2000 for(i=0;i<ic->nb_streams;i++) {
2001 st = ic->streams[i];
2002 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2003 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2004 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2005
2006 if(duration_count[i]
2007 && tb_unreliable(st->codec) /*&&
2008 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2009 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2010 double best_error= 2*av_q2d(st->time_base);
2011 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2012
2013 for(j=1; j<MAX_STD_TIMEBASES; j++){
2014 double error= duration_error[i][j] * get_std_framerate(j);
2015 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2016 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2017 if(error < best_error){
2018 best_error= error;
2019 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2020 }
2021 }
2022 }
2023
2024 if (!st->r_frame_rate.num){
2025 if( st->codec->time_base.den * (int64_t)st->time_base.num
2026 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2027 st->r_frame_rate.num = st->codec->time_base.den;
2028 st->r_frame_rate.den = st->codec->time_base.num;
2029 }else{
2030 st->r_frame_rate.num = st->time_base.den;
2031 st->r_frame_rate.den = st->time_base.num;
2032 }
2033 }
2034 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2035 if (st->codec->codec_id == CODEC_ID_NONE && probe_data[st->index].buf_size > 0) {
2036 codec_identified[st->index] = set_codec_from_probe_data(st, &(probe_data[st->index]), 1);
2037 if (codec_identified[st->index]) {
2038 st->need_parsing = AVSTREAM_PARSE_FULL;
2039 }
2040 }
2041 if(!st->codec->bits_per_sample)
2042 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
2043 }
2044 }
2045
2046 av_estimate_timings(ic, old_offset);
2047
2048 for(i=0;i<ic->nb_streams;i++) {
2049 st = ic->streams[i];
2050 if (codec_identified[st->index])
2051 break;
2052 }
2053 //FIXME this is a mess
2054 if(i!=ic->nb_streams){
2055 av_read_frame_flush(ic);
2056 for(i=0;i<ic->nb_streams;i++) {
2057 st = ic->streams[i];
2058 if (codec_identified[st->index]) {
2059 av_seek_frame(ic, st->index, 0.0, 0);
2060 }
2061 st->cur_dts= st->first_dts;
2062 }
2063 url_fseek(ic->pb, ic->data_offset, SEEK_SET);
2064 }
2065
2066 #if 0
2067 /* correct DTS for B-frame streams with no timestamps */
2068 for(i=0;i<ic->nb_streams;i++) {
2069 st = ic->streams[i];
2070 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2071 if(b-frames){
2072 ppktl = &ic->packet_buffer;
2073 while(ppkt1){
2074 if(ppkt1->stream_index != i)
2075 continue;
2076 if(ppkt1->pkt->dts < 0)
2077 break;
2078 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2079 break;
2080 ppkt1->pkt->dts -= delta;
2081 ppkt1= ppkt1->next;
2082 }
2083 if(ppkt1)
2084 continue;
2085 st->cur_dts -= delta;
2086 }
2087 }
2088 }
2089 #endif
2090
2091 av_free(duration_error);
2092 for(i=0;i<MAX_STREAMS;i++){
2093 av_freep(&(probe_data[i].buf));
2094 }
2095
2096 return ret;
2097 }
2098
2099 /*******************************************************/
2100
2101 int av_read_play(AVFormatContext *s)
2102 {
2103 if (s->iformat->read_play)
2104 return s->iformat->read_play(s);
2105 if (s->pb)
2106 return av_url_read_fpause(s->pb, 0);
2107 return AVERROR(ENOSYS);
2108 }
2109
2110 int av_read_pause(AVFormatContext *s)
2111 {
2112 if (s->iformat->read_pause)
2113 return s->iformat->read_pause(s);
2114 if (s->pb)
2115 return av_url_read_fpause(s->pb, 1);
2116 return AVERROR(ENOSYS);
2117 }
2118
2119 void av_close_input_stream(AVFormatContext *s)
2120 {
2121 int i;
2122 AVStream *st;
2123
2124 /* free previous packet */
2125 if (s->cur_st && s->cur_st->parser)
2126 av_free_packet(&s->cur_pkt);
2127
2128 if (s->iformat->read_close)
2129 s->iformat->read_close(s);
2130 for(i=0;i<s->nb_streams;i++) {
2131 /* free all data in a stream component */
2132 st = s->streams[i];
2133 if (st->parser) {
2134 av_parser_close(st->parser);
2135 }
2136 av_free(st->index_entries);
2137 av_free(st->codec->extradata);
2138 av_free(st->codec);
2139 av_free(st->filename);
2140 av_free(st);
2141 }
2142 for(i=s->nb_programs-1; i>=0; i--) {
2143 av_freep(&s->programs[i]->provider_name);
2144 av_freep(&s->programs[i]->name);
2145 av_freep(&s->programs[i]->stream_index);
2146 av_freep(&s->programs[i]);
2147 }
2148 av_freep(&s->programs);
2149 flush_packet_queue(s);
2150 av_freep(&s->priv_data);
2151 while(s->num_chapters--) {
2152 av_free(s->chapters[s->num_chapters]->title);
2153 av_free(s->chapters[s->num_chapters]);
2154 }
2155 av_freep(&s->chapters);
2156 av_free(s);
2157 }
2158
2159 void av_close_input_file(AVFormatContext *s)
2160 {
2161 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2162 av_close_input_stream(s);
2163 if (pb)
2164 url_fclose(pb);
2165 }
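
/* Illustrative demuxing sketch (not part of the library): a minimal read loop
 * using the public input API of this library version; av_register_all(),
 * av_open_input_file(), av_find_stream_info() and av_read_frame() are assumed
 * to be available as in the rest of libavformat, and error handling is kept
 * to the bare minimum.
 *
 *     AVFormatContext *ic;
 *     AVPacket pkt;
 *
 *     av_register_all();
 *     if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
 *         return -1;
 *     if (av_find_stream_info(ic) < 0)
 *         return -1;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // process pkt.data / pkt.size for stream pkt.stream_index
 *         av_free_packet(&pkt);
 *     }
 *     av_close_input_file(ic);
 */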
2166
2167 AVStream *av_new_stream(AVFormatContext *s, int id)
2168 {
2169 AVStream *st;
2170 int i;
2171
2172 if (s->nb_streams >= MAX_STREAMS)
2173 return NULL;
2174
2175 st = av_mallocz(sizeof(AVStream));
2176 if (!st)
2177 return NULL;
2178
2179 st->codec= avcodec_alloc_context();
2180 if (s->iformat) {
2181 /* no default bitrate if decoding */
2182 st->codec->bit_rate = 0;
2183 }
2184 st->index = s->nb_streams;
2185 st->id = id;
2186 st->start_time = AV_NOPTS_VALUE;
2187 st->duration = AV_NOPTS_VALUE;
2188 st->cur_dts = AV_NOPTS_VALUE;
2189 st->first_dts = AV_NOPTS_VALUE;
2190
2191 /* default pts setting is MPEG-like */
2192 av_set_pts_info(st, 33, 1, 90000);
2193 st->last_IP_pts = AV_NOPTS_VALUE;
2194 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2195 st->pts_buffer[i]= AV_NOPTS_VALUE;
2196
2197 s->streams[s->nb_streams++] = st;
2198 return st;
2199 }
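
/* Illustrative sketch (not part of the library): adding a video stream to an
 * output context "oc" with av_new_stream() and filling in the fields that
 * av_write_header() later checks (dimensions, time base). The chosen codec
 * and values are arbitrary examples, not defaults.
 *
 *     AVStream *st = av_new_stream(oc, 0);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->codec->codec_type = CODEC_TYPE_VIDEO;
 *     st->codec->codec_id   = CODEC_ID_MPEG4;
 *     st->codec->width      = 640;
 *     st->codec->height     = 480;
 *     st->codec->time_base  = (AVRational){1, 25};  // 25 fps
 */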
2200
2201 AVProgram *av_new_program(AVFormatContext *ac, int id)
2202 {
2203 AVProgram *program=NULL;
2204 int i;
2205
2206 #ifdef DEBUG_SI
2207 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2208 #endif
2209
2210 for(i=0; i<ac->nb_programs; i++)
2211 if(ac->programs[i]->id == id)
2212 program = ac->programs[i];
2213
2214 if(!program){
2215 program = av_mallocz(sizeof(AVProgram));
2216 if (!program)
2217 return NULL;
2218 dynarray_add(&ac->programs, &ac->nb_programs, program);
2219 program->discard = AVDISCARD_NONE;
2220 }
2221 program->id = id;
2222
2223 return program;
2224 }
2225
2226 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2227 {
2228 assert(!provider_name == !name);
2229 if(name){
2230 av_free(program->provider_name);
2231 av_free(program-> name);
2232 program->provider_name = av_strdup(provider_name);
2233 program-> name = av_strdup( name);
2234 }
2235 }
2236
2237 int ff_new_chapter(AVFormatContext *s, int id, int64_t start, int64_t end, const char *title)
2238 {
2239 AVChapter *chapter = NULL;
2240 int i;
2241
2242 for(i=0; i<s->num_chapters; i++)
2243 if(s->chapters[i]->id == id)
2244 chapter = s->chapters[i];
2245
2246 if(!chapter){
2247 chapter= av_mallocz(sizeof(AVChapter));
2248 if(!chapter)
2249 return AVERROR(ENOMEM);
2250 dynarray_add(&s->chapters, &s->num_chapters, chapter);
2251 }
2252 if(chapter->title)
2253 av_free(chapter->title);
2254 chapter->title = av_strdup(title);
2255 chapter->id = id;
2256 chapter->start = start;
2257 chapter->end = end;
2258
2259 return 0;
2260 }
2261
2262 /************************************************************/
2263 /* output media file */
2264
2265 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2266 {
2267 int ret;
2268
2269 if (s->oformat->priv_data_size > 0) {
2270 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2271 if (!s->priv_data)
2272 return AVERROR(ENOMEM);
2273 } else
2274 s->priv_data = NULL;
2275
2276 if (s->oformat->set_parameters) {
2277 ret = s->oformat->set_parameters(s, ap);
2278 if (ret < 0)
2279 return ret;
2280 }
2281 return 0;
2282 }
2283
2284 int av_write_header(AVFormatContext *s)
2285 {
2286 int ret, i;
2287 AVStream *st;
2288
2289 // some sanity checks
2290 for(i=0;i<s->nb_streams;i++) {
2291 st = s->streams[i];
2292
2293 switch (st->codec->codec_type) {
2294 case CODEC_TYPE_AUDIO:
2295 if(st->codec->sample_rate<=0){
2296 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2297 return -1;
2298 }
2299 break;
2300 case CODEC_TYPE_VIDEO:
2301 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2302 av_log(s, AV_LOG_ERROR, "time base not set\n");
2303 return -1;
2304 }
2305 if(st->codec->width<=0 || st->codec->height<=0){
2306 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2307 return -1;
2308 }
2309 break;
2310 }
2311
2312 if(s->oformat->codec_tag){
2313 if(st->codec->codec_tag){
2314 //FIXME
2315 //check that tag + id is in the table
2316 //if neither is in the table -> OK
2317 //if tag is in the table with another id -> FAIL
2318 //if id is in the table with another tag -> FAIL unless strict < ?
2319 }else
2320 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2321 }
2322 }
2323
2324 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2325 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2326 if (!s->priv_data)
2327 return AVERROR(ENOMEM);
2328 }
2329
2330 if(s->oformat->write_header){
2331 ret = s->oformat->write_header(s);
2332 if (ret < 0)
2333 return ret;
2334 }
2335
2336 /* init PTS generation */
2337 for(i=0;i<s->nb_streams;i++) {
2338 int64_t den = AV_NOPTS_VALUE;
2339 st = s->streams[i];
2340
2341 switch (st->codec->codec_type) {
2342 case CODEC_TYPE_AUDIO:
2343 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2344 break;
2345 case CODEC_TYPE_VIDEO:
2346 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2347 break;
2348 default:
2349 break;
2350 }
2351 if (den != AV_NOPTS_VALUE) {
2352 if (den <= 0)
2353 return AVERROR_INVALIDDATA;
2354 av_frac_init(&st->pts, 0, 0, den);
2355 }
2356 }
2357 return 0;
2358 }
2359
2360 //FIXME merge with compute_pkt_fields
2361 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2362 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2363 int num, den, frame_size, i;
2364
2365 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2366
2367 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2368 return -1;*/
2369
2370 /* duration field */
2371 if (pkt->duration == 0) {
2372 compute_frame_duration(&num, &den, st, NULL, pkt);
2373 if (den && num) {
2374 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2375 }
2376 }
2377
2378 //XXX/FIXME this is a temporary hack until all encoders output pts
2379 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2380 pkt->dts=
2381 // pkt->pts= st->cur_dts;
2382 pkt->pts= st->pts.val;
2383 }
2384
2385 //calculate dts from pts
2386 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2387 st->pts_buffer[0]= pkt->pts;
2388 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2389 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2390 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2391 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2392
2393 pkt->dts= st->pts_buffer[0];
2394 }
2395
2396 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2397 av_log(NULL, AV_LOG_ERROR, "error, non-monotonic timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2398 return -1;
2399 }
2400 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2401 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2402 return -1;
2403 }
2404
2405 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2406 st->cur_dts= pkt->dts;
2407 st->pts.val= pkt->dts;
2408
2409 /* update pts */
2410 switch (st->codec->codec_type) {
2411 case CODEC_TYPE_AUDIO:
2412 frame_size = get_audio_frame_size(st->codec, pkt->size);
2413
2414 /* HACK/FIXME: we skip the initial zero-sized packets, as they most
2415 likely correspond to the encoder delay, but it would be better if
2416 we had the real timestamps from the encoder */
2417 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2418 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2419 }
2420 break;
2421 case CODEC_TYPE_VIDEO:
2422 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2423 break;
2424 default:
2425 break;
2426 }
2427 return 0;
2428 }
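
/* Illustrative example (not part of the library): how the pts_buffer logic in
 * compute_pkt_fields2() derives DTS from PTS when one B-frame of reordering is
 * present (delay == 1, duration == 1). Packets arriving in coded order with
 * pts 0, 2, 1, 4, 3, ... get dts -1, 0, 1, 2, 3, ..., i.e. the sorted
 * presentation timestamps shifted back by one frame duration.
 */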
2429
2430 static void truncate_ts(AVStream *st, AVPacket *pkt){
2431 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2432
2433 // if(pkt->dts < 0)
2434 // pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2435
2436 if (pkt->pts != AV_NOPTS_VALUE)
2437 pkt->pts &= pts_mask;
2438 if (pkt->dts != AV_NOPTS_VALUE)
2439 pkt->dts &= pts_mask;
2440 }
2441
2442 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2443 {
2444 int ret;
2445
2446 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2447 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2448 return ret;
2449
2450 truncate_ts(s->streams[pkt->stream_index], pkt);
2451
2452 ret= s->oformat->write_packet(s, pkt);
2453 if(!ret)
2454 ret= url_ferror(s->pb);
2455 return ret;
2456 }
2457
2458 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2459 AVPacketList *pktl, **next_point, *this_pktl;
2460 int stream_count=0;
2461 int streams[MAX_STREAMS];
2462
2463 if(pkt){
2464 AVStream *st= s->streams[ pkt->stream_index];
2465
2466 // assert(pkt->destruct != av_destruct_packet); //FIXME
2467
2468 this_pktl = av_mallocz(sizeof(AVPacketList));
2469 this_pktl->pkt= *pkt;
2470 if(pkt->destruct == av_destruct_packet)
2471 pkt->destruct= NULL; // not shared -> must keep original from being freed
2472 else
2473 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2474
2475 next_point = &s->packet_buffer;
2476 while(*next_point){
2477 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2478 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2479 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2480 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2481 break;
2482 next_point= &(*next_point)->next;
2483 }
2484 this_pktl->next= *next_point;
2485 *next_point= this_pktl;
2486 }
2487
2488 memset(streams, 0, sizeof(streams));
2489 pktl= s->packet_buffer;
2490 while(pktl){
2491 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2492 if(streams[ pktl->pkt.stream_index ] == 0)
2493 stream_count++;
2494 streams[ pktl->pkt.stream_index ]++;
2495 pktl= pktl->next;
2496 }
2497
2498 if(s->nb_streams == stream_count || (flush && stream_count)){
2499 pktl= s->packet_buffer;
2500 *out= pktl->pkt;
2501
2502 s->packet_buffer= pktl->next;
2503 av_freep(&pktl);
2504 return 1;
2505 }else{
2506 av_init_packet(out);
2507 return 0;
2508 }
2509 }
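
/* Illustrative note (not part of the library): with one audio and one video
 * stream, av_interleave_packet_per_dts() keeps incoming packets queued in DTS
 * order (scaled to a common time base) and only starts returning them once
 * every stream has at least one buffered packet, so the muxer always gets the
 * packet with the globally smallest DTS; flush==1 drains the queue at EOF even
 * when some stream has nothing left.
 */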
2510
2511 /**
2512 * Interleaves an AVPacket correctly so it can be muxed.
2513 * @param out the interleaved packet will be output here
2514 * @param in the input packet
2515 * @param flush 1 if no further packets are available as input and all
2516 * remaining packets should be output
2517 * @return 1 if a packet was output, 0 if no packet could be output,
2518 * < 0 if an error occurred
2519 */
2520 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2521 if(s->oformat->interleave_packet)
2522 return s->oformat->interleave_packet(s, out, in, flush);
2523 else
2524 return av_interleave_packet_per_dts(s, out, in, flush);
2525 }
2526
2527 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2528 AVStream *st= s->streams[ pkt->stream_index];
2529
2530 //FIXME/XXX/HACK drop zero-sized packets
2531 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2532 return 0;
2533
2534 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2535 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2536 return -1;
2537
2538 if(pkt->dts == AV_NOPTS_VALUE)
2539 return -1;
2540
2541 for(;;){
2542 AVPacket opkt;
2543 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2544 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2545 return ret;
2546
2547 truncate_ts(s->streams[opkt.stream_index], &opkt);
2548 ret= s->oformat->write_packet(s, &opkt);
2549
2550 av_free_packet(&opkt);
2551 pkt= NULL;
2552
2553 if(ret<0)
2554 return ret;
2555 if(url_ferror(s->pb))
2556 return url_ferror(s->pb);
2557 }
2558 }
2559
2560 int av_write_trailer(AVFormatContext *s)
2561 {
2562 int ret, i;
2563
2564 for(;;){
2565 AVPacket pkt;
2566 ret= av_interleave_packet(s, &pkt, NULL, 1);
2567 if(ret<0) //FIXME cleanup needed for ret<0 ?
2568 goto fail;
2569 if(!ret)
2570 break;
2571
2572 truncate_ts(s->streams[pkt.stream_index], &pkt);
2573 ret= s->oformat->write_packet(s, &pkt);
2574
2575 av_free_packet(&pkt);
2576
2577 if(ret<0)
2578 goto fail;
2579 if(url_ferror(s->pb))
2580 goto fail;
2581 }
2582
2583 if(s->oformat->write_trailer)
2584 ret = s->oformat->write_trailer(s);
2585 fail:
2586 if(ret == 0)
2587 ret=url_ferror(s->pb);
2588 for(i=0;i<s->nb_streams;i++)
2589 av_freep(&s->streams[i]->priv_data);
2590 av_freep(&s->priv_data);
2591 return ret;
2592 }
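
/* Illustrative muxing sketch (not part of the library): the call order the
 * output API above expects -- set parameters, write the header, feed packets
 * through the interleaver, then write the trailer. av_alloc_format_context(),
 * guess_format() and url_fopen()/URL_WRONLY are assumed to be the allocation,
 * format-guessing and byte-I/O helpers of this library version; stream setup
 * and packet production (have_packets() is hypothetical) are omitted.
 *
 *     AVFormatContext *oc = av_alloc_format_context();
 *     oc->oformat = guess_format(NULL, "out.avi", NULL);
 *     url_fopen(&oc->pb, "out.avi", URL_WRONLY);
 *     // ... add streams with av_new_stream() and fill st->codec ...
 *     if (av_set_parameters(oc, NULL) < 0 || av_write_header(oc) < 0)
 *         return -1;
 *     while (have_packets(&pkt))               // hypothetical packet source
 *         av_interleaved_write_frame(oc, &pkt);
 *     av_write_trailer(oc);
 *     url_fclose(oc->pb);
 */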
2593
2594 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2595 {
2596 int i, j;
2597 AVProgram *program=NULL;
2598 void *tmp;
2599
2600 for(i=0; i<ac->nb_programs; i++){
2601 if(ac->programs[i]->id != progid)
2602 continue;
2603 program = ac->programs[i];
2604 for(j=0; j<program->nb_stream_indexes; j++)
2605 if(program->stream_index[j] == idx)
2606 return;
2607
2608 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2609 if(!tmp)
2610 return;
2611 program->stream_index = tmp;
2612 program->stream_index[program->nb_stream_indexes++] = idx;
2613 return;
2614 }
2615 }
2616
2617 /* "user interface" functions */
2618 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2619 {
2620 char buf[256];
2621 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2622 AVStream *st = ic->streams[i];
2623 int g = ff_gcd(st->time_base.num, st->time_base.den);
2624 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2625 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2626 /* the pid is important information, so we display it */
2627 /* XXX: add a generic system */
2628 if (flags & AVFMT_SHOW_IDS)
2629 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2630 if (strlen(st->language) > 0)
2631 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2632 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2633 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2634 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2635 if(st->r_frame_rate.den && st->r_frame_rate.num)
2636 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2637 /* else if(st->time_base.den && st->time_base.num)
2638 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2639 else
2640 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2641 }
2642 av_log(NULL, AV_LOG_INFO, "\n");
2643 }
2644
2645 void dump_format(AVFormatContext *ic,
2646 int index,
2647 const char *url,
2648 int is_output)
2649 {
2650 int i;
2651
2652 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2653 is_output ? "Output" : "Input",
2654 index,
2655 is_output ? ic->oformat->name : ic->iformat->name,
2656 is_output ? "to" : "from", url);
2657 if (!is_output) {
2658 av_log(NULL, AV_LOG_INFO, " Duration: ");
2659 if (ic->duration != AV_NOPTS_VALUE) {
2660 int hours, mins, secs, us;
2661 secs = ic->duration / AV_TIME_BASE;
2662 us = ic->duration % AV_TIME_BASE;
2663 mins = secs / 60;
2664 secs %= 60;
2665 hours = mins / 60;
2666 mins %= 60;
2667 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2668 (100 * us) / AV_TIME_BASE);
2669 } else {
2670 av_log(NULL, AV_LOG_INFO, "N/A");
2671 }
2672 if (ic->start_time != AV_NOPTS_VALUE) {
2673 int secs, us;
2674 av_log(NULL, AV_LOG_INFO, ", start: ");
2675 secs = ic->start_time / AV_TIME_BASE;
2676 us = ic->start_time % AV_TIME_BASE;
2677 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2678 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2679 }
2680 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2681 if (ic->bit_rate) {
2682 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2683 } else {
2684 av_log(NULL, AV_LOG_INFO, "N/A");
2685 }
2686 av_log(NULL, AV_LOG_INFO, "\n");
2687 }
2688 if(ic->nb_programs) {
2689 int j, k;
2690 for(j=0; j<ic->nb_programs; j++) {
2691 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2692 ic->programs[j]->name ? ic->programs[j]->name : "");
2693 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2694 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2695 }
2696 } else
2697 for(i=0;i<ic->nb_streams;i++)
2698 dump_stream_format(ic, i, index, is_output);
2699 }
2700
2701 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2702 {
2703 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2704 }
2705
2706 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2707 {
2708 AVRational frame_rate;
2709 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2710 *frame_rate_num= frame_rate.num;
2711 *frame_rate_den= frame_rate.den;
2712 return ret;
2713 }
2714
2715 /**
2716 * Gets the current time in microseconds.
2717 */
2718 int64_t av_gettime(void)
2719 {
2720 struct timeval tv;
2721 gettimeofday(&tv,NULL);
2722 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2723 }
2724
2725 int64_t parse_date(const char *datestr, int duration)
2726 {
2727 const char *p;
2728 int64_t t;
2729 struct tm dt;
2730 int i;
2731 static const char *date_fmt[] = {
2732 "%Y-%m-%d",
2733 "%Y%m%d",
2734 };
2735 static const char *time_fmt[] = {
2736 "%H:%M:%S",
2737 "%H%M%S",
2738 };
2739 const char *q;
2740 int is_utc, len;
2741 char lastch;
2742 int negative = 0;
2743
2744 #undef time
2745 time_t now = time(0);
2746
2747 len = strlen(datestr);
2748 if (len > 0)
2749 lastch = datestr[len - 1];
2750 else
2751 lastch = '\0';
2752 is_utc = (lastch == 'z' || lastch == 'Z');
2753
2754 memset(&dt, 0, sizeof(dt));
2755
2756 p = datestr;
2757 q = NULL;
2758 if (!duration) {
2759 /* parse the year-month-day part */
2760 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2761 q = small_strptime(p, date_fmt[i], &dt);
2762 if (q) {
2763 break;
2764 }
2765 }
2766
2767 /* if the year-month-day part is missing, then use the
2768 * current date */
2769 if (!q) {
2770 if (is_utc) {
2771 dt = *gmtime(&now);
2772 } else {
2773 dt = *localtime(&now);
2774 }
2775 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2776 } else {
2777 p = q;
2778 }
2779
2780 if (*p == 'T' || *p == 't' || *p == ' ')
2781 p++;
2782
2783 /* parse the hour-minute-second part */
2784 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2785 q = small_strptime(p, time_fmt[i], &dt);
2786 if (q) {
2787 break;
2788 }
2789 }
2790 } else {
2791 /* parse datestr as a duration */
2792 if (p[0] == '-') {
2793 negative = 1;
2794 ++p;
2795 }
2796 /* parse datestr as HH:MM:SS */
2797 q = small_strptime(p, time_fmt[0], &dt);
2798 if (!q) {
2799 /* parse datestr as S+ */
2800 dt.tm_sec = strtol(p, (char **)&q, 10);
2801 if (q == p)
2802 /* the parsing didn't succeed */
2803 return INT64_MIN;
2804 dt.tm_min = 0;
2805 dt.tm_hour = 0;
2806 }
2807 }
2808
2809 /* Now we have all the fields that we can get */
2810 if (!q) {
2811 return INT64_MIN;
2812 }
2813
2814 if (duration) {
2815 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2816 } else {
2817 dt.tm_isdst = -1; /* unknown */
2818 if (is_utc) {
2819 t = mktimegm(&dt);
2820 } else {
2821 t = mktime(&dt);
2822 }
2823 }
2824
2825 t *= 1000000;
2826
2827 /* parse the fractional seconds (.m...) part */
2828 if (*q == '.') {
2829 int val, n;
2830 q++;
2831 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2832 if (!isdigit(*q))
2833 break;
2834 val += n * (*q - '0');
2835 }
2836 t += val;
2837 }
2838 return negative ? -t : t;
2839 }
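
/* Illustrative examples (not part of the library) of the syntax parse_date()
 * accepts, based on the parsing code above:
 *
 *     parse_date("2008-03-04T12:30:00", 0); // absolute date+time, local time
 *     parse_date("20080304 123000Z", 0);    // compact form, trailing 'Z' => UTC
 *     parse_date("01:02:03.5", 1);          // duration: 1h 2min 3.5s
 *     parse_date("-45", 1);                 // duration: minus 45 seconds
 *
 * All results are in microseconds; INT64_MIN is returned on parse failure.
 */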
2840
2841 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2842 {
2843 const char *p;
2844 char tag[128], *q;
2845
2846 p = info;
2847 if (*p == '?')
2848 p++;
2849 for(;;) {
2850 q = tag;
2851 while (*p != '\0' && *p != '=' && *p != '&') {
2852 if ((q - tag) < sizeof(tag) - 1)
2853 *q++ = *p;
2854 p++;
2855 }
2856 *q = '\0';
2857 q = arg;
2858 if (*p == '=') {
2859 p++;
2860 while (*p != '&' && *p != '\0') {
2861 if ((q - arg) < arg_size - 1) {
2862 if (*p == '+')
2863 *q++ = ' ';
2864 else
2865 *q++ = *p;
2866 }
2867 p++;
2868 }
2869 *q = '\0';
2870 }
2871 if (!strcmp(tag, tag1))
2872 return 1;
2873 if (*p != '&')
2874 break;
2875 p++;
2876 }
2877 return 0;
2878 }
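
/* Illustrative example (not part of the library): extracting one parameter
 * from a '?key=value&...' option string with find_info_tag().
 *
 *     char val[64];
 *     if (find_info_tag(val, sizeof(val), "codec", "?codec=mp2&rate=48000"))
 *         ;  // returns 1 and val contains "mp2"; '+' in a value decodes to ' '
 */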
2879
2880 int av_get_frame_filename(char *buf, int buf_size,
2881 const char *path, int number)
2882 {
2883 const char *p;
2884 char *q, buf1[20], c;
2885 int nd, len, percentd_found;
2886
2887 q = buf;
2888 p = path;
2889 percentd_found = 0;
2890 for(;;) {
2891 c = *p++;
2892 if (c == '\0')
2893 break;
2894 if (c == '%') {
2895 do {
2896 nd = 0;
2897 while (isdigit(*p)) {
2898 nd = nd * 10 + *p++ - '0';
2899 }
2900 c = *p++;
2901 } while (isdigit(c));
2902
2903 switch(c) {
2904 case '%':
2905 goto addchar;
2906 case 'd':
2907 if (percentd_found)
2908 goto fail;
2909 percentd_found = 1;
2910 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2911 len = strlen(buf1);
2912 if ((q - buf + len) > buf_size - 1)
2913 goto fail;
2914 memcpy(q, buf1, len);
2915 q += len;
2916 break;
2917 default:
2918 goto fail;
2919 }
2920 } else {
2921 addchar:
2922 if ((q - buf) < buf_size - 1)
2923 *q++ = c;
2924 }
2925 }
2926 if (!percentd_found)
2927 goto fail;
2928 *q = '\0';
2929 return 0;
2930 fail:
2931 *q = '\0';
2932 return -1;
2933 }
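
/* Illustrative example (not part of the library): expanding an image sequence
 * pattern with av_get_frame_filename().
 *
 *     char name[1024];
 *     if (av_get_frame_filename(name, sizeof(name), "img%03d.jpg", 7) == 0)
 *         ;  // name now contains "img007.jpg"
 *     // a path without exactly one %d-style sequence specifier returns -1
 */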
2934
2935 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
2936 {
2937 int len, i, j, c;
2938 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2939
2940 for(i=0;i<size;i+=16) {
2941 len = size - i;
2942 if (len > 16)
2943 len = 16;
2944 PRINT("%08x ", i);
2945 for(j=0;j<16;j++) {
2946 if (j < len)
2947 PRINT(" %02x", buf[i+j]);
2948 else
2949 PRINT(" ");
2950 }
2951 PRINT(" ");
2952 for(j=0;j<len;j++) {
2953 c = buf[i+j];
2954 if (c < ' ' || c > '~')
2955 c = '.';
2956 PRINT("%c", c);
2957 }
2958 PRINT("\n");
2959 }
2960 #undef PRINT
2961 }
2962
2963 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2964 {
2965 hex_dump_internal(NULL, f, 0, buf, size);
2966 }
2967
2968 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
2969 {
2970 hex_dump_internal(avcl, NULL, level, buf, size);
2971 }
2972
2973 //FIXME needs to know the time_base
2974 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
2975 {
2976 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2977 PRINT("stream #%d:\n", pkt->stream_index);
2978 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2979 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2980 /* DTS is _always_ valid after av_read_frame() */
2981 PRINT(" dts=");
2982 if (pkt->dts == AV_NOPTS_VALUE)
2983 PRINT("N/A");
2984 else
2985 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
2986 /* PTS may not be known if B-frames are present. */
2987 PRINT(" pts=");
2988 if (pkt->pts == AV_NOPTS_VALUE)
2989 PRINT("N/A");
2990 else
2991 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
2992 PRINT("\n");
2993 PRINT(" size=%d\n", pkt->size);
2994 #undef PRINT
2995 if (dump_payload)
2996 av_hex_dump(f, pkt->data, pkt->size);
2997 }
2998
2999 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3000 {
3001 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3002 }
3003
3004 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3005 {
3006 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
3007 }
3008
3009 void url_split(char *proto, int proto_size,
3010 char *authorization, int authorization_size,
3011 char *hostname, int hostname_size,
3012 int *port_ptr,
3013 char *path, int path_size,
3014 const char *url)
3015 {
3016 const char *p, *ls, *at, *col, *brk;
3017
3018 if (port_ptr) *port_ptr = -1;
3019 if (proto_size > 0) proto[0] = 0;
3020 if (authorization_size > 0) authorization[0] = 0;
3021 if (hostname_size > 0) hostname[0] = 0;
3022 if (path_size > 0) path[0] = 0;
3023
3024 /* parse protocol */
3025 if ((p = strchr(url, ':'))) {
3026 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3027 p++; /* skip ':' */
3028 if (*p == '/') p++;
3029 if (*p == '/') p++;
3030 } else {
3031 /* no protocol means plain filename */
3032 av_strlcpy(path, url, path_size);
3033 return;
3034 }
3035
3036 /* separate path from hostname */
3037 ls = strchr(p, '/');
3038 if(!ls)
3039 ls = strchr(p, '?');
3040 if(ls)
3041 av_strlcpy(path, ls, path_size);
3042 else
3043 ls = &p[strlen(p)]; // XXX
3044
3045 /* the rest is hostname, use that to parse auth/port */
3046 if (ls != p) {
3047 /* authorization (user[:pass]@hostname) */
3048 if ((at = strchr(p, '@')) && at < ls) {
3049 av_strlcpy(authorization, p,
3050 FFMIN(authorization_size, at + 1 - p));
3051 p = at + 1; /* skip '@' */
3052 }
3053
3054 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3055 /* [host]:port */
3056 av_strlcpy(hostname, p + 1,
3057 FFMIN(hostname_size, brk - p));
3058 if (brk[1] == ':' && port_ptr)
3059 *port_ptr = atoi(brk + 2);
3060 } else if ((col = strchr(p, ':')) && col < ls) {
3061 av_strlcpy(hostname, p,
3062 FFMIN(col + 1 - p, hostname_size));
3063 if (port_ptr) *port_ptr = atoi(col + 1);
3064 } else
3065 av_strlcpy(hostname, p,
3066 FFMIN(ls + 1 - p, hostname_size));
3067 }
3068 }
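
/* Illustrative example (not part of the library) of how url_split() breaks a
 * URL into its components:
 *
 *     char proto[8], auth[64], host[64], path[256];
 *     int port;
 *     url_split(proto, sizeof(proto), auth, sizeof(auth),
 *               host, sizeof(host), &port, path, sizeof(path),
 *               "rtsp://user:pass@example.com:554/stream");
 *     // proto="rtsp", auth="user:pass", host="example.com",
 *     // port=554, path="/stream"
 */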
3069
3070 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3071 int pts_num, int pts_den)
3072 {
3073 s->pts_wrap_bits = pts_wrap_bits;
3074 s->time_base.num = pts_num;
3075 s->time_base.den = pts_den;
3076 }
3077
3078 /* fraction handling */
3079
3080 /**
3081 * f = val + (num / den) + 0.5.
3082 *
3083 * 'num' is normalized so that 0 <= num < den.
3084 *
3085 * @param f fractional number
3086 * @param val integer value
3087 * @param num must be >= 0
3088 * @param den must be >= 1
3089 */
3090 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3091 {
3092 num += (den >> 1);
3093 if (num >= den) {
3094 val += num / den;
3095 num = num % den;
3096 }
3097 f->val = val;
3098 f->num = num;
3099 f->den = den;
3100 }
3101
3102 /**
3103 * Fractional addition to f: f = f + (incr / f->den).
3104 *
3105 * @param f fractional number
3106 * @param incr increment, can be positive or negative
3107 */
3108 static void av_frac_add(AVFrac *f, int64_t incr)
3109 {
3110 int64_t num, den;
3111
3112 num = f->num + incr;
3113 den = f->den;
3114 if (num < 0) {
3115 f->val += num / den;
3116 num = num % den;
3117 if (num < 0) {
3118 num += den;
3119 f->val--;
3120 }
3121 } else if (num >= den) {
3122 f->val += num / den;
3123 num = num % den;
3124 }
3125 f->num = num;
3126 }
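
/* Illustrative example (not part of the library): how the AVFrac helpers above
 * accumulate an exact audio PTS. With a 1/90000 time base and a 44100 Hz
 * stream, av_write_header() initializes den = 1 * 44100, and each packet of
 * 1152 samples advances the PTS by 90000*1152/44100 ~= 2351.02 ticks without
 * ever dropping the fractional remainder:
 *
 *     AVFrac pts;
 *     av_frac_init(&pts, 0, 0, (int64_t)1 * 44100);  // val=0, num=22050 (rounding bias)
 *     av_frac_add(&pts, (int64_t)90000 * 1152);      // one 1152-sample packet
 *     // pts.val == 2351, pts.num carries the remainder for the next packet
 */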