1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "libavcodec/opt.h"
23 #include "libavutil/avstring.h"
24 #include "riff.h"
25 #include <sys/time.h>
26 #include <time.h>
27
28 #undef NDEBUG
29 #include <assert.h>
30
31 /**
32 * @file libavformat/utils.c
33 * various utility functions for use within FFmpeg
34 */
35
36 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
37 static void av_frac_add(AVFrac *f, int64_t incr);
38
39 /** head of registered input format linked list */
40 AVInputFormat *first_iformat = NULL;
41 /** head of registered output format linked list */
42 AVOutputFormat *first_oformat = NULL;
43
44 AVInputFormat *av_iformat_next(AVInputFormat *f)
45 {
46 if(f) return f->next;
47 else return first_iformat;
48 }
49
50 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
51 {
52 if(f) return f->next;
53 else return first_oformat;
54 }
55
56 void av_register_input_format(AVInputFormat *format)
57 {
58 AVInputFormat **p;
59 p = &first_iformat;
60 while (*p != NULL) p = &(*p)->next;
61 *p = format;
62 format->next = NULL;
63 }
64
65 void av_register_output_format(AVOutputFormat *format)
66 {
67 AVOutputFormat **p;
68 p = &first_oformat;
69 while (*p != NULL) p = &(*p)->next;
70 *p = format;
71 format->next = NULL;
72 }
73
74 int match_ext(const char *filename, const char *extensions)
75 {
76 const char *ext, *p;
77 char ext1[32], *q;
78
79 if(!filename)
80 return 0;
81
82 ext = strrchr(filename, '.');
83 if (ext) {
84 ext++;
85 p = extensions;
86 for(;;) {
87 q = ext1;
88 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
89 *q++ = *p++;
90 *q = '\0';
91 if (!strcasecmp(ext1, ext))
92 return 1;
93 if (*p == '\0')
94 break;
95 p++;
96 }
97 }
98 return 0;
99 }
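/* Editor's note: a minimal usage sketch of match_ext() above (illustrative
 * only). The extension list is a comma-separated string and the comparison
 * is case-insensitive:
 *
 *     match_ext("movie.MKV", "mkv,webm");  // returns 1
 *     match_ext("movie.avi", "mkv,webm");  // returns 0
 *     match_ext(NULL, "mkv");              // returns 0 (no filename)
 */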
100
101 AVOutputFormat *guess_format(const char *short_name, const char *filename,
102 const char *mime_type)
103 {
104 AVOutputFormat *fmt, *fmt_found;
105 int score_max, score;
106
107 /* specific test for image sequences */
108 #ifdef CONFIG_IMAGE2_MUXER
109 if (!short_name && filename &&
110 av_filename_number_test(filename) &&
111 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
112 return guess_format("image2", NULL, NULL);
113 }
114 #endif
115 /* Find the proper file type. */
116 fmt_found = NULL;
117 score_max = 0;
118 fmt = first_oformat;
119 while (fmt != NULL) {
120 score = 0;
121 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
122 score += 100;
123 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
124 score += 10;
125 if (filename && fmt->extensions &&
126 match_ext(filename, fmt->extensions)) {
127 score += 5;
128 }
129 if (score > score_max) {
130 score_max = score;
131 fmt_found = fmt;
132 }
133 fmt = fmt->next;
134 }
135 return fmt_found;
136 }
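/* Editor's note: an illustrative reading of the scoring above, not part of
 * the original file. An exact short-name match (100) outweighs a MIME-type
 * match (10), which outweighs a filename-extension match (5). Assuming both
 * muxers are registered,
 *
 *     guess_format("mpeg", "out.avi", NULL)
 *
 * therefore selects the MPEG muxer (score 100) rather than AVI (score 5). */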
137
138 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
139 const char *mime_type)
140 {
141 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
142
143 if (fmt) {
144 AVOutputFormat *stream_fmt;
145 char stream_format_name[64];
146
147 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
148 stream_fmt = guess_format(stream_format_name, NULL, NULL);
149
150 if (stream_fmt)
151 fmt = stream_fmt;
152 }
153
154 return fmt;
155 }
156
157 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
158 const char *filename, const char *mime_type, enum CodecType type){
159 if(type == CODEC_TYPE_VIDEO){
160 enum CodecID codec_id= CODEC_ID_NONE;
161
162 #ifdef CONFIG_IMAGE2_MUXER
163 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
164 codec_id= av_guess_image2_codec(filename);
165 }
166 #endif
167 if(codec_id == CODEC_ID_NONE)
168 codec_id= fmt->video_codec;
169 return codec_id;
170 }else if(type == CODEC_TYPE_AUDIO)
171 return fmt->audio_codec;
172 else
173 return CODEC_ID_NONE;
174 }
175
176 AVInputFormat *av_find_input_format(const char *short_name)
177 {
178 AVInputFormat *fmt;
179 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
180 if (!strcmp(fmt->name, short_name))
181 return fmt;
182 }
183 return NULL;
184 }
185
186 /* memory handling */
187
188 void av_destruct_packet(AVPacket *pkt)
189 {
190 av_free(pkt->data);
191 pkt->data = NULL; pkt->size = 0;
192 }
193
194 void av_init_packet(AVPacket *pkt)
195 {
196 pkt->pts = AV_NOPTS_VALUE;
197 pkt->dts = AV_NOPTS_VALUE;
198 pkt->pos = -1;
199 pkt->duration = 0;
200 pkt->flags = 0;
201 pkt->stream_index = 0;
202 pkt->destruct= av_destruct_packet_nofree;
203 }
204
205 int av_new_packet(AVPacket *pkt, int size)
206 {
207 uint8_t *data;
208 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
209 return AVERROR(ENOMEM);
210 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
211 if (!data)
212 return AVERROR(ENOMEM);
213 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
214
215 av_init_packet(pkt);
216 pkt->data = data;
217 pkt->size = size;
218 pkt->destruct = av_destruct_packet;
219 return 0;
220 }
221
222 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
223 {
224 int ret= av_new_packet(pkt, size);
225
226 if(ret<0)
227 return ret;
228
229 pkt->pos= url_ftell(s);
230
231 ret= get_buffer(s, pkt->data, size);
232 if(ret<=0)
233 av_free_packet(pkt);
234 else
235 pkt->size= ret;
236
237 return ret;
238 }
239
240 int av_dup_packet(AVPacket *pkt)
241 {
242 if (pkt->destruct != av_destruct_packet) {
243 uint8_t *data;
244 /* We duplicate the packet and don't forget to add the padding again. */
245 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
246 return AVERROR(ENOMEM);
247 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
248 if (!data) {
249 return AVERROR(ENOMEM);
250 }
251 memcpy(data, pkt->data, pkt->size);
252 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
253 pkt->data = data;
254 pkt->destruct = av_destruct_packet;
255 }
256 return 0;
257 }
258
259 int av_filename_number_test(const char *filename)
260 {
261 char buf[1024];
262 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
263 }
264
265 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
266 {
267 AVInputFormat *fmt1, *fmt;
268 int score;
269
270 fmt = NULL;
271 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
272 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
273 continue;
274 score = 0;
275 if (fmt1->read_probe) {
276 score = fmt1->read_probe(pd);
277 } else if (fmt1->extensions) {
278 if (match_ext(pd->filename, fmt1->extensions)) {
279 score = 50;
280 }
281 }
282 if (score > *score_max) {
283 *score_max = score;
284 fmt = fmt1;
285 }else if (score == *score_max)
286 fmt = NULL;
287 }
288 return fmt;
289 }
290
291 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
292 int score=0;
293 return av_probe_input_format2(pd, is_opened, &score);
294 }
295
296 /************************************************************/
297 /* input media file */
298
299 /**
300 * Open a media file from an IO stream. 'fmt' must be specified.
301 */
302 static const char* format_to_name(void* ptr)
303 {
304 AVFormatContext* fc = (AVFormatContext*) ptr;
305 if(fc->iformat) return fc->iformat->name;
306 else if(fc->oformat) return fc->oformat->name;
307 else return "NULL";
308 }
309
310 #define OFFSET(x) offsetof(AVFormatContext,x)
311 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
312 //these names are too long to be readable
313 #define E AV_OPT_FLAG_ENCODING_PARAM
314 #define D AV_OPT_FLAG_DECODING_PARAM
315
316 static const AVOption options[]={
317 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
318 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
319 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
320 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
321 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
322 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
323 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
324 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
325 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
326 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
327 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
328 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
329 {NULL},
330 };
331
332 #undef E
333 #undef D
334 #undef DEFAULT
335
336 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
337
338 static void avformat_get_context_defaults(AVFormatContext *s)
339 {
340 memset(s, 0, sizeof(AVFormatContext));
341
342 s->av_class = &av_format_context_class;
343
344 av_opt_set_defaults(s);
345 }
346
347 AVFormatContext *av_alloc_format_context(void)
348 {
349 AVFormatContext *ic;
350 ic = av_malloc(sizeof(AVFormatContext));
351 if (!ic) return ic;
352 avformat_get_context_defaults(ic);
353 ic->av_class = &av_format_context_class;
354 return ic;
355 }
356
357 int av_open_input_stream(AVFormatContext **ic_ptr,
358 ByteIOContext *pb, const char *filename,
359 AVInputFormat *fmt, AVFormatParameters *ap)
360 {
361 int err;
362 AVFormatContext *ic;
363 AVFormatParameters default_ap;
364
365 if(!ap){
366 ap=&default_ap;
367 memset(ap, 0, sizeof(default_ap));
368 }
369
370 if(!ap->prealloced_context)
371 ic = av_alloc_format_context();
372 else
373 ic = *ic_ptr;
374 if (!ic) {
375 err = AVERROR(ENOMEM);
376 goto fail;
377 }
378 ic->iformat = fmt;
379 ic->pb = pb;
380 ic->duration = AV_NOPTS_VALUE;
381 ic->start_time = AV_NOPTS_VALUE;
382 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
383
384 /* allocate private data */
385 if (fmt->priv_data_size > 0) {
386 ic->priv_data = av_mallocz(fmt->priv_data_size);
387 if (!ic->priv_data) {
388 err = AVERROR(ENOMEM);
389 goto fail;
390 }
391 } else {
392 ic->priv_data = NULL;
393 }
394
395 err = ic->iformat->read_header(ic, ap);
396 if (err < 0)
397 goto fail;
398
399 if (pb && !ic->data_offset)
400 ic->data_offset = url_ftell(ic->pb);
401
402 *ic_ptr = ic;
403 return 0;
404 fail:
405 if (ic) {
406 av_freep(&ic->priv_data);
407 }
408 av_free(ic);
409 *ic_ptr = NULL;
410 return err;
411 }
412
413 /** size of probe buffer, for guessing file type from file contents */
414 #define PROBE_BUF_MIN 2048
415 #define PROBE_BUF_MAX (1<<20)
416
417 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
418 AVInputFormat *fmt,
419 int buf_size,
420 AVFormatParameters *ap)
421 {
422 int err, probe_size;
423 AVProbeData probe_data, *pd = &probe_data;
424 ByteIOContext *pb = NULL;
425
426 pd->filename = "";
427 if (filename)
428 pd->filename = filename;
429 pd->buf = NULL;
430 pd->buf_size = 0;
431
432 if (!fmt) {
433 /* guess the format among those that do not require opening the file */
434 fmt = av_probe_input_format(pd, 0);
435 }
436
437 /* Do not open file if the format does not need it. XXX: specific
438 hack needed to handle RTSP/TCP */
439 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
440 /* if no file needed do not try to open one */
441 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
442 goto fail;
443 }
444 if (buf_size > 0) {
445 url_setbufsize(pb, buf_size);
446 }
447
448 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
449 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
450 /* read probe data */
451 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
452 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
453 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
454 if (url_fseek(pb, 0, SEEK_SET) < 0) {
455 url_fclose(pb);
456 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
457 pb = NULL;
458 err = AVERROR(EIO);
459 goto fail;
460 }
461 }
462 /* guess file format */
463 fmt = av_probe_input_format2(pd, 1, &score);
464 }
465 av_freep(&pd->buf);
466 }
467
468 /* if still no format found, error */
469 if (!fmt) {
470 err = AVERROR_NOFMT;
471 goto fail;
472 }
473
474 /* check filename in case an image number is expected */
475 if (fmt->flags & AVFMT_NEEDNUMBER) {
476 if (!av_filename_number_test(filename)) {
477 err = AVERROR_NUMEXPECTED;
478 goto fail;
479 }
480 }
481 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
482 if (err)
483 goto fail;
484 return 0;
485 fail:
486 av_freep(&pd->buf);
487 if (pb)
488 url_fclose(pb);
489 *ic_ptr = NULL;
490 return err;
491
492 }
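/* Editor's note: a minimal sketch of the intended call sequence for this
 * demuxing API (error handling abbreviated, "input.avi" is a placeholder):
 *
 *     AVFormatContext *ic;
 *     if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) < 0)
 *         return -1;                 // probing or opening failed
 *     if (av_find_stream_info(ic) < 0)
 *         return -1;                 // could not fill in codec parameters
 *     // ... av_read_frame() loop, see the sketch after av_read_frame() below
 *     av_close_input_file(ic);
 */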
493
494 /*******************************************************/
495
496 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
497 {
498 int ret;
499 AVStream *st;
500 av_init_packet(pkt);
501 ret= s->iformat->read_packet(s, pkt);
502 if (ret < 0)
503 return ret;
504 st= s->streams[pkt->stream_index];
505
506 switch(st->codec->codec_type){
507 case CODEC_TYPE_VIDEO:
508 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
509 break;
510 case CODEC_TYPE_AUDIO:
511 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
512 break;
513 case CODEC_TYPE_SUBTITLE:
514 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
515 break;
516 }
517
518 return ret;
519 }
520
521 /**********************************************************/
522
523 /**
524 * Get the number of samples of an audio frame. Return -1 on error.
525 */
526 static int get_audio_frame_size(AVCodecContext *enc, int size)
527 {
528 int frame_size;
529
530 if (enc->frame_size <= 1) {
531 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
532
533 if (bits_per_sample) {
534 if (enc->channels == 0)
535 return -1;
536 frame_size = (size << 3) / (bits_per_sample * enc->channels);
537 } else {
538 /* used for example by ADPCM codecs */
539 if (enc->bit_rate == 0)
540 return -1;
541 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
542 }
543 } else {
544 frame_size = enc->frame_size;
545 }
546 return frame_size;
547 }
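/* Editor's note: worked example for the calculation above. For 16-bit stereo
 * PCM (bits_per_sample = 16, channels = 2) a 4096-byte packet gives
 * (4096 * 8) / (16 * 2) = 1024 samples; for codecs without a fixed frame_size
 * and no per-sample bit count, size * 8 * sample_rate / bit_rate is used
 * instead. */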
548
549
550 /**
551 * Compute the frame duration as a fraction, *pnum / *pden, in seconds. Both are set to 0 if it is not available.
552 */
553 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
554 AVCodecParserContext *pc, AVPacket *pkt)
555 {
556 int frame_size;
557
558 *pnum = 0;
559 *pden = 0;
560 switch(st->codec->codec_type) {
561 case CODEC_TYPE_VIDEO:
562 if(st->time_base.num*1000LL > st->time_base.den){
563 *pnum = st->time_base.num;
564 *pden = st->time_base.den;
565 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
566 *pnum = st->codec->time_base.num;
567 *pden = st->codec->time_base.den;
568 if (pc && pc->repeat_pict) {
569 *pden *= 2;
570 *pnum = (*pnum) * (2 + pc->repeat_pict);
571 }
572 }
573 break;
574 case CODEC_TYPE_AUDIO:
575 frame_size = get_audio_frame_size(st->codec, pkt->size);
576 if (frame_size < 0)
577 break;
578 *pnum = frame_size;
579 *pden = st->codec->sample_rate;
580 break;
581 default:
582 break;
583 }
584 }
585
586 static int is_intra_only(AVCodecContext *enc){
587 if(enc->codec_type == CODEC_TYPE_AUDIO){
588 return 1;
589 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
590 switch(enc->codec_id){
591 case CODEC_ID_MJPEG:
592 case CODEC_ID_MJPEGB:
593 case CODEC_ID_LJPEG:
594 case CODEC_ID_RAWVIDEO:
595 case CODEC_ID_DVVIDEO:
596 case CODEC_ID_HUFFYUV:
597 case CODEC_ID_FFVHUFF:
598 case CODEC_ID_ASV1:
599 case CODEC_ID_ASV2:
600 case CODEC_ID_VCR1:
601 return 1;
602 default: break;
603 }
604 }
605 return 0;
606 }
607
608 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
609 int64_t dts, int64_t pts)
610 {
611 AVStream *st= s->streams[stream_index];
612 AVPacketList *pktl= s->packet_buffer;
613
614 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE)
615 return;
616
617 st->first_dts= dts - st->cur_dts;
618 st->cur_dts= dts;
619
620 for(; pktl; pktl= pktl->next){
621 if(pktl->pkt.stream_index != stream_index)
622 continue;
623 //FIXME think more about this check
624 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
625 pktl->pkt.pts += st->first_dts;
626
627 if(pktl->pkt.dts != AV_NOPTS_VALUE)
628 pktl->pkt.dts += st->first_dts;
629
630 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
631 st->start_time= pktl->pkt.pts;
632 }
633 if (st->start_time == AV_NOPTS_VALUE)
634 st->start_time = pts;
635 }
636
637 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
638 {
639 AVPacketList *pktl= s->packet_buffer;
640
641 assert(pkt->duration && !st->cur_dts);
642
643 for(; pktl; pktl= pktl->next){
644 if(pktl->pkt.stream_index != pkt->stream_index)
645 continue;
646 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
647 && !pktl->pkt.duration){
648 pktl->pkt.pts= pktl->pkt.dts= st->cur_dts;
649 st->cur_dts += pkt->duration;
650 pktl->pkt.duration= pkt->duration;
651 }else
652 break;
653 }
654 }
655
656 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
657 AVCodecParserContext *pc, AVPacket *pkt)
658 {
659 int num, den, presentation_delayed, delay, i;
660 int64_t offset;
661
662 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
663 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
664 pkt->dts -= 1LL<<st->pts_wrap_bits;
665 }
666
667 if (pkt->duration == 0) {
668 compute_frame_duration(&num, &den, st, pc, pkt);
669 if (den && num) {
670 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
671
672 if(st->cur_dts == 0 && pkt->duration != 0)
673 update_initial_durations(s, st, pkt);
674 }
675 }
676
677 /* correct timestamps with byte offset if demuxers only have timestamps
678 on packet boundaries */
679 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
680 /* this will estimate bitrate based on this frame's duration and size */
681 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
682 if(pkt->pts != AV_NOPTS_VALUE)
683 pkt->pts += offset;
684 if(pkt->dts != AV_NOPTS_VALUE)
685 pkt->dts += offset;
686 }
687
688 /* do we have a video B-frame ? */
689 delay= st->codec->has_b_frames;
690 presentation_delayed = 0;
691 /* XXX: need has_b_frame, but cannot get it if the codec is
692 not initialized */
693 if (delay &&
694 pc && pc->pict_type != FF_B_TYPE)
695 presentation_delayed = 1;
696 /* This may be redundant, but it should not hurt. */
697 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
698 presentation_delayed = 1;
699
700 if(st->cur_dts == AV_NOPTS_VALUE){
701 st->cur_dts = 0; //FIXME maybe set it to 0 during init
702 }
703
704 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
705 /* interpolate PTS and DTS if they are not present */
706 if(delay <=1){
707 if (presentation_delayed) {
708 /* DTS = decompression timestamp */
709 /* PTS = presentation timestamp */
710 if (pkt->dts == AV_NOPTS_VALUE)
711 pkt->dts = st->last_IP_pts;
712 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
713 if (pkt->dts == AV_NOPTS_VALUE)
714 pkt->dts = st->cur_dts;
715
716 /* this is tricky: the dts must be incremented by the duration
717 of the frame we are displaying, i.e. the last I- or P-frame */
718 if (st->last_IP_duration == 0)
719 st->last_IP_duration = pkt->duration;
720 st->cur_dts = pkt->dts + st->last_IP_duration;
721 st->last_IP_duration = pkt->duration;
722 st->last_IP_pts= pkt->pts;
723 /* cannot compute PTS if not present (we can compute it only
724 by knowing the future) */
725 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
726 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
727 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
728 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
729 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
730 pkt->pts += pkt->duration;
731 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
732 }
733 }
734
735 /* presentation is not delayed: PTS and DTS are the same */
736 if(pkt->pts == AV_NOPTS_VALUE)
737 pkt->pts = pkt->dts;
738 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
739 if(pkt->pts == AV_NOPTS_VALUE)
740 pkt->pts = st->cur_dts;
741 pkt->dts = pkt->pts;
742 st->cur_dts = pkt->pts + pkt->duration;
743 }
744 }
745
746 if(pkt->pts != AV_NOPTS_VALUE){
747 st->pts_buffer[0]= pkt->pts;
748 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
749 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
750 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
751 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
752 if(pkt->dts == AV_NOPTS_VALUE)
753 pkt->dts= st->pts_buffer[0];
754 if(delay>1){
755 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
756 }
757 if(pkt->dts > st->cur_dts)
758 st->cur_dts = pkt->dts;
759 }
760
761 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
762
763 /* update flags */
764 if(is_intra_only(st->codec))
765 pkt->flags |= PKT_FLAG_KEY;
766 else if (pc) {
767 pkt->flags = 0;
768 /* keyframe computation */
769 if (pc->pict_type == FF_I_TYPE)
770 pkt->flags |= PKT_FLAG_KEY;
771 }
772 }
773
774 void av_destruct_packet_nofree(AVPacket *pkt)
775 {
776 pkt->data = NULL; pkt->size = 0;
777 }
778
779 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
780 {
781 AVStream *st;
782 int len, ret, i;
783
784 av_init_packet(pkt);
785
786 for(;;) {
787 /* select current input stream component */
788 st = s->cur_st;
789 if (st) {
790 if (!st->need_parsing || !st->parser) {
791 /* no parsing needed: we just output the packet as is */
792 /* raw data support */
793 *pkt = s->cur_pkt;
794 compute_pkt_fields(s, st, NULL, pkt);
795 s->cur_st = NULL;
796 break;
797 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
798 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
799 s->cur_ptr, s->cur_len,
800 s->cur_pkt.pts, s->cur_pkt.dts);
801 s->cur_pkt.pts = AV_NOPTS_VALUE;
802 s->cur_pkt.dts = AV_NOPTS_VALUE;
803 /* increment read pointer */
804 s->cur_ptr += len;
805 s->cur_len -= len;
806
807 /* return packet if any */
808 if (pkt->size) {
809 got_packet:
810 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
811 pkt->duration = 0;
812 pkt->stream_index = st->index;
813 pkt->pts = st->parser->pts;
814 pkt->dts = st->parser->dts;
815 pkt->destruct = av_destruct_packet_nofree;
816 compute_pkt_fields(s, st, st->parser, pkt);
817
818 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
819 ff_reduce_index(s, st->index);
820 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
821 0, 0, AVINDEX_KEYFRAME);
822 }
823
824 break;
825 }
826 } else {
827 /* free packet */
828 av_free_packet(&s->cur_pkt);
829 s->cur_st = NULL;
830 }
831 } else {
832 /* read next packet */
833 ret = av_read_packet(s, &s->cur_pkt);
834 if (ret < 0) {
835 if (ret == AVERROR(EAGAIN))
836 return ret;
837 /* return the last frames, if any */
838 for(i = 0; i < s->nb_streams; i++) {
839 st = s->streams[i];
840 if (st->parser && st->need_parsing) {
841 av_parser_parse(st->parser, st->codec,
842 &pkt->data, &pkt->size,
843 NULL, 0,
844 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
845 if (pkt->size)
846 goto got_packet;
847 }
848 }
849 /* no more packets: really terminate parsing */
850 return ret;
851 }
852
853 st = s->streams[s->cur_pkt.stream_index];
854 if(st->codec->debug & FF_DEBUG_PTS)
855 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
856 s->cur_pkt.stream_index,
857 s->cur_pkt.pts,
858 s->cur_pkt.dts,
859 s->cur_pkt.size);
860
861 s->cur_st = st;
862 s->cur_ptr = s->cur_pkt.data;
863 s->cur_len = s->cur_pkt.size;
864 if (st->need_parsing && !st->parser) {
865 st->parser = av_parser_init(st->codec->codec_id);
866 if (!st->parser) {
867 /* no parser available: just output the raw packets */
868 st->need_parsing = AVSTREAM_PARSE_NONE;
869 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
870 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
871 }
872 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
873 st->parser->last_frame_offset=
874 st->parser->cur_offset= s->cur_pkt.pos;
875 }
876 }
877 }
878 }
879 if(st->codec->debug & FF_DEBUG_PTS)
880 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
881 pkt->stream_index,
882 pkt->pts,
883 pkt->dts,
884 pkt->size);
885
886 return 0;
887 }
888
889 static AVPacket *add_to_pktbuf(AVFormatContext *s, AVPacket *pkt){
890 AVPacketList *pktl= s->packet_buffer;
891 AVPacketList **plast_pktl= &s->packet_buffer;
892
893 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
894
895 pktl = av_mallocz(sizeof(AVPacketList));
896 if (!pktl)
897 return NULL;
898
899 /* add the packet in the buffered packet list */
900 *plast_pktl = pktl;
901 pktl->pkt= *pkt;
902 return &pktl->pkt;
903 }
904
905 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
906 {
907 AVPacketList *pktl;
908 int eof=0;
909 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
910
911 for(;;){
912 pktl = s->packet_buffer;
913 if (pktl) {
914 AVPacket *next_pkt= &pktl->pkt;
915
916 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
917 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
918 if( pktl->pkt.stream_index == next_pkt->stream_index
919 && next_pkt->dts < pktl->pkt.dts
920 && pktl->pkt.pts != pktl->pkt.dts //not b frame
921 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
922 next_pkt->pts= pktl->pkt.dts;
923 }
924 pktl= pktl->next;
925 }
926 pktl = s->packet_buffer;
927 }
928
929 if( next_pkt->pts != AV_NOPTS_VALUE
930 || next_pkt->dts == AV_NOPTS_VALUE
931 || !genpts || eof){
932 /* read packet from packet buffer, if there is data */
933 *pkt = *next_pkt;
934 s->packet_buffer = pktl->next;
935 av_free(pktl);
936 return 0;
937 }
938 }
939 if(genpts){
940 int ret= av_read_frame_internal(s, pkt);
941 if(ret<0){
942 if(pktl && ret != AVERROR(EAGAIN)){
943 eof=1;
944 continue;
945 }else
946 return ret;
947 }
948
949 if(av_dup_packet(add_to_pktbuf(s, pkt)) < 0)
950 return AVERROR(ENOMEM);
951 }else{
952 assert(!s->packet_buffer);
953 return av_read_frame_internal(s, pkt);
954 }
955 }
956 }
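/* Editor's note: a typical caller loop for av_read_frame() (sketch only;
 * process() is a hypothetical callback):
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // pkt.data stays valid only until the next av_read_frame() or
 *         // av_close_input_file() unless av_dup_packet(&pkt) is called
 *         process(pkt.stream_index, pkt.data, pkt.size);
 *         av_free_packet(&pkt);
 *     }
 */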
957
958 /* XXX: empty the packet queue (free all buffered packets) */
959 static void flush_packet_queue(AVFormatContext *s)
960 {
961 AVPacketList *pktl;
962
963 for(;;) {
964 pktl = s->packet_buffer;
965 if (!pktl)
966 break;
967 s->packet_buffer = pktl->next;
968 av_free_packet(&pktl->pkt);
969 av_free(pktl);
970 }
971 }
972
973 /*******************************************************/
974 /* seek support */
975
976 int av_find_default_stream_index(AVFormatContext *s)
977 {
978 int i;
979 AVStream *st;
980
981 if (s->nb_streams <= 0)
982 return -1;
983 for(i = 0; i < s->nb_streams; i++) {
984 st = s->streams[i];
985 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
986 return i;
987 }
988 }
989 return 0;
990 }
991
992 /**
993 * Flush the frame reader.
994 */
995 static void av_read_frame_flush(AVFormatContext *s)
996 {
997 AVStream *st;
998 int i;
999
1000 flush_packet_queue(s);
1001
1002 /* free previous packet */
1003 if (s->cur_st) {
1004 if (s->cur_st->parser)
1005 av_free_packet(&s->cur_pkt);
1006 s->cur_st = NULL;
1007 }
1008 /* fail safe */
1009 s->cur_ptr = NULL;
1010 s->cur_len = 0;
1011
1012 /* for each stream, reset read state */
1013 for(i = 0; i < s->nb_streams; i++) {
1014 st = s->streams[i];
1015
1016 if (st->parser) {
1017 av_parser_close(st->parser);
1018 st->parser = NULL;
1019 }
1020 st->last_IP_pts = AV_NOPTS_VALUE;
1021 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1022 }
1023 }
1024
1025 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1026 int i;
1027
1028 for(i = 0; i < s->nb_streams; i++) {
1029 AVStream *st = s->streams[i];
1030
1031 st->cur_dts = av_rescale(timestamp,
1032 st->time_base.den * (int64_t)ref_st->time_base.num,
1033 st->time_base.num * (int64_t)ref_st->time_base.den);
1034 }
1035 }
1036
1037 void ff_reduce_index(AVFormatContext *s, int stream_index)
1038 {
1039 AVStream *st= s->streams[stream_index];
1040 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1041
1042 if((unsigned)st->nb_index_entries >= max_entries){
1043 int i;
1044 for(i=0; 2*i<st->nb_index_entries; i++)
1045 st->index_entries[i]= st->index_entries[2*i];
1046 st->nb_index_entries= i;
1047 }
1048 }
1049
1050 int av_add_index_entry(AVStream *st,
1051 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1052 {
1053 AVIndexEntry *entries, *ie;
1054 int index;
1055
1056 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1057 return -1;
1058
1059 entries = av_fast_realloc(st->index_entries,
1060 &st->index_entries_allocated_size,
1061 (st->nb_index_entries + 1) *
1062 sizeof(AVIndexEntry));
1063 if(!entries)
1064 return -1;
1065
1066 st->index_entries= entries;
1067
1068 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1069
1070 if(index<0){
1071 index= st->nb_index_entries++;
1072 ie= &entries[index];
1073 assert(index==0 || ie[-1].timestamp < timestamp);
1074 }else{
1075 ie= &entries[index];
1076 if(ie->timestamp != timestamp){
1077 if(ie->timestamp <= timestamp)
1078 return -1;
1079 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1080 st->nb_index_entries++;
1081 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1082 distance= ie->min_distance;
1083 }
1084
1085 ie->pos = pos;
1086 ie->timestamp = timestamp;
1087 ie->min_distance= distance;
1088 ie->size= size;
1089 ie->flags = flags;
1090
1091 return index;
1092 }
1093
1094 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1095 int flags)
1096 {
1097 AVIndexEntry *entries= st->index_entries;
1098 int nb_entries= st->nb_index_entries;
1099 int a, b, m;
1100 int64_t timestamp;
1101
1102 a = - 1;
1103 b = nb_entries;
1104
1105 while (b - a > 1) {
1106 m = (a + b) >> 1;
1107 timestamp = entries[m].timestamp;
1108 if(timestamp >= wanted_timestamp)
1109 b = m;
1110 if(timestamp <= wanted_timestamp)
1111 a = m;
1112 }
1113 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1114
1115 if(!(flags & AVSEEK_FLAG_ANY)){
1116 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1117 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1118 }
1119 }
1120
1121 if(m == nb_entries)
1122 return -1;
1123 return m;
1124 }
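/* Editor's note: the binary search above leaves 'a' as the last entry with
 * timestamp <= wanted_timestamp and 'b' as the first entry with
 * timestamp >= wanted_timestamp; AVSEEK_FLAG_BACKWARD picks 'a', otherwise
 * 'b' is used, and unless AVSEEK_FLAG_ANY is set the result is then walked
 * in the same direction until a keyframe entry is found (-1 if none). */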
1125
1126 #define DEBUG_SEEK
1127
1128 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1129 AVInputFormat *avif= s->iformat;
1130 int64_t pos_min, pos_max, pos, pos_limit;
1131 int64_t ts_min, ts_max, ts;
1132 int index;
1133 AVStream *st;
1134
1135 if (stream_index < 0)
1136 return -1;
1137
1138 #ifdef DEBUG_SEEK
1139 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1140 #endif
1141
1142 ts_max=
1143 ts_min= AV_NOPTS_VALUE;
1144 pos_limit= -1; //gcc falsely says it may be uninitialized
1145
1146 st= s->streams[stream_index];
1147 if(st->index_entries){
1148 AVIndexEntry *e;
1149
1150 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1151 index= FFMAX(index, 0);
1152 e= &st->index_entries[index];
1153
1154 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1155 pos_min= e->pos;
1156 ts_min= e->timestamp;
1157 #ifdef DEBUG_SEEK
1158 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1159 pos_min,ts_min);
1160 #endif
1161 }else{
1162 assert(index==0);
1163 }
1164
1165 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1166 assert(index < st->nb_index_entries);
1167 if(index >= 0){
1168 e= &st->index_entries[index];
1169 assert(e->timestamp >= target_ts);
1170 pos_max= e->pos;
1171 ts_max= e->timestamp;
1172 pos_limit= pos_max - e->min_distance;
1173 #ifdef DEBUG_SEEK
1174 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1175 pos_max,pos_limit, ts_max);
1176 #endif
1177 }
1178 }
1179
1180 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1181 if(pos<0)
1182 return -1;
1183
1184 /* do the seek */
1185 url_fseek(s->pb, pos, SEEK_SET);
1186
1187 av_update_cur_dts(s, st, ts);
1188
1189 return 0;
1190 }
1191
1192 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1193 int64_t pos, ts;
1194 int64_t start_pos, filesize;
1195 int no_change;
1196
1197 #ifdef DEBUG_SEEK
1198 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1199 #endif
1200
1201 if(ts_min == AV_NOPTS_VALUE){
1202 pos_min = s->data_offset;
1203 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1204 if (ts_min == AV_NOPTS_VALUE)
1205 return -1;
1206 }
1207
1208 if(ts_max == AV_NOPTS_VALUE){
1209 int step= 1024;
1210 filesize = url_fsize(s->pb);
1211 pos_max = filesize - 1;
1212 do{
1213 pos_max -= step;
1214 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1215 step += step;
1216 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1217 if (ts_max == AV_NOPTS_VALUE)
1218 return -1;
1219
1220 for(;;){
1221 int64_t tmp_pos= pos_max + 1;
1222 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1223 if(tmp_ts == AV_NOPTS_VALUE)
1224 break;
1225 ts_max= tmp_ts;
1226 pos_max= tmp_pos;
1227 if(tmp_pos >= filesize)
1228 break;
1229 }
1230 pos_limit= pos_max;
1231 }
1232
1233 if(ts_min > ts_max){
1234 return -1;
1235 }else if(ts_min == ts_max){
1236 pos_limit= pos_min;
1237 }
1238
1239 no_change=0;
1240 while (pos_min < pos_limit) {
1241 #ifdef DEBUG_SEEK
1242 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1243 pos_min, pos_max,
1244 ts_min, ts_max);
1245 #endif
1246 assert(pos_limit <= pos_max);
1247
1248 if(no_change==0){
1249 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1250 // interpolate position (better than plain bisection)
1251 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1252 + pos_min - approximate_keyframe_distance;
1253 }else if(no_change==1){
1254 // bisection, if interpolation failed to change min or max pos last time
1255 pos = (pos_min + pos_limit)>>1;
1256 }else{
1257 /* linear search if bisection failed, can only happen if there
1258 are very few or no keyframes between min/max */
1259 pos=pos_min;
1260 }
1261 if(pos <= pos_min)
1262 pos= pos_min + 1;
1263 else if(pos > pos_limit)
1264 pos= pos_limit;
1265 start_pos= pos;
1266
1267 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1268 if(pos == pos_max)
1269 no_change++;
1270 else
1271 no_change=0;
1272 #ifdef DEBUG_SEEK
1273 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1274 #endif
1275 if(ts == AV_NOPTS_VALUE){
1276 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1277 return -1;
1278 }
1279 assert(ts != AV_NOPTS_VALUE);
1280 if (target_ts <= ts) {
1281 pos_limit = start_pos - 1;
1282 pos_max = pos;
1283 ts_max = ts;
1284 }
1285 if (target_ts >= ts) {
1286 pos_min = pos;
1287 ts_min = ts;
1288 }
1289 }
1290
1291 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1292 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1293 #ifdef DEBUG_SEEK
1294 pos_min = pos;
1295 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1296 pos_min++;
1297 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1298 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1299 pos, ts_min, target_ts, ts_max);
1300 #endif
1301 *ts_ret= ts;
1302 return pos;
1303 }
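/* Editor's note: the routine above is essentially an interpolation search
 * over the file. Assuming a roughly constant bitrate, the first guess is
 *
 *     pos ~ pos_min + (target_ts - ts_min) * (pos_max - pos_min) / (ts_max - ts_min)
 *                   - approximate_keyframe_distance
 *
 * and it degrades to bisection, and finally to a linear scan, whenever a
 * guess fails to narrow the [pos_min, pos_limit] interval. */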
1304
1305 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1306 int64_t pos_min, pos_max;
1307 #if 0
1308 AVStream *st;
1309
1310 if (stream_index < 0)
1311 return -1;
1312
1313 st= s->streams[stream_index];
1314 #endif
1315
1316 pos_min = s->data_offset;
1317 pos_max = url_fsize(s->pb) - 1;
1318
1319 if (pos < pos_min) pos= pos_min;
1320 else if(pos > pos_max) pos= pos_max;
1321
1322 url_fseek(s->pb, pos, SEEK_SET);
1323
1324 #if 0
1325 av_update_cur_dts(s, st, ts);
1326 #endif
1327 return 0;
1328 }
1329
1330 static int av_seek_frame_generic(AVFormatContext *s,
1331 int stream_index, int64_t timestamp, int flags)
1332 {
1333 int index;
1334 AVStream *st;
1335 AVIndexEntry *ie;
1336
1337 st = s->streams[stream_index];
1338
1339 index = av_index_search_timestamp(st, timestamp, flags);
1340
1341 if(index < 0 || index==st->nb_index_entries-1){
1342 int i;
1343 AVPacket pkt;
1344
1345 if(st->index_entries && st->nb_index_entries){
1346 ie= &st->index_entries[st->nb_index_entries-1];
1347 url_fseek(s->pb, ie->pos, SEEK_SET);
1348 av_update_cur_dts(s, st, ie->timestamp);
1349 }else
1350 url_fseek(s->pb, 0, SEEK_SET);
1351
1352 for(i=0;; i++) {
1353 int ret = av_read_frame(s, &pkt);
1354 if(ret<0)
1355 break;
1356 av_free_packet(&pkt);
1357 if(stream_index == pkt.stream_index){
1358 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1359 break;
1360 }
1361 }
1362 index = av_index_search_timestamp(st, timestamp, flags);
1363 }
1364 if (index < 0)
1365 return -1;
1366
1367 av_read_frame_flush(s);
1368 if (s->iformat->read_seek){
1369 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1370 return 0;
1371 }
1372 ie = &st->index_entries[index];
1373 url_fseek(s->pb, ie->pos, SEEK_SET);
1374
1375 av_update_cur_dts(s, st, ie->timestamp);
1376
1377 return 0;
1378 }
1379
1380 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1381 {
1382 int ret;
1383 AVStream *st;
1384
1385 av_read_frame_flush(s);
1386
1387 if(flags & AVSEEK_FLAG_BYTE)
1388 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1389
1390 if(stream_index < 0){
1391 stream_index= av_find_default_stream_index(s);
1392 if(stream_index < 0)
1393 return -1;
1394
1395 st= s->streams[stream_index];
1396 /* timestamp for default must be expressed in AV_TIME_BASE units */
1397 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1398 }
1399 st= s->streams[stream_index];
1400
1401 /* first, we try the format specific seek */
1402 if (s->iformat->read_seek)
1403 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1404 else
1405 ret = -1;
1406 if (ret >= 0) {
1407 return 0;
1408 }
1409
1410 if(s->iformat->read_timestamp)
1411 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1412 else
1413 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1414 }
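/* Editor's note: example call (sketch). Seek to the 10-second mark on the
 * default stream, preferring the closest preceding keyframe:
 *
 *     av_seek_frame(ic, -1, 10 * AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
 *
 * With stream_index >= 0 the timestamp must instead be given in that
 * stream's time_base units. */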
1415
1416 /*******************************************************/
1417
1418 /**
1419 * Return TRUE if the format context has an accurate duration in any stream.
1420 *
1421 * @return TRUE if at least one stream has a known, accurate duration.
1422 */
1423 static int av_has_duration(AVFormatContext *ic)
1424 {
1425 int i;
1426 AVStream *st;
1427
1428 for(i = 0;i < ic->nb_streams; i++) {
1429 st = ic->streams[i];
1430 if (st->duration != AV_NOPTS_VALUE)
1431 return 1;
1432 }
1433 return 0;
1434 }
1435
1436 /**
1437 * Estimate the global stream timings from those of each component stream.
1438 *
1439 * Also computes the global bitrate if possible.
1440 */
1441 static void av_update_stream_timings(AVFormatContext *ic)
1442 {
1443 int64_t start_time, start_time1, end_time, end_time1;
1444 int64_t duration, duration1;
1445 int i;
1446 AVStream *st;
1447
1448 start_time = INT64_MAX;
1449 end_time = INT64_MIN;
1450 duration = INT64_MIN;
1451 for(i = 0;i < ic->nb_streams; i++) {
1452 st = ic->streams[i];
1453 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1454 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1455 if (start_time1 < start_time)
1456 start_time = start_time1;
1457 if (st->duration != AV_NOPTS_VALUE) {
1458 end_time1 = start_time1
1459 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1460 if (end_time1 > end_time)
1461 end_time = end_time1;
1462 }
1463 }
1464 if (st->duration != AV_NOPTS_VALUE) {
1465 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1466 if (duration1 > duration)
1467 duration = duration1;
1468 }
1469 }
1470 if (start_time != INT64_MAX) {
1471 ic->start_time = start_time;
1472 if (end_time != INT64_MIN) {
1473 if (end_time - start_time > duration)
1474 duration = end_time - start_time;
1475 }
1476 }
1477 if (duration != INT64_MIN) {
1478 ic->duration = duration;
1479 if (ic->file_size > 0) {
1480 /* compute the bitrate */
1481 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1482 (double)ic->duration;
1483 }
1484 }
1485 }
1486
1487 static void fill_all_stream_timings(AVFormatContext *ic)
1488 {
1489 int i;
1490 AVStream *st;
1491
1492 av_update_stream_timings(ic);
1493 for(i = 0;i < ic->nb_streams; i++) {
1494 st = ic->streams[i];
1495 if (st->start_time == AV_NOPTS_VALUE) {
1496 if(ic->start_time != AV_NOPTS_VALUE)
1497 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1498 if(ic->duration != AV_NOPTS_VALUE)
1499 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1500 }
1501 }
1502 }
1503
1504 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1505 {
1506 int64_t filesize, duration;
1507 int bit_rate, i;
1508 AVStream *st;
1509
1510 /* if bit_rate is already set, we believe it */
1511 if (ic->bit_rate == 0) {
1512 bit_rate = 0;
1513 for(i=0;i<ic->nb_streams;i++) {
1514 st = ic->streams[i];
1515 bit_rate += st->codec->bit_rate;
1516 }
1517 ic->bit_rate = bit_rate;
1518 }
1519
1520 /* if duration is already set, we believe it */
1521 if (ic->duration == AV_NOPTS_VALUE &&
1522 ic->bit_rate != 0 &&
1523 ic->file_size != 0) {
1524 filesize = ic->file_size;
1525 if (filesize > 0) {
1526 for(i = 0; i < ic->nb_streams; i++) {
1527 st = ic->streams[i];
1528 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1529 if (st->duration == AV_NOPTS_VALUE)
1530 st->duration = duration;
1531 }
1532 }
1533 }
1534 }
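/* Editor's note: numeric example of the estimate above. With a file_size of
 * 10,000,000 bytes and a total bit_rate of 800,000 bit/s the estimated
 * duration is 8 * 10,000,000 / 800,000 = 100 seconds, which av_rescale()
 * then expresses in each stream's time_base. */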
1535
1536 #define DURATION_MAX_READ_SIZE 250000
1537
1538 /* only usable for MPEG-PS streams */
1539 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1540 {
1541 AVPacket pkt1, *pkt = &pkt1;
1542 AVStream *st;
1543 int read_size, i, ret;
1544 int64_t end_time;
1545 int64_t filesize, offset, duration;
1546
1547 /* free previous packet */
1548 if (ic->cur_st && ic->cur_st->parser)
1549 av_free_packet(&ic->cur_pkt);
1550 ic->cur_st = NULL;
1551
1552 /* flush packet queue */
1553 flush_packet_queue(ic);
1554
1555 for(i=0;i<ic->nb_streams;i++) {
1556 st = ic->streams[i];
1557 if (st->parser) {
1558 av_parser_close(st->parser);
1559 st->parser= NULL;
1560 }
1561 }
1562
1563 /* we read the first packets to get the first PTS (not fully
1564 accurate, but it is enough for now) */
1565 url_fseek(ic->pb, 0, SEEK_SET);
1566 read_size = 0;
1567 for(;;) {
1568 if (read_size >= DURATION_MAX_READ_SIZE)
1569 break;
1570 /* if all info is available, we can stop */
1571 for(i = 0;i < ic->nb_streams; i++) {
1572 st = ic->streams[i];
1573 if (st->start_time == AV_NOPTS_VALUE)
1574 break;
1575 }
1576 if (i == ic->nb_streams)
1577 break;
1578
1579 ret = av_read_packet(ic, pkt);
1580 if (ret != 0)
1581 break;
1582 read_size += pkt->size;
1583 st = ic->streams[pkt->stream_index];
1584 if (pkt->pts != AV_NOPTS_VALUE) {
1585 if (st->start_time == AV_NOPTS_VALUE)
1586 st->start_time = pkt->pts;
1587 }
1588 av_free_packet(pkt);
1589 }
1590
1591 /* estimate the end time (duration) */
1592 /* XXX: may need to support wrapping */
1593 filesize = ic->file_size;
1594 offset = filesize - DURATION_MAX_READ_SIZE;
1595 if (offset < 0)
1596 offset = 0;
1597
1598 url_fseek(ic->pb, offset, SEEK_SET);
1599 read_size = 0;
1600 for(;;) {
1601 if (read_size >= DURATION_MAX_READ_SIZE)
1602 break;
1603
1604 ret = av_read_packet(ic, pkt);
1605 if (ret != 0)
1606 break;
1607 read_size += pkt->size;
1608 st = ic->streams[pkt->stream_index];
1609 if (pkt->pts != AV_NOPTS_VALUE &&
1610 st->start_time != AV_NOPTS_VALUE) {
1611 end_time = pkt->pts;
1612 duration = end_time - st->start_time;
1613 if (duration > 0) {
1614 if (st->duration == AV_NOPTS_VALUE ||
1615 st->duration < duration)
1616 st->duration = duration;
1617 }
1618 }
1619 av_free_packet(pkt);
1620 }
1621
1622 fill_all_stream_timings(ic);
1623
1624 url_fseek(ic->pb, old_offset, SEEK_SET);
1625 for(i=0; i<ic->nb_streams; i++){
1626 st= ic->streams[i];
1627 st->cur_dts= st->first_dts;
1628 st->last_IP_pts = AV_NOPTS_VALUE;
1629 }
1630 }
1631
1632 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1633 {
1634 int64_t file_size;
1635
1636 /* get the file size, if possible */
1637 if (ic->iformat->flags & AVFMT_NOFILE) {
1638 file_size = 0;
1639 } else {
1640 file_size = url_fsize(ic->pb);
1641 if (file_size < 0)
1642 file_size = 0;
1643 }
1644 ic->file_size = file_size;
1645
1646 if ((!strcmp(ic->iformat->name, "mpeg") ||
1647 !strcmp(ic->iformat->name, "mpegts")) &&
1648 file_size && !url_is_streamed(ic->pb)) {
1649 /* get accurate estimate from the PTSes */
1650 av_estimate_timings_from_pts(ic, old_offset);
1651 } else if (av_has_duration(ic)) {
1652 /* at least one component has timings - we use them for all
1653 the components */
1654 fill_all_stream_timings(ic);
1655 } else {
1656 /* less precise: use bitrate info */
1657 av_estimate_timings_from_bit_rate(ic);
1658 }
1659 av_update_stream_timings(ic);
1660
1661 #if 0
1662 {
1663 int i;
1664 AVStream *st;
1665 for(i = 0;i < ic->nb_streams; i++) {
1666 st = ic->streams[i];
1667 printf("%d: start_time: %0.3f duration: %0.3f\n",
1668 i, (double)st->start_time / AV_TIME_BASE,
1669 (double)st->duration / AV_TIME_BASE);
1670 }
1671 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1672 (double)ic->start_time / AV_TIME_BASE,
1673 (double)ic->duration / AV_TIME_BASE,
1674 ic->bit_rate / 1000);
1675 }
1676 #endif
1677 }
1678
1679 static int has_codec_parameters(AVCodecContext *enc)
1680 {
1681 int val;
1682 switch(enc->codec_type) {
1683 case CODEC_TYPE_AUDIO:
1684 val = enc->sample_rate;
1685 break;
1686 case CODEC_TYPE_VIDEO:
1687 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1688 break;
1689 default:
1690 val = 1;
1691 break;
1692 }
1693 return enc->codec_id != CODEC_ID_NONE && val != 0;
1694 }
1695
1696 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1697 {
1698 int16_t *samples;
1699 AVCodec *codec;
1700 int got_picture, data_size, ret=0;
1701 AVFrame picture;
1702
1703 if(!st->codec->codec){
1704 codec = avcodec_find_decoder(st->codec->codec_id);
1705 if (!codec)
1706 return -1;
1707 ret = avcodec_open(st->codec, codec);
1708 if (ret < 0)
1709 return ret;
1710 }
1711
1712 if(!has_codec_parameters(st->codec)){
1713 switch(st->codec->codec_type) {
1714 case CODEC_TYPE_VIDEO:
1715 ret = avcodec_decode_video(st->codec, &picture,
1716 &got_picture, data, size);
1717 break;
1718 case CODEC_TYPE_AUDIO:
1719 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1720 samples = av_malloc(data_size);
1721 if (!samples)
1722 goto fail;
1723 ret = avcodec_decode_audio2(st->codec, samples,
1724 &data_size, data, size);
1725 av_free(samples);
1726 break;
1727 default:
1728 break;
1729 }
1730 }
1731 fail:
1732 return ret;
1733 }
1734
1735 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
1736 {
1737 AVInputFormat *fmt;
1738 fmt = av_probe_input_format2(pd, 1, &score);
1739
1740 if (fmt) {
1741 if (strncmp(fmt->name, "mp3", 3) == 0)
1742 st->codec->codec_id = CODEC_ID_MP3;
1743 else if (strncmp(fmt->name, "ac3", 3) == 0)
1744 st->codec->codec_id = CODEC_ID_AC3;
1745 }
1746 return !!fmt;
1747 }
1748
1749 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1750 {
1751 while (tags->id != CODEC_ID_NONE) {
1752 if (tags->id == id)
1753 return tags->tag;
1754 tags++;
1755 }
1756 return 0;
1757 }
1758
1759 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1760 {
1761 int i;
1762 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1763 if(tag == tags[i].tag)
1764 return tags[i].id;
1765 }
1766 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1767 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1768 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1769 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1770 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1771 return tags[i].id;
1772 }
1773 return CODEC_ID_NONE;
1774 }
1775
1776 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1777 {
1778 int i;
1779 for(i=0; tags && tags[i]; i++){
1780 int tag= codec_get_tag(tags[i], id);
1781 if(tag) return tag;
1782 }
1783 return 0;
1784 }
1785
1786 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1787 {
1788 int i;
1789 for(i=0; tags && tags[i]; i++){
1790 enum CodecID id= codec_get_id(tags[i], tag);
1791 if(id!=CODEC_ID_NONE) return id;
1792 }
1793 return CODEC_ID_NONE;
1794 }
1795
1796 /* absolute maximum size we read until we abort */
1797 #define MAX_READ_SIZE 5000000
1798
1799 #define MAX_STD_TIMEBASES (60*12+5)
1800 static int get_std_framerate(int i){
1801 if(i<60*12) return i*1001;
1802 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1803 }
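/* Editor's note: get_std_framerate(i) enumerates candidate frame rates
 * scaled by 12*1001: indices below 60*12 cover i/12 fps (1/12 fps steps up
 * to just under 60 fps), and the last five entries cover the NTSC-style
 * rates 24000/1001, 30000/1001, 60000/1001, 12000/1001 and 15000/1001 fps.
 * av_find_stream_info() below matches measured packet durations against
 * these candidates. */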
1804
1805 /*
1806 * Is the time base unreliable?
1807 * This is a heuristic to balance between quick acceptance of the values in
1808 * the headers vs. some extra checks.
1809 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1810 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1811 * And there are "variable" fps files this needs to detect as well.
1812 */
1813 static int tb_unreliable(AVCodecContext *c){
1814 if( c->time_base.den >= 101L*c->time_base.num
1815 || c->time_base.den < 5L*c->time_base.num
1816 /* || c->codec_tag == ff_get_fourcc("DIVX")
1817 || c->codec_tag == ff_get_fourcc("XVID")*/
1818 || c->codec_id == CODEC_ID_MPEG2VIDEO)
1819 return 1;
1820 return 0;
1821 }
1822
1823 int av_find_stream_info(AVFormatContext *ic)
1824 {
1825 int i, count, ret, read_size, j;
1826 AVStream *st;
1827 AVPacket pkt1, *pkt;
1828 int64_t last_dts[MAX_STREAMS];
1829 int duration_count[MAX_STREAMS]={0};
1830 double (*duration_error)[MAX_STD_TIMEBASES];
1831 offset_t old_offset = url_ftell(ic->pb);
1832 int64_t codec_info_duration[MAX_STREAMS]={0};
1833 int codec_info_nb_frames[MAX_STREAMS]={0};
1834 AVProbeData probe_data[MAX_STREAMS];
1835 int codec_identified[MAX_STREAMS]={0};
1836
1837 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1838 if (!duration_error) return AVERROR(ENOMEM);
1839
1840 for(i=0;i<ic->nb_streams;i++) {
1841 st = ic->streams[i];
1842 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1843 /* if(!st->time_base.num)
1844 st->time_base= */
1845 if(!st->codec->time_base.num)
1846 st->codec->time_base= st->time_base;
1847 }
1848 //only for the split stuff
1849 if (!st->parser) {
1850 st->parser = av_parser_init(st->codec->codec_id);
1851 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
1852 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1853 }
1854 }
1855 }
1856
1857 for(i=0;i<MAX_STREAMS;i++){
1858 last_dts[i]= AV_NOPTS_VALUE;
1859 }
1860
1861 memset(probe_data, 0, sizeof(probe_data));
1862 count = 0;
1863 read_size = 0;
1864 for(;;) {
1865 /* check if one codec still needs to be handled */
1866 for(i=0;i<ic->nb_streams;i++) {
1867 st = ic->streams[i];
1868 if (!has_codec_parameters(st->codec))
1869 break;
1870 /* variable fps and no guess at the real fps */
1871 if( tb_unreliable(st->codec)
1872 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1873 break;
1874 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1875 break;
1876 if(st->first_dts == AV_NOPTS_VALUE)
1877 break;
1878 }
1879 if (i == ic->nb_streams) {
1880 /* NOTE: if the format has no header, then we need to read
1881 some packets to get most of the streams, so we cannot
1882 stop here */
1883 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1884 /* if we found the info for all the codecs, we can stop */
1885 ret = count;
1886 break;
1887 }
1888 }
1889 /* we did not get all the codec info, but we read too much data */
1890 if (read_size >= MAX_READ_SIZE) {
1891 ret = count;
1892 break;
1893 }
1894
1895 /* NOTE: a new stream can be added here if the file has no header
1896 (AVFMTCTX_NOHEADER) */
1897 ret = av_read_frame_internal(ic, &pkt1);
1898 if (ret < 0) {
1899 /* EOF or error */
1900 ret = -1; /* we could not have all the codec parameters before EOF */
1901 for(i=0;i<ic->nb_streams;i++) {
1902 st = ic->streams[i];
1903 if (!has_codec_parameters(st->codec)){
1904 char buf[256];
1905 avcodec_string(buf, sizeof(buf), st->codec, 0);
1906 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1907 } else {
1908 ret = 0;
1909 }
1910 }
1911 break;
1912 }
1913
1914 pkt= add_to_pktbuf(ic, &pkt1);
1915 if(av_dup_packet(pkt) < 0)
1916 return AVERROR(ENOMEM);
1917
1918 read_size += pkt->size;
1919
1920 st = ic->streams[pkt->stream_index];
1921 if(codec_info_nb_frames[st->index]>1)
1922 codec_info_duration[st->index] += pkt->duration;
1923 if (pkt->duration != 0)
1924 codec_info_nb_frames[st->index]++;
1925
1926 {
1927 int index= pkt->stream_index;
1928 int64_t last= last_dts[index];
1929 int64_t duration= pkt->dts - last;
1930
1931 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1932 double dur= duration * av_q2d(st->time_base);
1933
1934 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1935 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
1936 if(duration_count[index] < 2)
1937 memset(duration_error[index], 0, sizeof(*duration_error));
1938 for(i=1; i<MAX_STD_TIMEBASES; i++){
1939 int framerate= get_std_framerate(i);
1940 int ticks= lrintf(dur*framerate/(1001*12));
1941 double error= dur - ticks*1001*12/(double)framerate;
1942 duration_error[index][i] += error*error;
1943 }
1944 duration_count[index]++;
1945 }
1946 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
1947 last_dts[pkt->stream_index]= pkt->dts;
1948
1949 if (st->codec->codec_id == CODEC_ID_NONE) {
1950 AVProbeData *pd = &(probe_data[st->index]);
1951 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
1952 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
1953 pd->buf_size += pkt->size;
1954 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
1955 }
1956 }
1957 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1958 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1959 if(i){
1960 st->codec->extradata_size= i;
1961 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1962 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1963 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1964 }
1965 }
1966
1967         /* If we still have no information, try to open the codec and
1968            decode one frame. We try to avoid that in most cases, as it
1969            takes longer and uses more memory. For MPEG-4, we need to
1970            decode for QuickTime. */
1971 if (!has_codec_parameters(st->codec) /*&&
1972 (st->codec->codec_id == CODEC_ID_FLV1 ||
1973 st->codec->codec_id == CODEC_ID_H264 ||
1974 st->codec->codec_id == CODEC_ID_H263 ||
1975 st->codec->codec_id == CODEC_ID_H261 ||
1976 st->codec->codec_id == CODEC_ID_VORBIS ||
1977 st->codec->codec_id == CODEC_ID_MJPEG ||
1978 st->codec->codec_id == CODEC_ID_PNG ||
1979 st->codec->codec_id == CODEC_ID_PAM ||
1980 st->codec->codec_id == CODEC_ID_PGM ||
1981 st->codec->codec_id == CODEC_ID_PGMYUV ||
1982 st->codec->codec_id == CODEC_ID_PBM ||
1983 st->codec->codec_id == CODEC_ID_PPM ||
1984 st->codec->codec_id == CODEC_ID_SHORTEN ||
1985 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1986 try_decode_frame(st, pkt->data, pkt->size);
1987
1988 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
1989 break;
1990 }
1991 count++;
1992 }
1993
1994 // close codecs which were opened in try_decode_frame()
1995 for(i=0;i<ic->nb_streams;i++) {
1996 st = ic->streams[i];
1997 if(st->codec->codec)
1998 avcodec_close(st->codec);
1999 }
2000 for(i=0;i<ic->nb_streams;i++) {
2001 st = ic->streams[i];
2002 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2003 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2004 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2005
2006 if(duration_count[i]
2007 && tb_unreliable(st->codec) /*&&
2008 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2009 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2010 double best_error= 2*av_q2d(st->time_base);
2011 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2012
2013 for(j=1; j<MAX_STD_TIMEBASES; j++){
2014 double error= duration_error[i][j] * get_std_framerate(j);
2015 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2016 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2017 if(error < best_error){
2018 best_error= error;
2019 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2020 }
2021 }
2022 }
2023
2024 if (!st->r_frame_rate.num){
2025 if( st->codec->time_base.den * (int64_t)st->time_base.num
2026 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2027 st->r_frame_rate.num = st->codec->time_base.den;
2028 st->r_frame_rate.den = st->codec->time_base.num;
2029 }else{
2030 st->r_frame_rate.num = st->time_base.den;
2031 st->r_frame_rate.den = st->time_base.num;
2032 }
2033 }
2034 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2035 if (st->codec->codec_id == CODEC_ID_NONE && probe_data[st->index].buf_size > 0) {
2036 codec_identified[st->index] = set_codec_from_probe_data(st, &(probe_data[st->index]), 1);
2037 if (codec_identified[st->index]) {
2038 st->need_parsing = AVSTREAM_PARSE_FULL;
2039 }
2040 }
2041 if(!st->codec->bits_per_sample)
2042 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
2043 }
2044 }
2045
2046 av_estimate_timings(ic, old_offset);
2047
2048 for(i=0;i<ic->nb_streams;i++) {
2049 st = ic->streams[i];
2050 if (codec_identified[st->index])
2051 break;
2052 }
2053 //FIXME this is a mess
2054 if(i!=ic->nb_streams){
2055 av_read_frame_flush(ic);
2056 for(i=0;i<ic->nb_streams;i++) {
2057 st = ic->streams[i];
2058 if (codec_identified[st->index]) {
2059                 av_seek_frame(ic, st->index, 0, 0);
2060 }
2061 st->cur_dts= st->first_dts;
2062 }
2063 url_fseek(ic->pb, ic->data_offset, SEEK_SET);
2064 }
2065
2066 #if 0
2067 /* correct DTS for B-frame streams with no timestamps */
2068 for(i=0;i<ic->nb_streams;i++) {
2069 st = ic->streams[i];
2070 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2071 if(b-frames){
2072 ppktl = &ic->packet_buffer;
2073 while(ppkt1){
2074 if(ppkt1->stream_index != i)
2075 continue;
2076 if(ppkt1->pkt->dts < 0)
2077 break;
2078 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2079 break;
2080 ppkt1->pkt->dts -= delta;
2081 ppkt1= ppkt1->next;
2082 }
2083 if(ppkt1)
2084 continue;
2085 st->cur_dts -= delta;
2086 }
2087 }
2088 }
2089 #endif
2090
2091 av_free(duration_error);
2092 for(i=0;i<MAX_STREAMS;i++){
2093 av_freep(&(probe_data[i].buf));
2094 }
2095
2096 return ret;
2097 }
2098
2099 /*******************************************************/
2100
2101 int av_read_play(AVFormatContext *s)
2102 {
2103 if (s->iformat->read_play)
2104 return s->iformat->read_play(s);
2105 if (s->pb)
2106 return av_url_read_fpause(s->pb, 0);
2107 return AVERROR(ENOSYS);
2108 }
2109
2110 int av_read_pause(AVFormatContext *s)
2111 {
2112 if (s->iformat->read_pause)
2113 return s->iformat->read_pause(s);
2114 if (s->pb)
2115 return av_url_read_fpause(s->pb, 1);
2116 return AVERROR(ENOSYS);
2117 }
2118
2119 void av_close_input_stream(AVFormatContext *s)
2120 {
2121 int i;
2122 AVStream *st;
2123
2124 /* free previous packet */
2125 if (s->cur_st && s->cur_st->parser)
2126 av_free_packet(&s->cur_pkt);
2127
2128 if (s->iformat->read_close)
2129 s->iformat->read_close(s);
2130 for(i=0;i<s->nb_streams;i++) {
2131 /* free all data in a stream component */
2132 st = s->streams[i];
2133 if (st->parser) {
2134 av_parser_close(st->parser);
2135 }
2136 av_free(st->index_entries);
2137 av_free(st->codec->extradata);
2138 av_free(st->codec);
2139 av_free(st->filename);
2140 av_free(st);
2141 }
2142 for(i=s->nb_programs-1; i>=0; i--) {
2143 av_freep(&s->programs[i]->provider_name);
2144 av_freep(&s->programs[i]->name);
2145 av_freep(&s->programs[i]->stream_index);
2146 av_freep(&s->programs[i]);
2147 }
2148 av_freep(&s->programs);
2149 flush_packet_queue(s);
2150 av_freep(&s->priv_data);
2151 while(s->num_chapters--) {
2152 av_free(s->chapters[s->num_chapters]->title);
2153 av_free(s->chapters[s->num_chapters]);
2154 }
2155 av_freep(&s->chapters);
2156 av_free(s);
2157 }
2158
2159 void av_close_input_file(AVFormatContext *s)
2160 {
2161 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2162 av_close_input_stream(s);
2163 if (pb)
2164 url_fclose(pb);
2165 }
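
/*
 * Illustrative sketch (not part of the library): the usual demuxing call
 * sequence around av_close_input_file().  "input.avi" is a placeholder and
 * error handling is reduced to a minimum.
 *
 *     AVFormatContext *ic = NULL;
 *     AVPacket pkt;
 *
 *     av_register_all();
 *     if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) < 0)
 *         return -1;
 *     if (av_find_stream_info(ic) < 0)
 *         return -1;
 *     dump_format(ic, 0, "input.avi", 0);
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // pkt.stream_index selects the matching entry in ic->streams[]
 *         av_free_packet(&pkt);
 *     }
 *     av_close_input_file(ic); // also closes ic->pb unless AVFMT_NOFILE is set
 */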
2166
2167 AVStream *av_new_stream(AVFormatContext *s, int id)
2168 {
2169 AVStream *st;
2170 int i;
2171
2172 if (s->nb_streams >= MAX_STREAMS)
2173 return NULL;
2174
2175 st = av_mallocz(sizeof(AVStream));
2176 if (!st)
2177 return NULL;
2178
2179 st->codec= avcodec_alloc_context();
2180 if (s->iformat) {
2181 /* no default bitrate if decoding */
2182 st->codec->bit_rate = 0;
2183 }
2184 st->index = s->nb_streams;
2185 st->id = id;
2186 st->start_time = AV_NOPTS_VALUE;
2187 st->duration = AV_NOPTS_VALUE;
2188 st->cur_dts = AV_NOPTS_VALUE;
2189 st->first_dts = AV_NOPTS_VALUE;
2190
2191 /* default pts setting is MPEG-like */
2192 av_set_pts_info(st, 33, 1, 90000);
2193 st->last_IP_pts = AV_NOPTS_VALUE;
2194 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2195 st->pts_buffer[i]= AV_NOPTS_VALUE;
2196
2197 s->streams[s->nb_streams++] = st;
2198 return st;
2199 }
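
/*
 * Illustrative sketch: how a demuxer's read_header() typically uses
 * av_new_stream().  The codec id and the 1/1000 time base are arbitrary
 * placeholders, not requirements of the API.
 *
 *     AVStream *st = av_new_stream(s, 0);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->codec->codec_type = CODEC_TYPE_VIDEO;
 *     st->codec->codec_id   = CODEC_ID_MPEG4;  // placeholder
 *     av_set_pts_info(st, 64, 1, 1000);        // timestamps in milliseconds
 */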
2200
2201 AVProgram *av_new_program(AVFormatContext *ac, int id)
2202 {
2203 AVProgram *program=NULL;
2204 int i;
2205
2206 #ifdef DEBUG_SI
2207 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2208 #endif
2209
2210 for(i=0; i<ac->nb_programs; i++)
2211 if(ac->programs[i]->id == id)
2212 program = ac->programs[i];
2213
2214 if(!program){
2215 program = av_mallocz(sizeof(AVProgram));
2216 if (!program)
2217 return NULL;
2218 dynarray_add(&ac->programs, &ac->nb_programs, program);
2219 program->discard = AVDISCARD_NONE;
2220 }
2221 program->id = id;
2222
2223 return program;
2224 }
2225
2226 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2227 {
2228 assert(!provider_name == !name);
2229 if(name){
2230 av_free(program->provider_name);
2231 av_free(program-> name);
2232 program->provider_name = av_strdup(provider_name);
2233 program-> name = av_strdup( name);
2234 }
2235 }
2236
2237 int ff_new_chapter(AVFormatContext *s, int id, int64_t start, int64_t end, const char *title)
2238 {
2239 AVChapter *chapter = NULL;
2240 int i;
2241
2242 for(i=0; i<s->num_chapters; i++)
2243 if(s->chapters[i]->id == id)
2244 chapter = s->chapters[i];
2245
2246 if(!chapter){
2247 chapter= av_mallocz(sizeof(AVChapter));
2248 if(!chapter)
2249 return AVERROR(ENOMEM);
2250 dynarray_add(&s->chapters, &s->num_chapters, chapter);
2251 }
2252 if(chapter->title)
2253 av_free(chapter->title);
2254 if (title)
2255 chapter->title = av_strdup(title);
2256 chapter->id = id;
2257 chapter->start = start;
2258 chapter->end = end;
2259
2260 return 0;
2261 }
2262
2263 /************************************************************/
2264 /* output media file */
2265
2266 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2267 {
2268 int ret;
2269
2270 if (s->oformat->priv_data_size > 0) {
2271 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2272 if (!s->priv_data)
2273 return AVERROR(ENOMEM);
2274 } else
2275 s->priv_data = NULL;
2276
2277 if (s->oformat->set_parameters) {
2278 ret = s->oformat->set_parameters(s, ap);
2279 if (ret < 0)
2280 return ret;
2281 }
2282 return 0;
2283 }
2284
2285 int av_write_header(AVFormatContext *s)
2286 {
2287 int ret, i;
2288 AVStream *st;
2289
2290 // some sanity checks
2291 for(i=0;i<s->nb_streams;i++) {
2292 st = s->streams[i];
2293
2294 switch (st->codec->codec_type) {
2295 case CODEC_TYPE_AUDIO:
2296 if(st->codec->sample_rate<=0){
2297 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2298 return -1;
2299 }
2300 break;
2301 case CODEC_TYPE_VIDEO:
2302 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2303 av_log(s, AV_LOG_ERROR, "time base not set\n");
2304 return -1;
2305 }
2306 if(st->codec->width<=0 || st->codec->height<=0){
2307 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2308 return -1;
2309 }
2310 break;
2311 }
2312
2313 if(s->oformat->codec_tag){
2314 if(st->codec->codec_tag){
2315 //FIXME
2316 //check that tag + id is in the table
2317 //if neither is in the table -> OK
2318 //if tag is in the table with another id -> FAIL
2319 //if id is in the table with another tag -> FAIL unless strict < ?
2320 }else
2321 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2322 }
2323 }
2324
2325 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2326 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2327 if (!s->priv_data)
2328 return AVERROR(ENOMEM);
2329 }
2330
2331 if(s->oformat->write_header){
2332 ret = s->oformat->write_header(s);
2333 if (ret < 0)
2334 return ret;
2335 }
2336
2337 /* init PTS generation */
2338 for(i=0;i<s->nb_streams;i++) {
2339 int64_t den = AV_NOPTS_VALUE;
2340 st = s->streams[i];
2341
2342 switch (st->codec->codec_type) {
2343 case CODEC_TYPE_AUDIO:
2344 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2345 break;
2346 case CODEC_TYPE_VIDEO:
2347 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2348 break;
2349 default:
2350 break;
2351 }
2352 if (den != AV_NOPTS_VALUE) {
2353 if (den <= 0)
2354 return AVERROR_INVALIDDATA;
2355 av_frac_init(&st->pts, 0, 0, den);
2356 }
2357 }
2358 return 0;
2359 }
2360
2361 //FIXME merge with compute_pkt_fields
2362 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2363 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2364 int num, den, frame_size, i;
2365
2366 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2367
2368 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2369 return -1;*/
2370
2371 /* duration field */
2372 if (pkt->duration == 0) {
2373 compute_frame_duration(&num, &den, st, NULL, pkt);
2374 if (den && num) {
2375 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2376 }
2377 }
2378
2379 //XXX/FIXME this is a temporary hack until all encoders output pts
2380 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2381 pkt->dts=
2382 // pkt->pts= st->cur_dts;
2383 pkt->pts= st->pts.val;
2384 }
2385
2386 //calculate dts from pts
2387 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2388 st->pts_buffer[0]= pkt->pts;
2389 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2390 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2391 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2392 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2393
2394 pkt->dts= st->pts_buffer[0];
2395 }
2396
2397 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2398         av_log(NULL, AV_LOG_ERROR, "error, non-monotonic timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2399 return -1;
2400 }
2401 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2402 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2403 return -1;
2404 }
2405
2406 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2407 st->cur_dts= pkt->dts;
2408 st->pts.val= pkt->dts;
2409
2410 /* update pts */
2411 switch (st->codec->codec_type) {
2412 case CODEC_TYPE_AUDIO:
2413 frame_size = get_audio_frame_size(st->codec, pkt->size);
2414
2415         /* HACK/FIXME: we skip the initial zero-sized packets as they most
2416            likely correspond to the encoder delay, but it would be better
2417            if we had the real timestamps from the encoder. */
2418 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2419 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2420 }
2421 break;
2422 case CODEC_TYPE_VIDEO:
2423 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2424 break;
2425 default:
2426 break;
2427 }
2428 return 0;
2429 }
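
/*
 * Worked example of the dts-from-pts derivation above, with delay = 1
 * (one frame of B-frame reordering) and pkt->duration = 1:
 *
 *     pts in decode order:  0      2     1     4     3
 *     pts_buffer (sorted): [-1,0] [0,2] [1,2] [2,4] [3,4]
 *     resulting dts:       -1      0     1     2     3
 *
 * The derived dts values increase monotonically and never exceed the pts of
 * the packet they belong to.
 */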
2430
2431 static void truncate_ts(AVStream *st, AVPacket *pkt){
2432 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2433
2434 // if(pkt->dts < 0)
2435 //        pkt->dts= 0;  //this happens with low_delay=0 and B-frames; FIXME: needs further investigation into what we should do here
2436
2437 if (pkt->pts != AV_NOPTS_VALUE)
2438 pkt->pts &= pts_mask;
2439 if (pkt->dts != AV_NOPTS_VALUE)
2440 pkt->dts &= pts_mask;
2441 }
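
/*
 * Worked example: with the default pts_wrap_bits = 33 set by av_new_stream(),
 * pts_mask = (2LL << 32) - 1 = 0x1FFFFFFFF, so pts and dts are reduced
 * modulo 2^33, matching the 33-bit wrap of MPEG 90 kHz timestamps.
 */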
2442
2443 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2444 {
2445 int ret;
2446
2447 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2448 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2449 return ret;
2450
2451 truncate_ts(s->streams[pkt->stream_index], pkt);
2452
2453 ret= s->oformat->write_packet(s, pkt);
2454 if(!ret)
2455 ret= url_ferror(s->pb);
2456 return ret;
2457 }
2458
2459 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2460 AVPacketList *pktl, **next_point, *this_pktl;
2461 int stream_count=0;
2462 int streams[MAX_STREAMS];
2463
2464 if(pkt){
2465 AVStream *st= s->streams[ pkt->stream_index];
2466
2467 // assert(pkt->destruct != av_destruct_packet); //FIXME
2468
2469 this_pktl = av_mallocz(sizeof(AVPacketList));
2470 this_pktl->pkt= *pkt;
2471 if(pkt->destruct == av_destruct_packet)
2472 pkt->destruct= NULL; // not shared -> must keep original from being freed
2473 else
2474 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2475
2476 next_point = &s->packet_buffer;
2477 while(*next_point){
2478 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2479 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2480 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2481 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2482 break;
2483 next_point= &(*next_point)->next;
2484 }
2485 this_pktl->next= *next_point;
2486 *next_point= this_pktl;
2487 }
2488
2489 memset(streams, 0, sizeof(streams));
2490 pktl= s->packet_buffer;
2491 while(pktl){
2492 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2493 if(streams[ pktl->pkt.stream_index ] == 0)
2494 stream_count++;
2495 streams[ pktl->pkt.stream_index ]++;
2496 pktl= pktl->next;
2497 }
2498
2499 if(s->nb_streams == stream_count || (flush && stream_count)){
2500 pktl= s->packet_buffer;
2501 *out= pktl->pkt;
2502
2503 s->packet_buffer= pktl->next;
2504 av_freep(&pktl);
2505 return 1;
2506 }else{
2507 av_init_packet(out);
2508 return 0;
2509 }
2510 }
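
/*
 * Worked example of the cross-multiplied dts comparison above, which orders
 * packets by dts without converting between time bases:
 *
 *     buffered packet: time_base 1/1000,  dts 1500  (= 1.5 s)
 *     new packet:      time_base 1/90000, dts 90000 (= 1.0 s)
 *
 *     left  = 1 * 90000 = 90000,   right = 1 * 1000 = 1000
 *     1500 * 90000 = 135000000  >  90000 * 1000 = 90000000
 *
 * so the new packet is linked in before the buffered one, keeping the
 * packet buffer sorted by dts across all streams.
 */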
2511
2512 /**
2513 * Interleaves an AVPacket correctly so it can be muxed.
2514 * @param out the interleaved packet will be output here
2515 * @param in the input packet
2516 * @param flush 1 if no further packets are available as input and all
2517 * remaining packets should be output
2518 * @return 1 if a packet was output, 0 if no packet could be output,
2519 * < 0 if an error occurred
2520 */
2521 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2522 if(s->oformat->interleave_packet)
2523 return s->oformat->interleave_packet(s, out, in, flush);
2524 else
2525 return av_interleave_packet_per_dts(s, out, in, flush);
2526 }
2527
2528 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2529 AVStream *st= s->streams[ pkt->stream_index];
2530
2531     //FIXME/XXX/HACK drop zero-sized packets
2532 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2533 return 0;
2534
2535 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2536 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2537 return -1;
2538
2539 if(pkt->dts == AV_NOPTS_VALUE)
2540 return -1;
2541
2542 for(;;){
2543 AVPacket opkt;
2544 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2545 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2546 return ret;
2547
2548 truncate_ts(s->streams[opkt.stream_index], &opkt);
2549 ret= s->oformat->write_packet(s, &opkt);
2550
2551 av_free_packet(&opkt);
2552 pkt= NULL;
2553
2554 if(ret<0)
2555 return ret;
2556 if(url_ferror(s->pb))
2557 return url_ferror(s->pb);
2558 }
2559 }
2560
2561 int av_write_trailer(AVFormatContext *s)
2562 {
2563 int ret, i;
2564
2565 for(;;){
2566 AVPacket pkt;
2567 ret= av_interleave_packet(s, &pkt, NULL, 1);
2568 if(ret<0) //FIXME cleanup needed for ret<0 ?
2569 goto fail;
2570 if(!ret)
2571 break;
2572
2573 truncate_ts(s->streams[pkt.stream_index], &pkt);
2574 ret= s->oformat->write_packet(s, &pkt);
2575
2576 av_free_packet(&pkt);
2577
2578 if(ret<0)
2579 goto fail;
2580 if(url_ferror(s->pb))
2581 goto fail;
2582 }
2583
2584 if(s->oformat->write_trailer)
2585 ret = s->oformat->write_trailer(s);
2586 fail:
2587 if(ret == 0)
2588 ret=url_ferror(s->pb);
2589 for(i=0;i<s->nb_streams;i++)
2590 av_freep(&s->streams[i]->priv_data);
2591 av_freep(&s->priv_data);
2592 return ret;
2593 }
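
/*
 * Illustrative sketch (not part of the library): the usual muxing call
 * sequence av_write_header() -> av_interleaved_write_frame() ->
 * av_write_trailer().  "out.mpg" and the stream parameters are placeholders
 * and error handling is omitted for brevity.
 *
 *     AVFormatContext *oc = av_alloc_format_context();
 *     AVStream *st;
 *
 *     oc->oformat = guess_format(NULL, "out.mpg", NULL);
 *     snprintf(oc->filename, sizeof(oc->filename), "out.mpg");
 *
 *     st = av_new_stream(oc, 0);
 *     st->codec->codec_type    = CODEC_TYPE_VIDEO;
 *     st->codec->codec_id      = oc->oformat->video_codec;
 *     st->codec->width         = 352;
 *     st->codec->height        = 288;
 *     st->codec->time_base.num = 1;
 *     st->codec->time_base.den = 25;
 *
 *     av_set_parameters(oc, NULL);
 *     url_fopen(&oc->pb, "out.mpg", URL_WRONLY);
 *     av_write_header(oc);
 *     // for each encoded packet: av_interleaved_write_frame(oc, &pkt);
 *     av_write_trailer(oc);
 *     url_fclose(oc->pb);
 */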
2594
2595 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2596 {
2597 int i, j;
2598 AVProgram *program=NULL;
2599 void *tmp;
2600
2601 for(i=0; i<ac->nb_programs; i++){
2602 if(ac->programs[i]->id != progid)
2603 continue;
2604 program = ac->programs[i];
2605 for(j=0; j<program->nb_stream_indexes; j++)
2606 if(program->stream_index[j] == idx)
2607 return;
2608
2609 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2610 if(!tmp)
2611 return;
2612 program->stream_index = tmp;
2613 program->stream_index[program->nb_stream_indexes++] = idx;
2614 return;
2615 }
2616 }
2617
2618 /* "user interface" functions */
2619 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2620 {
2621 char buf[256];
2622 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2623 AVStream *st = ic->streams[i];
2624 int g = ff_gcd(st->time_base.num, st->time_base.den);
2625 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2626 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2627     /* the PID is important information, so we display it */
2628 /* XXX: add a generic system */
2629 if (flags & AVFMT_SHOW_IDS)
2630 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2631 if (strlen(st->language) > 0)
2632 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2633 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2634 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2635 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2636 if(st->r_frame_rate.den && st->r_frame_rate.num)
2637 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2638 /* else if(st->time_base.den && st->time_base.num)
2639 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2640 else
2641 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2642 }
2643 av_log(NULL, AV_LOG_INFO, "\n");
2644 }
2645
2646 void dump_format(AVFormatContext *ic,
2647 int index,
2648 const char *url,
2649 int is_output)
2650 {
2651 int i;
2652
2653 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2654 is_output ? "Output" : "Input",
2655 index,
2656 is_output ? ic->oformat->name : ic->iformat->name,
2657 is_output ? "to" : "from", url);
2658 if (!is_output) {
2659 av_log(NULL, AV_LOG_INFO, " Duration: ");
2660 if (ic->duration != AV_NOPTS_VALUE) {
2661 int hours, mins, secs, us;
2662 secs = ic->duration / AV_TIME_BASE;
2663 us = ic->duration % AV_TIME_BASE;
2664 mins = secs / 60;
2665 secs %= 60;
2666 hours = mins / 60;
2667 mins %= 60;
2668 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2669 (100 * us) / AV_TIME_BASE);
2670 } else {
2671 av_log(NULL, AV_LOG_INFO, "N/A");
2672 }
2673 if (ic->start_time != AV_NOPTS_VALUE) {
2674 int secs, us;
2675 av_log(NULL, AV_LOG_INFO, ", start: ");
2676 secs = ic->start_time / AV_TIME_BASE;
2677 us = ic->start_time % AV_TIME_BASE;
2678 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2679 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2680 }
2681 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2682 if (ic->bit_rate) {
2683 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2684 } else {
2685 av_log(NULL, AV_LOG_INFO, "N/A");
2686 }
2687 av_log(NULL, AV_LOG_INFO, "\n");
2688 }
2689 if(ic->nb_programs) {
2690 int j, k;
2691 for(j=0; j<ic->nb_programs; j++) {
2692 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2693 ic->programs[j]->name ? ic->programs[j]->name : "");
2694 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2695 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2696 }
2697 } else
2698 for(i=0;i<ic->nb_streams;i++)
2699 dump_stream_format(ic, i, index, is_output);
2700 }
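
/*
 * Typical use: dump_format(ic, 0, filename, 0) after av_find_stream_info()
 * prints the "Input #0, ..." report; with is_output = 1 it describes an
 * output context instead.
 */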
2701
2702 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2703 {
2704 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2705 }
2706
2707 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2708 {
2709 AVRational frame_rate;
2710 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2711 *frame_rate_num= frame_rate.num;
2712 *frame_rate_den= frame_rate.den;
2713 return ret;
2714 }
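
/*
 * Illustrative example: parse_frame_rate(&num, &den, "30000/1001") sets
 * num = 30000 and den = 1001 and returns 0 on success; the underlying
 * av_parse_video_frame_rate() also accepts plain integers such as "25" and,
 * as far as that parser supports them, common abbreviations such as "ntsc".
 */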
2715
2716 /**
2717 * Gets the current time in microseconds.
2718 */
2719 int64_t av_gettime(void)
2720 {
2721 struct timeval tv;
2722 gettimeofday(&tv,NULL);
2723 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2724 }
2725
2726 int64_t parse_date(const char *datestr, int duration)
2727 {
2728 const char *p;
2729 int64_t t;
2730 struct tm dt;
2731 int i;
2732 static const char *date_fmt[] = {
2733 "%Y-%m-%d",
2734 "%Y%m%d",
2735 };
2736 static const char *time_fmt[] = {
2737 "%H:%M:%S",
2738 "%H%M%S",
2739 };
2740 const char *q;
2741 int is_utc, len;
2742 char lastch;
2743 int negative = 0;
2744
2745 #undef time
2746 time_t now = time(0);
2747
2748 len = strlen(datestr);
2749 if (len > 0)
2750 lastch = datestr[len - 1];
2751 else
2752 lastch = '\0';
2753 is_utc = (lastch == 'z' || lastch == 'Z');
2754
2755 memset(&dt, 0, sizeof(dt));
2756
2757 p = datestr;
2758 q = NULL;
2759 if (!duration) {
2760 /* parse the year-month-day part */
2761 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2762 q = small_strptime(p, date_fmt[i], &dt);
2763 if (q) {
2764 break;
2765 }
2766 }
2767
2768 /* if the year-month-day part is missing, then take the
2769 * current year-month-day time */
2770 if (!q) {
2771 if (is_utc) {
2772 dt = *gmtime(&now);
2773 } else {
2774 dt = *localtime(&now);
2775 }
2776 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2777 } else {
2778 p = q;
2779 }
2780
2781 if (*p == 'T' || *p == 't' || *p == ' ')
2782 p++;
2783
2784 /* parse the hour-minute-second part */
2785 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2786 q = small_strptime(p, time_fmt[i], &dt);
2787 if (q) {
2788 break;
2789 }
2790 }
2791 } else {
2792 /* parse datestr as a duration */
2793 if (p[0] == '-') {
2794 negative = 1;
2795 ++p;
2796 }
2797 /* parse datestr as HH:MM:SS */
2798 q = small_strptime(p, time_fmt[0], &dt);
2799 if (!q) {
2800 /* parse datestr as S+ */
2801 dt.tm_sec = strtol(p, (char **)&q, 10);
2802 if (q == p)
2803 /* the parsing didn't succeed */
2804 return INT64_MIN;
2805 dt.tm_min = 0;
2806 dt.tm_hour = 0;
2807 }
2808 }
2809
2810 /* Now we have all the fields that we can get */
2811 if (!q) {
2812 return INT64_MIN;
2813 }
2814
2815 if (duration) {
2816 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2817 } else {
2818 dt.tm_isdst = -1; /* unknown */
2819 if (is_utc) {
2820 t = mktimegm(&dt);
2821 } else {
2822 t = mktime(&dt);
2823 }
2824 }
2825
2826 t *= 1000000;
2827
2828     /* parse the fractional second part (".m...") */
2829 if (*q == '.') {
2830 int val, n;
2831 q++;
2832 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2833 if (!isdigit(*q))
2834 break;
2835 val += n * (*q - '0');
2836 }
2837 t += val;
2838 }
2839 return negative ? -t : t;
2840 }
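
/*
 * Worked examples (values derived from the code above):
 *
 *     parse_date("00:01:30.500", 1) ->  90500000  (90.5 s duration, in us)
 *     parse_date("-45", 1)          -> -45000000  (negative 45 s duration)
 *     parse_date("2008-03-01T12:00:00Z", 0)
 *         -> microseconds since the Unix epoch for that UTC date and time
 *
 * A string that cannot be parsed yields INT64_MIN.
 */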
2841
2842 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2843 {
2844 const char *p;
2845 char tag[128], *q;
2846
2847 p = info;
2848 if (*p == '?')
2849 p++;
2850 for(;;) {
2851 q = tag;
2852 while (*p != '\0' && *p != '=' && *p != '&') {
2853 if ((q - tag) < sizeof(tag) - 1)
2854 *q++ = *p;
2855 p++;
2856 }
2857 *q = '\0';
2858 q = arg;
2859 if (*p == '=') {
2860 p++;
2861 while (*p != '&' && *p != '\0') {
2862 if ((q - arg) < arg_size - 1) {
2863 if (*p == '+')
2864 *q++ = ' ';
2865 else
2866 *q++ = *p;
2867 }
2868 p++;
2869 }
2870 *q = '\0';
2871 }
2872 if (!strcmp(tag, tag1))
2873 return 1;
2874 if (*p != '&')
2875 break;
2876 p++;
2877 }
2878 return 0;
2879 }
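
/*
 * Illustrative example with a URL-style parameter string:
 *
 *     char arg[32];
 *     find_info_tag(arg, sizeof(arg), "rate", "?codec=h264&rate=25")
 *
 * returns 1 with arg = "25".  '+' characters in a value are decoded as
 * spaces, and 0 is returned if the tag is not present.
 */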
2880
2881 int av_get_frame_filename(char *buf, int buf_size,
2882 const char *path, int number)
2883 {
2884 const char *p;
2885 char *q, buf1[20], c;
2886 int nd, len, percentd_found;
2887
2888 q = buf;
2889 p = path;
2890 percentd_found = 0;
2891 for(;;) {
2892 c = *p++;
2893 if (c == '\0')
2894 break;
2895 if (c == '%') {
2896 do {
2897 nd = 0;
2898 while (isdigit(*p)) {
2899 nd = nd * 10 + *p++ - '0';
2900 }
2901 c = *p++;
2902 } while (isdigit(c));
2903
2904 switch(c) {
2905 case '%':
2906 goto addchar;
2907 case 'd':
2908 if (percentd_found)
2909 goto fail;
2910 percentd_found = 1;
2911 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2912 len = strlen(buf1);
2913 if ((q - buf + len) > buf_size - 1)
2914 goto fail;
2915 memcpy(q, buf1, len);
2916 q += len;
2917 break;
2918 default:
2919 goto fail;
2920 }
2921 } else {
2922 addchar:
2923 if ((q - buf) < buf_size - 1)
2924 *q++ = c;
2925 }
2926 }
2927 if (!percentd_found)
2928 goto fail;
2929 *q = '\0';
2930 return 0;
2931 fail:
2932 *q = '\0';
2933 return -1;
2934 }
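
/*
 * Illustrative example:
 *
 *     char name[32];
 *     av_get_frame_filename(name, sizeof(name), "frame%04d.png", 7)
 *
 * returns 0 with name = "frame0007.png".  A path containing no %d-style
 * sequence, or more than one, makes the function fail and return -1.
 */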
2935
2936 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
2937 {
2938 int len, i, j, c;
2939 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2940
2941 for(i=0;i<size;i+=16) {
2942 len = size - i;
2943 if (len > 16)
2944 len = 16;
2945 PRINT("%08x ", i);
2946 for(j=0;j<16;j++) {
2947 if (j < len)
2948 PRINT(" %02x", buf[i+j]);
2949 else
2950 PRINT(" ");
2951 }
2952 PRINT(" ");
2953 for(j=0;j<len;j++) {
2954 c = buf[i+j];
2955 if (c < ' ' || c > '~')
2956 c = '.';
2957 PRINT("%c", c);
2958 }
2959 PRINT("\n");
2960 }
2961 #undef PRINT
2962 }
2963
2964 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2965 {
2966 hex_dump_internal(NULL, f, 0, buf, size);
2967 }
2968
2969 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
2970 {
2971 hex_dump_internal(avcl, NULL, level, buf, size);
2972 }
2973
2974 //FIXME needs to know the time_base
2975 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
2976 {
2977 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2978 PRINT("stream #%d:\n", pkt->stream_index);
2979 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2980 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2981 /* DTS is _always_ valid after av_read_frame() */
2982 PRINT(" dts=");
2983 if (pkt->dts == AV_NOPTS_VALUE)
2984 PRINT("N/A");
2985 else
2986 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
2987 /* PTS may not be known if B-frames are present. */
2988 PRINT(" pts=");
2989 if (pkt->pts == AV_NOPTS_VALUE)
2990 PRINT("N/A");
2991 else
2992 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
2993 PRINT("\n");
2994 PRINT(" size=%d\n", pkt->size);
2995 #undef PRINT
2996 if (dump_payload)
2997 av_hex_dump(f, pkt->data, pkt->size);
2998 }
2999
3000 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3001 {
3002 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3003 }
3004
3005 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3006 {
3007 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
3008 }
3009
3010 void url_split(char *proto, int proto_size,
3011 char *authorization, int authorization_size,
3012 char *hostname, int hostname_size,
3013 int *port_ptr,
3014 char *path, int path_size,
3015 const char *url)
3016 {
3017 const char *p, *ls, *at, *col, *brk;
3018
3019 if (port_ptr) *port_ptr = -1;
3020 if (proto_size > 0) proto[0] = 0;
3021 if (authorization_size > 0) authorization[0] = 0;
3022 if (hostname_size > 0) hostname[0] = 0;
3023 if (path_size > 0) path[0] = 0;
3024
3025 /* parse protocol */
3026 if ((p = strchr(url, ':'))) {
3027 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3028 p++; /* skip ':' */
3029 if (*p == '/') p++;
3030 if (*p == '/') p++;
3031 } else {
3032 /* no protocol means plain filename */
3033 av_strlcpy(path, url, path_size);
3034 return;
3035 }
3036
3037 /* separate path from hostname */
3038 ls = strchr(p, '/');
3039 if(!ls)
3040 ls = strchr(p, '?');
3041 if(ls)
3042 av_strlcpy(path, ls, path_size);
3043 else
3044 ls = &p[strlen(p)]; // XXX
3045
3046 /* the rest is hostname, use that to parse auth/port */
3047 if (ls != p) {
3048 /* authorization (user[:pass]@hostname) */
3049 if ((at = strchr(p, '@')) && at < ls) {
3050 av_strlcpy(authorization, p,
3051 FFMIN(authorization_size, at + 1 - p));
3052 p = at + 1; /* skip '@' */
3053 }
3054
3055 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3056 /* [host]:port */
3057 av_strlcpy(hostname, p + 1,
3058 FFMIN(hostname_size, brk - p));
3059 if (brk[1] == ':' && port_ptr)
3060 *port_ptr = atoi(brk + 2);
3061 } else if ((col = strchr(p, ':')) && col < ls) {
3062 av_strlcpy(hostname, p,
3063 FFMIN(col + 1 - p, hostname_size));
3064 if (port_ptr) *port_ptr = atoi(col + 1);
3065 } else
3066 av_strlcpy(hostname, p,
3067 FFMIN(ls + 1 - p, hostname_size));
3068 }
3069 }
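
/*
 * Illustrative example: splitting "http://user:pass@example.com:8080/dir?x=1"
 * with url_split() yields
 *
 *     proto         = "http"
 *     authorization = "user:pass"
 *     hostname      = "example.com"
 *     *port_ptr     = 8080
 *     path          = "/dir?x=1"
 *
 * For a plain filename (no "proto:" prefix) only 'path' is filled in and
 * *port_ptr stays -1.
 */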
3070
3071 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3072 int pts_num, int pts_den)
3073 {
3074 s->pts_wrap_bits = pts_wrap_bits;
3075 s->time_base.num = pts_num;
3076 s->time_base.den = pts_den;
3077 }
3078
3079 /* fraction handling */
3080
3081 /**
3082 * f = val + (num / den) + 0.5.
3083 *
3084  * 'num' is normalized so that 0 <= num < den.
3085 *
3086 * @param f fractional number
3087 * @param val integer value
3088 * @param num must be >= 0
3089 * @param den must be >= 1
3090 */
3091 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3092 {
3093 num += (den >> 1);
3094 if (num >= den) {
3095 val += num / den;
3096 num = num % den;
3097 }
3098 f->val = val;
3099 f->num = num;
3100 f->den = den;
3101 }
3102
3103 /**
3104 * Fractional addition to f: f = f + (incr / f->den).
3105 *
3106 * @param f fractional number
3107 * @param incr increment, can be positive or negative
3108 */
3109 static void av_frac_add(AVFrac *f, int64_t incr)
3110 {
3111 int64_t num, den;
3112
3113 num = f->num + incr;
3114 den = f->den;
3115 if (num < 0) {
3116 f->val += num / den;
3117 num = num % den;
3118 if (num < 0) {
3119 num += den;
3120 f->val--;
3121 }
3122 } else if (num >= den) {
3123 f->val += num / den;
3124 num = num % den;
3125 }
3126 f->num = num;
3127 }
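
/*
 * Worked example of the AVFrac bookkeeping used for st->pts in the muxer:
 * for an audio stream with time_base = 1/90000 and sample_rate = 44100,
 * av_write_header() calls av_frac_init(&st->pts, 0, 0, 1 * 44100) and each
 * packet of 1152 samples triggers av_frac_add(&st->pts, 90000 * 1152).
 * The exact increment is 90000 * 1152 / 44100 = 2351.02... ticks, so val
 * advances by 2351 (occasionally 2352) per packet while the fractional
 * remainder is carried in num and no rounding error accumulates.
 */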