Maintain pointer to end of AVFormatContext.packet_buffer list
1 /*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21 #include "avformat.h"
22 #include "libavcodec/opt.h"
23 #include "libavutil/avstring.h"
24 #include "riff.h"
25 #include <sys/time.h>
26 #include <time.h>
27
28 #undef NDEBUG
29 #include <assert.h>
30
31 /**
32 * @file libavformat/utils.c
33 * various utility functions for use within FFmpeg
34 */
35
36 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
37 static void av_frac_add(AVFrac *f, int64_t incr);
38
39 /** head of registered input format linked list */
40 AVInputFormat *first_iformat = NULL;
41 /** head of registered output format linked list */
42 AVOutputFormat *first_oformat = NULL;
43
44 AVInputFormat *av_iformat_next(AVInputFormat *f)
45 {
46 if(f) return f->next;
47 else return first_iformat;
48 }
49
50 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
51 {
52 if(f) return f->next;
53 else return first_oformat;
54 }
55
56 void av_register_input_format(AVInputFormat *format)
57 {
58 AVInputFormat **p;
59 p = &first_iformat;
60 while (*p != NULL) p = &(*p)->next;
61 *p = format;
62 format->next = NULL;
63 }
64
65 void av_register_output_format(AVOutputFormat *format)
66 {
67 AVOutputFormat **p;
68 p = &first_oformat;
69 while (*p != NULL) p = &(*p)->next;
70 *p = format;
71 format->next = NULL;
72 }
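/* Usage sketch: a (de)muxer normally registers itself once at program start,
 * typically from av_register_all(); 'my_demuxer' below is a placeholder name.
 *
 *     extern AVInputFormat my_demuxer;
 *     av_register_input_format(&my_demuxer);
 *
 * Registration appends to the singly linked lists above, so av_iformat_next()
 * and the probing code see formats in registration order. */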
73
74 int match_ext(const char *filename, const char *extensions)
75 {
76 const char *ext, *p;
77 char ext1[32], *q;
78
79 if(!filename)
80 return 0;
81
82 ext = strrchr(filename, '.');
83 if (ext) {
84 ext++;
85 p = extensions;
86 for(;;) {
87 q = ext1;
88 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
89 *q++ = *p++;
90 *q = '\0';
91 if (!strcasecmp(ext1, ext))
92 return 1;
93 if (*p == '\0')
94 break;
95 p++;
96 }
97 }
98 return 0;
99 }
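/* Example: match_ext("clip.MOV", "mov,mp4,m4a") returns 1 because the
 * comparison uses strcasecmp() and is therefore case-insensitive; a filename
 * without a '.' or with an extension not in the list returns 0. */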
100
101 AVOutputFormat *guess_format(const char *short_name, const char *filename,
102 const char *mime_type)
103 {
104 AVOutputFormat *fmt, *fmt_found;
105 int score_max, score;
106
107 /* specific test for image sequences */
108 #ifdef CONFIG_IMAGE2_MUXER
109 if (!short_name && filename &&
110 av_filename_number_test(filename) &&
111 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
112 return guess_format("image2", NULL, NULL);
113 }
114 #endif
115 /* Find the proper file type. */
116 fmt_found = NULL;
117 score_max = 0;
118 fmt = first_oformat;
119 while (fmt != NULL) {
120 score = 0;
121 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
122 score += 100;
123 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
124 score += 10;
125 if (filename && fmt->extensions &&
126 match_ext(filename, fmt->extensions)) {
127 score += 5;
128 }
129 if (score > score_max) {
130 score_max = score;
131 fmt_found = fmt;
132 }
133 fmt = fmt->next;
134 }
135 return fmt_found;
136 }
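/* Usage sketch (assuming the corresponding muxers are compiled in):
 *
 *     AVOutputFormat *ofmt = guess_format(NULL, "out.avi", NULL);
 *     if (!ofmt)
 *         ofmt = guess_format("mpeg", NULL, NULL);   // explicit fallback
 *
 * A matching short name scores 100, a matching MIME type 10 and a matching
 * file extension 5, so an explicit short_name always wins. */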
137
138 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
139 const char *mime_type)
140 {
141 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
142
143 if (fmt) {
144 AVOutputFormat *stream_fmt;
145 char stream_format_name[64];
146
147 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
148 stream_fmt = guess_format(stream_format_name, NULL, NULL);
149
150 if (stream_fmt)
151 fmt = stream_fmt;
152 }
153
154 return fmt;
155 }
156
157 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
158 const char *filename, const char *mime_type, enum CodecType type){
159 if(type == CODEC_TYPE_VIDEO){
160 enum CodecID codec_id= CODEC_ID_NONE;
161
162 #ifdef CONFIG_IMAGE2_MUXER
163 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
164 codec_id= av_guess_image2_codec(filename);
165 }
166 #endif
167 if(codec_id == CODEC_ID_NONE)
168 codec_id= fmt->video_codec;
169 return codec_id;
170 }else if(type == CODEC_TYPE_AUDIO)
171 return fmt->audio_codec;
172 else
173 return CODEC_ID_NONE;
174 }
175
176 AVInputFormat *av_find_input_format(const char *short_name)
177 {
178 AVInputFormat *fmt;
179 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
180 if (!strcmp(fmt->name, short_name))
181 return fmt;
182 }
183 return NULL;
184 }
185
186 /* memory handling */
187
188 void av_destruct_packet(AVPacket *pkt)
189 {
190 av_free(pkt->data);
191 pkt->data = NULL; pkt->size = 0;
192 }
193
194 void av_init_packet(AVPacket *pkt)
195 {
196 pkt->pts = AV_NOPTS_VALUE;
197 pkt->dts = AV_NOPTS_VALUE;
198 pkt->pos = -1;
199 pkt->duration = 0;
200 pkt->flags = 0;
201 pkt->stream_index = 0;
202 pkt->destruct= av_destruct_packet_nofree;
203 }
204
205 int av_new_packet(AVPacket *pkt, int size)
206 {
207 uint8_t *data;
208 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
209 return AVERROR(ENOMEM);
210 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
211 if (!data)
212 return AVERROR(ENOMEM);
213 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
214
215 av_init_packet(pkt);
216 pkt->data = data;
217 pkt->size = size;
218 pkt->destruct = av_destruct_packet;
219 return 0;
220 }
221
222 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
223 {
224 int ret= av_new_packet(pkt, size);
225
226 if(ret<0)
227 return ret;
228
229 pkt->pos= url_ftell(s);
230
231 ret= get_buffer(s, pkt->data, size);
232 if(ret<=0)
233 av_free_packet(pkt);
234 else
235 pkt->size= ret;
236
237 return ret;
238 }
239
240 int av_dup_packet(AVPacket *pkt)
241 {
242 if (pkt->destruct != av_destruct_packet) {
243 uint8_t *data;
244 /* We duplicate the packet and don't forget to add the padding again. */
245 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
246 return AVERROR(ENOMEM);
247 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
248 if (!data) {
249 return AVERROR(ENOMEM);
250 }
251 memcpy(data, pkt->data, pkt->size);
252 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
253 pkt->data = data;
254 pkt->destruct = av_destruct_packet;
255 }
256 return 0;
257 }
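/* Usage sketch: a packet whose data is owned by the demuxer (destruct ==
 * av_destruct_packet_nofree) is only valid until the next read; duplicate it
 * if it has to be kept longer.
 *
 *     AVPacket pkt;
 *     if (av_read_frame(ic, &pkt) >= 0) {     // 'ic' is an opened context
 *         if (av_dup_packet(&pkt) == 0) {
 *             // pkt.data is now a private, padded copy owned by pkt
 *         }
 *         av_free_packet(&pkt);
 *     }
 */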
258
259 int av_filename_number_test(const char *filename)
260 {
261 char buf[1024];
262 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
263 }
264
265 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
266 {
267 AVInputFormat *fmt1, *fmt;
268 int score;
269
270 fmt = NULL;
271 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
272 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
273 continue;
274 score = 0;
275 if (fmt1->read_probe) {
276 score = fmt1->read_probe(pd);
277 } else if (fmt1->extensions) {
278 if (match_ext(pd->filename, fmt1->extensions)) {
279 score = 50;
280 }
281 }
282 if (score > *score_max) {
283 *score_max = score;
284 fmt = fmt1;
285 }else if (score == *score_max)
286 fmt = NULL;
287 }
288 return fmt;
289 }
290
291 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
292 int score=0;
293 return av_probe_input_format2(pd, is_opened, &score);
294 }
295
296 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
297 {
298 AVInputFormat *fmt;
299 fmt = av_probe_input_format2(pd, 1, &score);
300
301 if (fmt) {
302 if (!strcmp(fmt->name, "mp3"))
303 st->codec->codec_id = CODEC_ID_MP3;
304 else if (!strcmp(fmt->name, "ac3"))
305 st->codec->codec_id = CODEC_ID_AC3;
306 else if (!strcmp(fmt->name, "mpegvideo"))
307 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
308 else if (!strcmp(fmt->name, "h264"))
309 st->codec->codec_id = CODEC_ID_H264;
310 }
311 return !!fmt;
312 }
313
314 /************************************************************/
315 /* input media file */
316
317 /**
318 * Return the name of the context's input or output format (used for logging).
319 */
320 static const char* format_to_name(void* ptr)
321 {
322 AVFormatContext* fc = (AVFormatContext*) ptr;
323 if(fc->iformat) return fc->iformat->name;
324 else if(fc->oformat) return fc->oformat->name;
325 else return "NULL";
326 }
327
328 #define OFFSET(x) offsetof(AVFormatContext,x)
329 #define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
330 //these names are too long to be readable
331 #define E AV_OPT_FLAG_ENCODING_PARAM
332 #define D AV_OPT_FLAG_DECODING_PARAM
333
334 static const AVOption options[]={
335 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
336 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
337 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
338 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
339 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
340 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
341 {"track", "set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
342 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
343 {"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
344 {"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
345 {"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
346 {"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
347 {"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"},
348 {"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"},
349 {NULL},
350 };
351
352 #undef E
353 #undef D
354 #undef DEFAULT
355
356 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
357
358 static void avformat_get_context_defaults(AVFormatContext *s)
359 {
360 memset(s, 0, sizeof(AVFormatContext));
361
362 s->av_class = &av_format_context_class;
363
364 av_opt_set_defaults(s);
365 }
366
367 AVFormatContext *av_alloc_format_context(void)
368 {
369 AVFormatContext *ic;
370 ic = av_malloc(sizeof(AVFormatContext));
371 if (!ic) return ic;
372 avformat_get_context_defaults(ic);
373 ic->av_class = &av_format_context_class;
374 return ic;
375 }
376
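/**
 * Open a media file from an already opened IO stream. 'fmt' must be specified.
 */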
377 int av_open_input_stream(AVFormatContext **ic_ptr,
378 ByteIOContext *pb, const char *filename,
379 AVInputFormat *fmt, AVFormatParameters *ap)
380 {
381 int err;
382 AVFormatContext *ic;
383 AVFormatParameters default_ap;
384
385 if(!ap){
386 ap=&default_ap;
387 memset(ap, 0, sizeof(default_ap));
388 }
389
390 if(!ap->prealloced_context)
391 ic = av_alloc_format_context();
392 else
393 ic = *ic_ptr;
394 if (!ic) {
395 err = AVERROR(ENOMEM);
396 goto fail;
397 }
398 ic->iformat = fmt;
399 ic->pb = pb;
400 ic->duration = AV_NOPTS_VALUE;
401 ic->start_time = AV_NOPTS_VALUE;
402 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
403
404 /* allocate private data */
405 if (fmt->priv_data_size > 0) {
406 ic->priv_data = av_mallocz(fmt->priv_data_size);
407 if (!ic->priv_data) {
408 err = AVERROR(ENOMEM);
409 goto fail;
410 }
411 } else {
412 ic->priv_data = NULL;
413 }
414
415 if (ic->iformat->read_header) {
416 err = ic->iformat->read_header(ic, ap);
417 if (err < 0)
418 goto fail;
419 }
420
421 if (pb && !ic->data_offset)
422 ic->data_offset = url_ftell(ic->pb);
423
424 *ic_ptr = ic;
425 return 0;
426 fail:
427 if (ic) {
428 int i;
429 av_freep(&ic->priv_data);
430 for(i=0;i<ic->nb_streams;i++) {
431 AVStream *st = ic->streams[i];
432 if (st) {
433 av_free(st->priv_data);
434 av_free(st->codec->extradata);
435 }
436 av_free(st);
437 }
438 }
439 av_free(ic);
440 *ic_ptr = NULL;
441 return err;
442 }
443
444 /** size of probe buffer, for guessing file type from file contents */
445 #define PROBE_BUF_MIN 2048
446 #define PROBE_BUF_MAX (1<<20)
447
448 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
449 AVInputFormat *fmt,
450 int buf_size,
451 AVFormatParameters *ap)
452 {
453 int err, probe_size;
454 AVProbeData probe_data, *pd = &probe_data;
455 ByteIOContext *pb = NULL;
456
457 pd->filename = "";
458 if (filename)
459 pd->filename = filename;
460 pd->buf = NULL;
461 pd->buf_size = 0;
462
463 if (!fmt) {
464 /* guess format if no file can be opened */
465 fmt = av_probe_input_format(pd, 0);
466 }
467
468 /* Do not open file if the format does not need it. XXX: specific
469 hack needed to handle RTSP/TCP */
470 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
471 /* if no file needed do not try to open one */
472 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
473 goto fail;
474 }
475 if (buf_size > 0) {
476 url_setbufsize(pb, buf_size);
477 }
478
479 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
480 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
481 /* read probe data */
482 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
483 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
484 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
485 if (url_fseek(pb, 0, SEEK_SET) < 0) {
486 url_fclose(pb);
487 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
488 pb = NULL;
489 err = AVERROR(EIO);
490 goto fail;
491 }
492 }
493 /* guess file format */
494 fmt = av_probe_input_format2(pd, 1, &score);
495 }
496 av_freep(&pd->buf);
497 }
498
499 /* if still no format found, error */
500 if (!fmt) {
501 err = AVERROR_NOFMT;
502 goto fail;
503 }
504
505 /* check filename in case an image number is expected */
506 if (fmt->flags & AVFMT_NEEDNUMBER) {
507 if (!av_filename_number_test(filename)) {
508 err = AVERROR_NUMEXPECTED;
509 goto fail;
510 }
511 }
512 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
513 if (err)
514 goto fail;
515 return 0;
516 fail:
517 av_freep(&pd->buf);
518 if (pb)
519 url_fclose(pb);
520 *ic_ptr = NULL;
521 return err;
522
523 }
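/* Typical call sequence for an application (sketch, error handling trimmed):
 *
 *     AVFormatContext *ic;
 *     if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) < 0)
 *         return -1;                     // could not open or recognize it
 *     if (av_find_stream_info(ic) < 0)   // fills codec parameters, see below
 *         return -1;
 *     // ... av_read_frame() loop ...
 *     av_close_input_file(ic);
 *
 * With fmt == NULL the probing loop above grows the probe buffer from
 * PROBE_BUF_MIN up to PROBE_BUF_MAX until a format scores high enough. */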
524
525 /*******************************************************/
526
527 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
528 AVPacketList **plast_pktl){
529 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
530 if (!pktl)
531 return NULL;
532
533 if (*packet_buffer)
534 (*plast_pktl)->next = pktl;
535 else
536 *packet_buffer = pktl;
537
538 /* add the packet to the buffered packet list */
539 *plast_pktl = pktl;
540 pktl->pkt= *pkt;
541 return &pktl->pkt;
542 }
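/* Note: appending stays O(1) because the caller keeps a pointer to the last
 * element of the list (packet_buffer_end / raw_packet_buffer_end) and passes
 * its address as plast_pktl, so the list never has to be walked to find its
 * tail; the head pointer is only written when the list is empty. */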
543
544 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
545 {
546 int ret;
547 AVStream *st;
548
549 for(;;){
550 AVPacketList *pktl = s->raw_packet_buffer;
551
552 if (pktl) {
553 *pkt = pktl->pkt;
554 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
555 s->raw_packet_buffer = pktl->next;
556 av_free(pktl);
557 return 0;
558 }
559 }
560
561 av_init_packet(pkt);
562 ret= s->iformat->read_packet(s, pkt);
563 if (ret < 0)
564 return ret;
565 st= s->streams[pkt->stream_index];
566
567 switch(st->codec->codec_type){
568 case CODEC_TYPE_VIDEO:
569 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
570 break;
571 case CODEC_TYPE_AUDIO:
572 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
573 break;
574 case CODEC_TYPE_SUBTITLE:
575 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
576 break;
577 }
578
579 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
580 return ret;
581
582 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
583
584 if(st->codec->codec_id == CODEC_ID_PROBE){
585 AVProbeData *pd = &st->probe_data;
586
587 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
588 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
589 pd->buf_size += pkt->size;
590 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
591
592 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
593 set_codec_from_probe_data(st, pd, 1);
594 if(st->codec->codec_id != CODEC_ID_PROBE){
595 pd->buf_size=0;
596 av_freep(&pd->buf);
597 }
598 }
599 }
600 }
601 }
602
603 /**********************************************************/
604
605 /**
606 * Get the number of samples of an audio frame. Return -1 on error.
607 */
608 static int get_audio_frame_size(AVCodecContext *enc, int size)
609 {
610 int frame_size;
611
612 if(enc->codec_id == CODEC_ID_VORBIS)
613 return -1;
614
615 if (enc->frame_size <= 1) {
616 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
617
618 if (bits_per_sample) {
619 if (enc->channels == 0)
620 return -1;
621 frame_size = (size << 3) / (bits_per_sample * enc->channels);
622 } else {
623 /* used for example by ADPCM codecs */
624 if (enc->bit_rate == 0)
625 return -1;
626 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
627 }
628 } else {
629 frame_size = enc->frame_size;
630 }
631 return frame_size;
632 }
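/* Worked example for the fixed-bits-per-sample branch: a 4096-byte packet of
 * 16-bit stereo PCM yields (4096 * 8) / (16 * 2) = 1024 samples per channel.
 * Codecs without a fixed sample size (e.g. some ADPCM variants) fall back to
 * the bit-rate based estimate instead. */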
633
634
635 /**
636 * Compute the frame duration in seconds as the fraction *pnum / *pden; both are set to 0 if it is not available.
637 */
638 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
639 AVCodecParserContext *pc, AVPacket *pkt)
640 {
641 int frame_size;
642
643 *pnum = 0;
644 *pden = 0;
645 switch(st->codec->codec_type) {
646 case CODEC_TYPE_VIDEO:
647 if(st->time_base.num*1000LL > st->time_base.den){
648 *pnum = st->time_base.num;
649 *pden = st->time_base.den;
650 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
651 *pnum = st->codec->time_base.num;
652 *pden = st->codec->time_base.den;
653 if (pc && pc->repeat_pict) {
654 *pden *= 2;
655 *pnum = (*pnum) * (2 + pc->repeat_pict);
656 }
657 }
658 break;
659 case CODEC_TYPE_AUDIO:
660 frame_size = get_audio_frame_size(st->codec, pkt->size);
661 if (frame_size < 0)
662 break;
663 *pnum = frame_size;
664 *pden = st->codec->sample_rate;
665 break;
666 default:
667 break;
668 }
669 }
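/* Worked example: 25 fps video in an MPEG-TS stream has st->time_base =
 * 1/90000 and st->codec->time_base = 1/25, so the first test fails and the
 * codec time base is used: *pnum = 1, *pden = 25. compute_pkt_fields() then
 * rescales this to 90000 / 25 = 3600 stream ticks per frame. If the parser
 * reports repeat_pict, the duration is scaled by (2 + repeat_pict) / 2. */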
670
671 static int is_intra_only(AVCodecContext *enc){
672 if(enc->codec_type == CODEC_TYPE_AUDIO){
673 return 1;
674 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
675 switch(enc->codec_id){
676 case CODEC_ID_MJPEG:
677 case CODEC_ID_MJPEGB:
678 case CODEC_ID_LJPEG:
679 case CODEC_ID_RAWVIDEO:
680 case CODEC_ID_DVVIDEO:
681 case CODEC_ID_HUFFYUV:
682 case CODEC_ID_FFVHUFF:
683 case CODEC_ID_ASV1:
684 case CODEC_ID_ASV2:
685 case CODEC_ID_VCR1:
686 return 1;
687 default: break;
688 }
689 }
690 return 0;
691 }
692
693 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
694 int64_t dts, int64_t pts)
695 {
696 AVStream *st= s->streams[stream_index];
697 AVPacketList *pktl= s->packet_buffer;
698
699 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
700 return;
701
702 st->first_dts= dts - st->cur_dts;
703 st->cur_dts= dts;
704
705 for(; pktl; pktl= pktl->next){
706 if(pktl->pkt.stream_index != stream_index)
707 continue;
708 //FIXME think more about this check
709 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
710 pktl->pkt.pts += st->first_dts;
711
712 if(pktl->pkt.dts != AV_NOPTS_VALUE)
713 pktl->pkt.dts += st->first_dts;
714
715 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
716 st->start_time= pktl->pkt.pts;
717 }
718 if (st->start_time == AV_NOPTS_VALUE)
719 st->start_time = pts;
720 }
721
722 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
723 {
724 AVPacketList *pktl= s->packet_buffer;
725 int64_t cur_dts= 0;
726
727 if(st->first_dts != AV_NOPTS_VALUE){
728 cur_dts= st->first_dts;
729 for(; pktl; pktl= pktl->next){
730 if(pktl->pkt.stream_index == pkt->stream_index){
731 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
732 break;
733 cur_dts -= pkt->duration;
734 }
735 }
736 pktl= s->packet_buffer;
737 st->first_dts = cur_dts;
738 }else if(st->cur_dts)
739 return;
740
741 for(; pktl; pktl= pktl->next){
742 if(pktl->pkt.stream_index != pkt->stream_index)
743 continue;
744 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
745 && !pktl->pkt.duration){
746 pktl->pkt.dts= cur_dts;
747 if(!st->codec->has_b_frames)
748 pktl->pkt.pts= cur_dts;
749 cur_dts += pkt->duration;
750 pktl->pkt.duration= pkt->duration;
751 }else
752 break;
753 }
754 if(st->first_dts == AV_NOPTS_VALUE)
755 st->cur_dts= cur_dts;
756 }
757
758 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
759 AVCodecParserContext *pc, AVPacket *pkt)
760 {
761 int num, den, presentation_delayed, delay, i;
762 int64_t offset;
763
764 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
765 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
766 pkt->dts -= 1LL<<st->pts_wrap_bits;
767 }
768
769 if (pkt->duration == 0) {
770 compute_frame_duration(&num, &den, st, pc, pkt);
771 if (den && num) {
772 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
773
774 if(pkt->duration != 0 && s->packet_buffer)
775 update_initial_durations(s, st, pkt);
776 }
777 }
778
779 /* correct timestamps with byte offset if demuxers only have timestamps
780 on packet boundaries */
781 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
782 /* this will estimate bitrate based on this frame's duration and size */
783 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
784 if(pkt->pts != AV_NOPTS_VALUE)
785 pkt->pts += offset;
786 if(pkt->dts != AV_NOPTS_VALUE)
787 pkt->dts += offset;
788 }
789
790 /* do we have a video B-frame ? */
791 delay= st->codec->has_b_frames;
792 presentation_delayed = 0;
793 /* XXX: need has_b_frame, but cannot get it if the codec is
794 not initialized */
795 if (delay &&
796 pc && pc->pict_type != FF_B_TYPE)
797 presentation_delayed = 1;
798 /* This may be redundant, but it should not hurt. */
799 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
800 presentation_delayed = 1;
801
802 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
803 /* interpolate PTS and DTS if they are not present */
804 if(delay==0 || (delay==1 && pc)){
805 if (presentation_delayed) {
806 /* DTS = decompression timestamp */
807 /* PTS = presentation timestamp */
808 if (pkt->dts == AV_NOPTS_VALUE)
809 pkt->dts = st->last_IP_pts;
810 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
811 if (pkt->dts == AV_NOPTS_VALUE)
812 pkt->dts = st->cur_dts;
813
814 /* this is tricky: the dts must be incremented by the duration
815 of the frame we are displaying, i.e. the last I- or P-frame */
816 if (st->last_IP_duration == 0)
817 st->last_IP_duration = pkt->duration;
818 if(pkt->dts != AV_NOPTS_VALUE)
819 st->cur_dts = pkt->dts + st->last_IP_duration;
820 st->last_IP_duration = pkt->duration;
821 st->last_IP_pts= pkt->pts;
822 /* cannot compute PTS if not present (we can compute it only
823 by knowing the future) */
824 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
825 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
826 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
827 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
828 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
829 pkt->pts += pkt->duration;
830 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
831 }
832 }
833
834 /* presentation is not delayed: PTS and DTS are the same */
835 if(pkt->pts == AV_NOPTS_VALUE)
836 pkt->pts = pkt->dts;
837 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
838 if(pkt->pts == AV_NOPTS_VALUE)
839 pkt->pts = st->cur_dts;
840 pkt->dts = pkt->pts;
841 if(pkt->pts != AV_NOPTS_VALUE)
842 st->cur_dts = pkt->pts + pkt->duration;
843 }
844 }
845
846 if(pkt->pts != AV_NOPTS_VALUE){
847 st->pts_buffer[0]= pkt->pts;
848 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
849 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
850 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
851 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
852 if(pkt->dts == AV_NOPTS_VALUE)
853 pkt->dts= st->pts_buffer[0];
854 if(delay>1){
855 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
856 }
857 if(pkt->dts > st->cur_dts)
858 st->cur_dts = pkt->dts;
859 }
860
861 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
862
863 /* update flags */
864 if(is_intra_only(st->codec))
865 pkt->flags |= PKT_FLAG_KEY;
866 else if (pc) {
867 pkt->flags = 0;
868 /* keyframe computation */
869 if (pc->pict_type == FF_I_TYPE)
870 pkt->flags |= PKT_FLAG_KEY;
871 }
872 }
873
874 void av_destruct_packet_nofree(AVPacket *pkt)
875 {
876 pkt->data = NULL; pkt->size = 0;
877 }
878
879 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
880 {
881 AVStream *st;
882 int len, ret, i;
883
884 av_init_packet(pkt);
885
886 for(;;) {
887 /* select current input stream component */
888 st = s->cur_st;
889 if (st) {
890 if (!st->need_parsing || !st->parser) {
891 /* no parsing needed: we just output the packet as is */
892 /* raw data support */
893 *pkt = s->cur_pkt;
894 compute_pkt_fields(s, st, NULL, pkt);
895 s->cur_st = NULL;
896 break;
897 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
898 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
899 s->cur_ptr, s->cur_len,
900 s->cur_pkt.pts, s->cur_pkt.dts);
901 s->cur_pkt.pts = AV_NOPTS_VALUE;
902 s->cur_pkt.dts = AV_NOPTS_VALUE;
903 /* increment read pointer */
904 s->cur_ptr += len;
905 s->cur_len -= len;
906
907 /* return packet if any */
908 if (pkt->size) {
909 got_packet:
910 pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
911 pkt->duration = 0;
912 pkt->stream_index = st->index;
913 pkt->pts = st->parser->pts;
914 pkt->dts = st->parser->dts;
915 pkt->destruct = av_destruct_packet_nofree;
916 compute_pkt_fields(s, st, st->parser, pkt);
917
918 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
919 ff_reduce_index(s, st->index);
920 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
921 0, 0, AVINDEX_KEYFRAME);
922 }
923
924 break;
925 }
926 } else {
927 /* free packet */
928 av_free_packet(&s->cur_pkt);
929 s->cur_st = NULL;
930 }
931 } else {
932 /* read next packet */
933 ret = av_read_packet(s, &s->cur_pkt);
934 if (ret < 0) {
935 if (ret == AVERROR(EAGAIN))
936 return ret;
937 /* return the last frames, if any */
938 for(i = 0; i < s->nb_streams; i++) {
939 st = s->streams[i];
940 if (st->parser && st->need_parsing) {
941 av_parser_parse(st->parser, st->codec,
942 &pkt->data, &pkt->size,
943 NULL, 0,
944 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
945 if (pkt->size)
946 goto got_packet;
947 }
948 }
949 /* no more packets: really terminate parsing */
950 return ret;
951 }
952
953 if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
954 s->cur_pkt.dts != AV_NOPTS_VALUE &&
955 s->cur_pkt.pts < s->cur_pkt.dts){
956 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
957 s->cur_pkt.stream_index,
958 s->cur_pkt.pts,
959 s->cur_pkt.dts,
960 s->cur_pkt.size);
961 // av_free_packet(&s->cur_pkt);
962 // return -1;
963 }
964
965 st = s->streams[s->cur_pkt.stream_index];
966 if(s->debug & FF_FDEBUG_TS)
967 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
968 s->cur_pkt.stream_index,
969 s->cur_pkt.pts,
970 s->cur_pkt.dts,
971 s->cur_pkt.size,
972 s->cur_pkt.flags);
973
974 s->cur_st = st;
975 s->cur_ptr = s->cur_pkt.data;
976 s->cur_len = s->cur_pkt.size;
977 if (st->need_parsing && !st->parser) {
978 st->parser = av_parser_init(st->codec->codec_id);
979 if (!st->parser) {
980 /* no parser available: just output the raw packets */
981 st->need_parsing = AVSTREAM_PARSE_NONE;
982 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
983 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
984 }
985 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
986 st->parser->next_frame_offset=
987 st->parser->cur_offset= s->cur_pkt.pos;
988 }
989 }
990 }
991 }
992 if(s->debug & FF_FDEBUG_TS)
993 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
994 pkt->stream_index,
995 pkt->pts,
996 pkt->dts,
997 pkt->size,
998 pkt->flags);
999
1000 return 0;
1001 }
1002
1003 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1004 {
1005 AVPacketList *pktl;
1006 int eof=0;
1007 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1008
1009 for(;;){
1010 pktl = s->packet_buffer;
1011 if (pktl) {
1012 AVPacket *next_pkt= &pktl->pkt;
1013
1014 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
1015 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1016 if( pktl->pkt.stream_index == next_pkt->stream_index
1017 && next_pkt->dts < pktl->pkt.dts
1018 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1019 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1020 next_pkt->pts= pktl->pkt.dts;
1021 }
1022 pktl= pktl->next;
1023 }
1024 pktl = s->packet_buffer;
1025 }
1026
1027 if( next_pkt->pts != AV_NOPTS_VALUE
1028 || next_pkt->dts == AV_NOPTS_VALUE
1029 || !genpts || eof){
1030 /* read packet from packet buffer, if there is data */
1031 *pkt = *next_pkt;
1032 s->packet_buffer = pktl->next;
1033 av_free(pktl);
1034 return 0;
1035 }
1036 }
1037 if(genpts){
1038 int ret= av_read_frame_internal(s, pkt);
1039 if(ret<0){
1040 if(pktl && ret != AVERROR(EAGAIN)){
1041 eof=1;
1042 continue;
1043 }else
1044 return ret;
1045 }
1046
1047 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1048 &s->packet_buffer_end)) < 0)
1049 return AVERROR(ENOMEM);
1050 }else{
1051 assert(!s->packet_buffer);
1052 return av_read_frame_internal(s, pkt);
1053 }
1054 }
1055 }
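/* Typical demuxing loop (sketch; 'video_index' stands for whichever stream
 * the caller selected):
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         if (pkt.stream_index == video_index) {
 *             // decode or remux pkt.data / pkt.size here
 *         }
 *         av_free_packet(&pkt);
 *     }
 *
 * With AVFMT_FLAG_GENPTS set, packets are held in s->packet_buffer until a
 * pts can be derived from the dts of later packets; otherwise they come
 * straight from av_read_frame_internal(). */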
1056
1057 /* empty and free the buffered packet queue */
1058 static void flush_packet_queue(AVFormatContext *s)
1059 {
1060 AVPacketList *pktl;
1061
1062 for(;;) {
1063 pktl = s->packet_buffer;
1064 if (!pktl)
1065 break;
1066 s->packet_buffer = pktl->next;
1067 av_free_packet(&pktl->pkt);
1068 av_free(pktl);
1069 }
1070 }
1071
1072 /*******************************************************/
1073 /* seek support */
1074
1075 int av_find_default_stream_index(AVFormatContext *s)
1076 {
1077 int first_audio_index = -1;
1078 int i;
1079 AVStream *st;
1080
1081 if (s->nb_streams <= 0)
1082 return -1;
1083 for(i = 0; i < s->nb_streams; i++) {
1084 st = s->streams[i];
1085 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1086 return i;
1087 }
1088 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1089 first_audio_index = i;
1090 }
1091 return first_audio_index >= 0 ? first_audio_index : 0;
1092 }
1093
1094 /**
1095 * Flush the frame reader.
1096 */
1097 static void av_read_frame_flush(AVFormatContext *s)
1098 {
1099 AVStream *st;
1100 int i;
1101
1102 flush_packet_queue(s);
1103
1104 /* free previous packet */
1105 if (s->cur_st) {
1106 if (s->cur_st->parser)
1107 av_free_packet(&s->cur_pkt);
1108 s->cur_st = NULL;
1109 }
1110 /* fail safe */
1111 s->cur_ptr = NULL;
1112 s->cur_len = 0;
1113
1114 /* for each stream, reset read state */
1115 for(i = 0; i < s->nb_streams; i++) {
1116 st = s->streams[i];
1117
1118 if (st->parser) {
1119 av_parser_close(st->parser);
1120 st->parser = NULL;
1121 }
1122 st->last_IP_pts = AV_NOPTS_VALUE;
1123 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1124 }
1125 }
1126
1127 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1128 int i;
1129
1130 for(i = 0; i < s->nb_streams; i++) {
1131 AVStream *st = s->streams[i];
1132
1133 st->cur_dts = av_rescale(timestamp,
1134 st->time_base.den * (int64_t)ref_st->time_base.num,
1135 st->time_base.num * (int64_t)ref_st->time_base.den);
1136 }
1137 }
1138
1139 void ff_reduce_index(AVFormatContext *s, int stream_index)
1140 {
1141 AVStream *st= s->streams[stream_index];
1142 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1143
1144 if((unsigned)st->nb_index_entries >= max_entries){
1145 int i;
1146 for(i=0; 2*i<st->nb_index_entries; i++)
1147 st->index_entries[i]= st->index_entries[2*i];
1148 st->nb_index_entries= i;
1149 }
1150 }
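/* Example: with the default indexmem of 1 MiB and sizeof(AVIndexEntry)
 * commonly being 24 bytes, the index is capped at roughly 43000 entries per
 * stream; once the cap is hit every second entry is dropped, halving memory
 * use while keeping seek points spread over the whole file. */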
1151
1152 int av_add_index_entry(AVStream *st,
1153 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1154 {
1155 AVIndexEntry *entries, *ie;
1156 int index;
1157
1158 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1159 return -1;
1160
1161 entries = av_fast_realloc(st->index_entries,
1162 &st->index_entries_allocated_size,
1163 (st->nb_index_entries + 1) *
1164 sizeof(AVIndexEntry));
1165 if(!entries)
1166 return -1;
1167
1168 st->index_entries= entries;
1169
1170 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1171
1172 if(index<0){
1173 index= st->nb_index_entries++;
1174 ie= &entries[index];
1175 assert(index==0 || ie[-1].timestamp < timestamp);
1176 }else{
1177 ie= &entries[index];
1178 if(ie->timestamp != timestamp){
1179 if(ie->timestamp <= timestamp)
1180 return -1;
1181 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1182 st->nb_index_entries++;
1183 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1184 distance= ie->min_distance;
1185 }
1186
1187 ie->pos = pos;
1188 ie->timestamp = timestamp;
1189 ie->min_distance= distance;
1190 ie->size= size;
1191 ie->flags = flags;
1192
1193 return index;
1194 }
1195
1196 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1197 int flags)
1198 {
1199 AVIndexEntry *entries= st->index_entries;
1200 int nb_entries= st->nb_index_entries;
1201 int a, b, m;
1202 int64_t timestamp;
1203
1204 a = - 1;
1205 b = nb_entries;
1206
1207 while (b - a > 1) {
1208 m = (a + b) >> 1;
1209 timestamp = entries[m].timestamp;
1210 if(timestamp >= wanted_timestamp)
1211 b = m;
1212 if(timestamp <= wanted_timestamp)
1213 a = m;
1214 }
1215 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1216
1217 if(!(flags & AVSEEK_FLAG_ANY)){
1218 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1219 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1220 }
1221 }
1222
1223 if(m == nb_entries)
1224 return -1;
1225 return m;
1226 }
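/* With AVSEEK_FLAG_BACKWARD the binary search above returns the last entry
 * whose timestamp is <= wanted_timestamp, otherwise the first entry whose
 * timestamp is >= wanted_timestamp; unless AVSEEK_FLAG_ANY is set, the result
 * is then moved to the nearest keyframe entry in that direction.
 * Example: entries at timestamps {0, 40, 80} and wanted_timestamp 50 give
 * index 1 with AVSEEK_FLAG_BACKWARD and index 2 without it. */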
1227
1228 #define DEBUG_SEEK
1229
1230 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1231 AVInputFormat *avif= s->iformat;
1232 int64_t pos_min, pos_max, pos, pos_limit;
1233 int64_t ts_min, ts_max, ts;
1234 int index;
1235 AVStream *st;
1236
1237 if (stream_index < 0)
1238 return -1;
1239
1240 #ifdef DEBUG_SEEK
1241 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1242 #endif
1243
1244 ts_max=
1245 ts_min= AV_NOPTS_VALUE;
1246 pos_limit= -1; //gcc falsely says it may be uninitialized
1247
1248 st= s->streams[stream_index];
1249 if(st->index_entries){
1250 AVIndexEntry *e;
1251
1252 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1253 index= FFMAX(index, 0);
1254 e= &st->index_entries[index];
1255
1256 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1257 pos_min= e->pos;
1258 ts_min= e->timestamp;
1259 #ifdef DEBUG_SEEK
1260 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1261 pos_min,ts_min);
1262 #endif
1263 }else{
1264 assert(index==0);
1265 }
1266
1267 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1268 assert(index < st->nb_index_entries);
1269 if(index >= 0){
1270 e= &st->index_entries[index];
1271 assert(e->timestamp >= target_ts);
1272 pos_max= e->pos;
1273 ts_max= e->timestamp;
1274 pos_limit= pos_max - e->min_distance;
1275 #ifdef DEBUG_SEEK
1276 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1277 pos_max,pos_limit, ts_max);
1278 #endif
1279 }
1280 }
1281
1282 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1283 if(pos<0)
1284 return -1;
1285
1286 /* do the seek */
1287 url_fseek(s->pb, pos, SEEK_SET);
1288
1289 av_update_cur_dts(s, st, ts);
1290
1291 return 0;
1292 }
1293
1294 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1295 int64_t pos, ts;
1296 int64_t start_pos, filesize;
1297 int no_change;
1298
1299 #ifdef DEBUG_SEEK
1300 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1301 #endif
1302
1303 if(ts_min == AV_NOPTS_VALUE){
1304 pos_min = s->data_offset;
1305 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1306 if (ts_min == AV_NOPTS_VALUE)
1307 return -1;
1308 }
1309
1310 if(ts_max == AV_NOPTS_VALUE){
1311 int step= 1024;
1312 filesize = url_fsize(s->pb);
1313 pos_max = filesize - 1;
1314 do{
1315 pos_max -= step;
1316 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1317 step += step;
1318 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1319 if (ts_max == AV_NOPTS_VALUE)
1320 return -1;
1321
1322 for(;;){
1323 int64_t tmp_pos= pos_max + 1;
1324 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1325 if(tmp_ts == AV_NOPTS_VALUE)
1326 break;
1327 ts_max= tmp_ts;
1328 pos_max= tmp_pos;
1329 if(tmp_pos >= filesize)
1330 break;
1331 }
1332 pos_limit= pos_max;
1333 }
1334
1335 if(ts_min > ts_max){
1336 return -1;
1337 }else if(ts_min == ts_max){
1338 pos_limit= pos_min;
1339 }
1340
1341 no_change=0;
1342 while (pos_min < pos_limit) {
1343 #ifdef DEBUG_SEEK
1344 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1345 pos_min, pos_max,
1346 ts_min, ts_max);
1347 #endif
1348 assert(pos_limit <= pos_max);
1349
1350 if(no_change==0){
1351 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1352 // interpolate position (better than plain bisection)
1353 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1354 + pos_min - approximate_keyframe_distance;
1355 }else if(no_change==1){
1356 // bisection, if interpolation failed to change min or max pos last time
1357 pos = (pos_min + pos_limit)>>1;
1358 }else{
1359 /* linear search if bisection failed, can only happen if there
1360 are very few or no keyframes between min/max */
1361 pos=pos_min;
1362 }
1363 if(pos <= pos_min)
1364 pos= pos_min + 1;
1365 else if(pos > pos_limit)
1366 pos= pos_limit;
1367 start_pos= pos;
1368
1369 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1370 if(pos == pos_max)
1371 no_change++;
1372 else
1373 no_change=0;
1374 #ifdef DEBUG_SEEK
1375 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1376 #endif
1377 if(ts == AV_NOPTS_VALUE){
1378 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1379 return -1;
1380 }
1381 assert(ts != AV_NOPTS_VALUE);
1382 if (target_ts <= ts) {
1383 pos_limit = start_pos - 1;
1384 pos_max = pos;
1385 ts_max = ts;
1386 }
1387 if (target_ts >= ts) {
1388 pos_min = pos;
1389 ts_min = ts;
1390 }
1391 }
1392
1393 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1394 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1395 #ifdef DEBUG_SEEK
1396 pos_min = pos;
1397 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1398 pos_min++;
1399 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1400 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1401 pos, ts_min, target_ts, ts_max);
1402 #endif
1403 *ts_ret= ts;
1404 return pos;
1405 }
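/* The search above interpolates rather than bisecting blindly: given
 * (pos_min, ts_min) and (pos_max, ts_max), the next probe is
 *
 *     pos = (target_ts - ts_min) * (pos_max - pos_min) / (ts_max - ts_min)
 *           + pos_min - approximate_keyframe_distance;
 *
 * e.g. ts_min = 0 at byte 0 and ts_max = 1000 at byte 1000000 with
 * target_ts = 250 probes near byte 250000 (minus the keyframe distance).
 * If a probe fails to narrow the range it falls back to bisection, and
 * finally to a linear scan from pos_min. */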
1406
1407 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1408 int64_t pos_min, pos_max;
1409 #if 0
1410 AVStream *st;
1411
1412 if (stream_index < 0)
1413 return -1;
1414
1415 st= s->streams[stream_index];
1416 #endif
1417
1418 pos_min = s->data_offset;
1419 pos_max = url_fsize(s->pb) - 1;
1420
1421 if (pos < pos_min) pos= pos_min;
1422 else if(pos > pos_max) pos= pos_max;
1423
1424 url_fseek(s->pb, pos, SEEK_SET);
1425
1426 #if 0
1427 av_update_cur_dts(s, st, ts);
1428 #endif
1429 return 0;
1430 }
1431
1432 static int av_seek_frame_generic(AVFormatContext *s,
1433 int stream_index, int64_t timestamp, int flags)
1434 {
1435 int index, ret;
1436 AVStream *st;
1437 AVIndexEntry *ie;
1438
1439 st = s->streams[stream_index];
1440
1441 index = av_index_search_timestamp(st, timestamp, flags);
1442
1443 if(index < 0 || index==st->nb_index_entries-1){
1444 int i;
1445 AVPacket pkt;
1446
1447 if(st->nb_index_entries){
1448 assert(st->index_entries);
1449 ie= &st->index_entries[st->nb_index_entries-1];
1450 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1451 return ret;
1452 av_update_cur_dts(s, st, ie->timestamp);
1453 }else{
1454 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
1455 return ret;
1456 }
1457 for(i=0;; i++) {
1458 int ret = av_read_frame(s, &pkt);
1459 if(ret<0)
1460 break;
1461 av_free_packet(&pkt);
1462 if(stream_index == pkt.stream_index){
1463 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1464 break;
1465 }
1466 }
1467 index = av_index_search_timestamp(st, timestamp, flags);
1468 }
1469 if (index < 0)
1470 return -1;
1471
1472 av_read_frame_flush(s);
1473 if (s->iformat->read_seek){
1474 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1475 return 0;
1476 }
1477 ie = &st->index_entries[index];
1478 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1479 return ret;
1480 av_update_cur_dts(s, st, ie->timestamp);
1481
1482 return 0;
1483 }
1484
1485 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1486 {
1487 int ret;
1488 AVStream *st;
1489
1490 av_read_frame_flush(s);
1491
1492 if(flags & AVSEEK_FLAG_BYTE)
1493 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1494
1495 if(stream_index < 0){
1496 stream_index= av_find_default_stream_index(s);
1497 if(stream_index < 0)
1498 return -1;
1499
1500 st= s->streams[stream_index];
1501 /* timestamp for default must be expressed in AV_TIME_BASE units */
1502 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1503 }
1504 st= s->streams[stream_index];
1505
1506 /* first, we try the format specific seek */
1507 if (s->iformat->read_seek)
1508 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1509 else
1510 ret = -1;
1511 if (ret >= 0) {
1512 return 0;
1513 }
1514
1515 if(s->iformat->read_timestamp)
1516 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1517 else
1518 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1519 }
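/* Usage sketch: seek to roughly the 10 second mark, landing on a keyframe at
 * or before that point:
 *
 *     av_seek_frame(ic, -1, 10 * AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
 *
 * With stream_index == -1 the timestamp is in AV_TIME_BASE units and the
 * default stream is used; with an explicit stream_index it must be given in
 * that stream's time_base. */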
1520
1521 /*******************************************************/
1522
1523 /**
1524 * Check whether the format context has an accurate duration for any of its streams.
1525 *
1526 * @return TRUE if at least one stream has an accurate duration.
1527 */
1528 static int av_has_duration(AVFormatContext *ic)
1529 {
1530 int i;
1531 AVStream *st;
1532
1533 for(i = 0;i < ic->nb_streams; i++) {
1534 st = ic->streams[i];
1535 if (st->duration != AV_NOPTS_VALUE)
1536 return 1;
1537 }
1538 return 0;
1539 }
1540
1541 /**
1542 * Estimate the global stream timings from those of the individual streams.
1543 *
1544 * Also computes the global bitrate if possible.
1545 */
1546 static void av_update_stream_timings(AVFormatContext *ic)
1547 {
1548 int64_t start_time, start_time1, end_time, end_time1;
1549 int64_t duration, duration1;
1550 int i;
1551 AVStream *st;
1552
1553 start_time = INT64_MAX;
1554 end_time = INT64_MIN;
1555 duration = INT64_MIN;
1556 for(i = 0;i < ic->nb_streams; i++) {
1557 st = ic->streams[i];
1558 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1559 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1560 if (start_time1 < start_time)
1561 start_time = start_time1;
1562 if (st->duration != AV_NOPTS_VALUE) {
1563 end_time1 = start_time1
1564 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1565 if (end_time1 > end_time)
1566 end_time = end_time1;
1567 }
1568 }
1569 if (st->duration != AV_NOPTS_VALUE) {
1570 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1571 if (duration1 > duration)
1572 duration = duration1;
1573 }
1574 }
1575 if (start_time != INT64_MAX) {
1576 ic->start_time = start_time;
1577 if (end_time != INT64_MIN) {
1578 if (end_time - start_time > duration)
1579 duration = end_time - start_time;
1580 }
1581 }
1582 if (duration != INT64_MIN) {
1583 ic->duration = duration;
1584 if (ic->file_size > 0) {
1585 /* compute the bitrate */
1586 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1587 (double)ic->duration;
1588 }
1589 }
1590 }
1591
1592 static void fill_all_stream_timings(AVFormatContext *ic)
1593 {
1594 int i;
1595 AVStream *st;
1596
1597 av_update_stream_timings(ic);
1598 for(i = 0;i < ic->nb_streams; i++) {
1599 st = ic->streams[i];
1600 if (st->start_time == AV_NOPTS_VALUE) {
1601 if(ic->start_time != AV_NOPTS_VALUE)
1602 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1603 if(ic->duration != AV_NOPTS_VALUE)
1604 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1605 }
1606 }
1607 }
1608
1609 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1610 {
1611 int64_t filesize, duration;
1612 int bit_rate, i;
1613 AVStream *st;
1614
1615 /* if bit_rate is already set, we believe it */
1616 if (ic->bit_rate == 0) {
1617 bit_rate = 0;
1618 for(i=0;i<ic->nb_streams;i++) {
1619 st = ic->streams[i];
1620 bit_rate += st->codec->bit_rate;
1621 }
1622 ic->bit_rate = bit_rate;
1623 }
1624
1625 /* if duration is already set, we believe it */
1626 if (ic->duration == AV_NOPTS_VALUE &&
1627 ic->bit_rate != 0 &&
1628 ic->file_size != 0) {
1629 filesize = ic->file_size;
1630 if (filesize > 0) {
1631 for(i = 0; i < ic->nb_streams; i++) {
1632 st = ic->streams[i];
1633 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1634 if (st->duration == AV_NOPTS_VALUE)
1635 st->duration = duration;
1636 }
1637 }
1638 }
1639 }
1640
1641 #define DURATION_MAX_READ_SIZE 250000
1642
1643 /* only usable for MPEG-PS streams */
1644 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1645 {
1646 AVPacket pkt1, *pkt = &pkt1;
1647 AVStream *st;
1648 int read_size, i, ret;
1649 int64_t end_time;
1650 int64_t filesize, offset, duration;
1651
1652 /* free previous packet */
1653 if (ic->cur_st && ic->cur_st->parser)
1654 av_free_packet(&ic->cur_pkt);
1655 ic->cur_st = NULL;
1656
1657 /* flush packet queue */
1658 flush_packet_queue(ic);
1659
1660 for(i=0;i<ic->nb_streams;i++) {
1661 st = ic->streams[i];
1662 if (st->parser) {
1663 av_parser_close(st->parser);
1664 st->parser= NULL;
1665 }
1666 }
1667
1668 /* we read the first packets to get the first PTS (not fully
1669 accurate, but it is enough for now) */
1670 url_fseek(ic->pb, 0, SEEK_SET);
1671 read_size = 0;
1672 for(;;) {
1673 if (read_size >= DURATION_MAX_READ_SIZE)
1674 break;
1675 /* if all info is available, we can stop */
1676 for(i = 0;i < ic->nb_streams; i++) {
1677 st = ic->streams[i];
1678 if (st->start_time == AV_NOPTS_VALUE)
1679 break;
1680 }
1681 if (i == ic->nb_streams)
1682 break;
1683
1684 ret = av_read_packet(ic, pkt);
1685 if (ret != 0)
1686 break;
1687 read_size += pkt->size;
1688 st = ic->streams[pkt->stream_index];
1689 if (pkt->pts != AV_NOPTS_VALUE) {
1690 if (st->start_time == AV_NOPTS_VALUE)
1691 st->start_time = pkt->pts;
1692 }
1693 av_free_packet(pkt);
1694 }
1695
1696 /* estimate the end time (duration) */
1697 /* XXX: may need to support wrapping */
1698 filesize = ic->file_size;
1699 offset = filesize - DURATION_MAX_READ_SIZE;
1700 if (offset < 0)
1701 offset = 0;
1702
1703 url_fseek(ic->pb, offset, SEEK_SET);
1704 read_size = 0;
1705 for(;;) {
1706 if (read_size >= DURATION_MAX_READ_SIZE)
1707 break;
1708
1709 ret = av_read_packet(ic, pkt);
1710 if (ret != 0)
1711 break;
1712 read_size += pkt->size;
1713 st = ic->streams[pkt->stream_index];
1714 if (pkt->pts != AV_NOPTS_VALUE &&
1715 st->start_time != AV_NOPTS_VALUE) {
1716 end_time = pkt->pts;
1717 duration = end_time - st->start_time;
1718 if (duration > 0) {
1719 if (st->duration == AV_NOPTS_VALUE ||
1720 st->duration < duration)
1721 st->duration = duration;
1722 }
1723 }
1724 av_free_packet(pkt);
1725 }
1726
1727 fill_all_stream_timings(ic);
1728
1729 url_fseek(ic->pb, old_offset, SEEK_SET);
1730 for(i=0; i<ic->nb_streams; i++){
1731 st= ic->streams[i];
1732 st->cur_dts= st->first_dts;
1733 st->last_IP_pts = AV_NOPTS_VALUE;
1734 }
1735 }
1736
1737 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1738 {
1739 int64_t file_size;
1740
1741 /* get the file size, if possible */
1742 if (ic->iformat->flags & AVFMT_NOFILE) {
1743 file_size = 0;
1744 } else {
1745 file_size = url_fsize(ic->pb);
1746 if (file_size < 0)
1747 file_size = 0;
1748 }
1749 ic->file_size = file_size;
1750
1751 if ((!strcmp(ic->iformat->name, "mpeg") ||
1752 !strcmp(ic->iformat->name, "mpegts")) &&
1753 file_size && !url_is_streamed(ic->pb)) {
1754 /* get accurate estimate from the PTSes */
1755 av_estimate_timings_from_pts(ic, old_offset);
1756 } else if (av_has_duration(ic)) {
1757 /* at least one component has timings - we use them for all
1758 the components */
1759 fill_all_stream_timings(ic);
1760 } else {
1761 /* less precise: use bitrate info */
1762 av_estimate_timings_from_bit_rate(ic);
1763 }
1764 av_update_stream_timings(ic);
1765
1766 #if 0
1767 {
1768 int i;
1769 AVStream *st;
1770 for(i = 0;i < ic->nb_streams; i++) {
1771 st = ic->streams[i];
1772 printf("%d: start_time: %0.3f duration: %0.3f\n",
1773 i, (double)st->start_time / AV_TIME_BASE,
1774 (double)st->duration / AV_TIME_BASE);
1775 }
1776 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1777 (double)ic->start_time / AV_TIME_BASE,
1778 (double)ic->duration / AV_TIME_BASE,
1779 ic->bit_rate / 1000);
1780 }
1781 #endif
1782 }
1783
1784 static int has_codec_parameters(AVCodecContext *enc)
1785 {
1786 int val;
1787 switch(enc->codec_type) {
1788 case CODEC_TYPE_AUDIO:
1789 val = enc->sample_rate && enc->channels;
1790 if(!enc->frame_size &&
1791 (enc->codec_id == CODEC_ID_VORBIS ||
1792 enc->codec_id == CODEC_ID_AAC))
1793 return 0;
1794 break;
1795 case CODEC_TYPE_VIDEO:
1796 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1797 break;
1798 default:
1799 val = 1;
1800 break;
1801 }
1802 return enc->codec_id != CODEC_ID_NONE && val != 0;
1803 }
1804
1805 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1806 {
1807 int16_t *samples;
1808 AVCodec *codec;
1809 int got_picture, data_size, ret=0;
1810 AVFrame picture;
1811
1812 if(!st->codec->codec){
1813 codec = avcodec_find_decoder(st->codec->codec_id);
1814 if (!codec)
1815 return -1;
1816 ret = avcodec_open(st->codec, codec);
1817 if (ret < 0)
1818 return ret;
1819 }
1820
1821 if(!has_codec_parameters(st->codec)){
1822 switch(st->codec->codec_type) {
1823 case CODEC_TYPE_VIDEO:
1824 ret = avcodec_decode_video(st->codec, &picture,
1825 &got_picture, data, size);
1826 break;
1827 case CODEC_TYPE_AUDIO:
1828 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1829 samples = av_malloc(data_size);
1830 if (!samples)
1831 goto fail;
1832 ret = avcodec_decode_audio2(st->codec, samples,
1833 &data_size, data, size);
1834 av_free(samples);
1835 break;
1836 default:
1837 break;
1838 }
1839 }
1840 fail:
1841 return ret;
1842 }
1843
1844 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1845 {
1846 while (tags->id != CODEC_ID_NONE) {
1847 if (tags->id == id)
1848 return tags->tag;
1849 tags++;
1850 }
1851 return 0;
1852 }
1853
1854 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1855 {
1856 int i;
1857 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1858 if(tag == tags[i].tag)
1859 return tags[i].id;
1860 }
1861 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1862 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1863 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1864 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1865 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1866 return tags[i].id;
1867 }
1868 return CODEC_ID_NONE;
1869 }
1870
1871 unsigned int av_codec_get_tag(const AVCodecTag *tags[4], enum CodecID id)
1872 {
1873 int i;
1874 for(i=0; tags && tags[i]; i++){
1875 int tag= codec_get_tag(tags[i], id);
1876 if(tag) return tag;
1877 }
1878 return 0;
1879 }
1880
1881 enum CodecID av_codec_get_id(const AVCodecTag *tags[4], unsigned int tag)
1882 {
1883 int i;
1884 for(i=0; tags && tags[i]; i++){
1885 enum CodecID id= codec_get_id(tags[i], tag);
1886 if(id!=CODEC_ID_NONE) return id;
1887 }
1888 return CODEC_ID_NONE;
1889 }
1890
1891 static void compute_chapters_end(AVFormatContext *s)
1892 {
1893 unsigned int i;
1894
1895 for (i=0; i+1<s->nb_chapters; i++)
1896 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1897 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1898 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1899 s->chapters[i]->end = s->chapters[i+1]->start;
1900 }
1901
1902 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1903 assert(s->start_time != AV_NOPTS_VALUE);
1904 assert(s->duration > 0);
1905 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1906 AV_TIME_BASE_Q,
1907 s->chapters[i]->time_base);
1908 }
1909 }
1910
1911 /* absolute maximum size we read until we abort */
1912 #define MAX_READ_SIZE 5000000
1913
1914 #define MAX_STD_TIMEBASES (60*12+5)
1915 static int get_std_framerate(int i){
1916 if(i<60*12) return i*1001;
1917 else return ((int[]){24,30,60,12,15})[i-60*12]*1000*12;
1918 }
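/* The candidate frame rates tried in av_find_stream_info() are i/12 fps for
 * i = 1..719 (every multiple of 1/12 fps up to just under 60 fps) plus the
 * NTSC-style rates 24000/1001, 30000/1001, 60000/1001, 12000/1001 and
 * 15000/1001; duration_error accumulates, per stream and candidate, how badly
 * that rate explains the observed packet durations. */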
1919
1920 /*
1921 * Is the time base unreliable?
1922 * This is a heuristic to balance between quick acceptance of the values in
1923 * the headers vs. some extra checks.
1924 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1925 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1926 * And there are "variable" fps files this needs to detect as well.
1927 */
1928 static int tb_unreliable(AVCodecContext *c){
1929 if( c->time_base.den >= 101L*c->time_base.num
1930 || c->time_base.den < 5L*c->time_base.num
1931 /* || c->codec_tag == ff_get_fourcc("DIVX")
1932 || c->codec_tag == ff_get_fourcc("XVID")*/
1933 || c->codec_id == CODEC_ID_MPEG2VIDEO)
1934 return 1;
1935 return 0;
1936 }
1937
1938 int av_find_stream_info(AVFormatContext *ic)
1939 {
1940 int i, count, ret, read_size, j;
1941 AVStream *st;
1942 AVPacket pkt1, *pkt;
1943 int64_t last_dts[MAX_STREAMS];
1944 int duration_count[MAX_STREAMS]={0};
1945 double (*duration_error)[MAX_STD_TIMEBASES];
1946 offset_t old_offset = url_ftell(ic->pb);
1947 int64_t codec_info_duration[MAX_STREAMS]={0};
1948 int codec_info_nb_frames[MAX_STREAMS]={0};
1949
1950 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1951 if (!duration_error) return AVERROR(ENOMEM);
1952
1953 for(i=0;i<ic->nb_streams;i++) {
1954 st = ic->streams[i];
1955 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1956 /* if(!st->time_base.num)
1957 st->time_base= */
1958 if(!st->codec->time_base.num)
1959 st->codec->time_base= st->time_base;
1960 }
1961 // only needed here for the extradata split below
1962 if (!st->parser) {
1963 st->parser = av_parser_init(st->codec->codec_id);
1964 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
1965 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1966 }
1967 }
1968 }
1969
1970 for(i=0;i<MAX_STREAMS;i++){
1971 last_dts[i]= AV_NOPTS_VALUE;
1972 }
1973
1974 count = 0;
1975 read_size = 0;
1976 for(;;) {
1977 /* check if any codec still needs to be handled */
1978 for(i=0;i<ic->nb_streams;i++) {
1979 st = ic->streams[i];
1980 if (!has_codec_parameters(st->codec))
1981 break;
1982 /* variable fps and not enough samples yet to guess the real fps */
1983 if( tb_unreliable(st->codec)
1984 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1985 break;
1986 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1987 break;
1988 if(st->first_dts == AV_NOPTS_VALUE)
1989 break;
1990 }
1991 if (i == ic->nb_streams) {
1992 /* NOTE: if the format has no header, then we need to read
1993 some packets to get most of the streams, so we cannot
1994 stop here */
1995 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1996 /* if we found the info for all the codecs, we can stop */
1997 ret = count;
1998 break;
1999 }
2000 }
2001 /* we did not get all the codec info, but we read too much data */
2002 if (read_size >= MAX_READ_SIZE) {
2003 ret = count;
2004 break;
2005 }
2006
2007 /* NOTE: a new stream can be added here if the file has no header
2008 (AVFMTCTX_NOHEADER) */
2009 ret = av_read_frame_internal(ic, &pkt1);
2010 if (ret < 0) {
2011 /* EOF or error */
2012 ret = -1; /* we could not get all the codec parameters before EOF */
2013 for(i=0;i<ic->nb_streams;i++) {
2014 st = ic->streams[i];
2015 if (!has_codec_parameters(st->codec)){
2016 char buf[256];
2017 avcodec_string(buf, sizeof(buf), st->codec, 0);
2018 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
2019 } else {
2020 ret = 0;
2021 }
2022 }
2023 break;
2024 }
2025
2026 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2027 if(av_dup_packet(pkt) < 0) {
2028 av_free(duration_error);
2029 return AVERROR(ENOMEM);
2030 }
2031
2032 read_size += pkt->size;
2033
2034 st = ic->streams[pkt->stream_index];
2035 if(codec_info_nb_frames[st->index]>1)
2036 codec_info_duration[st->index] += pkt->duration;
2037 if (pkt->duration != 0)
2038 codec_info_nb_frames[st->index]++;
2039
2040 {
2041 int index= pkt->stream_index;
2042 int64_t last= last_dts[index];
2043 int64_t duration= pkt->dts - last;
2044
2045 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2046 double dur= duration * av_q2d(st->time_base);
2047
2048 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2049 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2050 if(duration_count[index] < 2)
2051 memset(duration_error[index], 0, sizeof(*duration_error));
2052 for(i=1; i<MAX_STD_TIMEBASES; i++){
2053 int framerate= get_std_framerate(i);
2054 int ticks= lrintf(dur*framerate/(1001*12));
2055 double error= dur - ticks*1001*12/(double)framerate;
2056 duration_error[index][i] += error*error;
2057 }
2058 duration_count[index]++;
2059 }
2060 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2061 last_dts[pkt->stream_index]= pkt->dts;
2062 }
2063 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2064 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2065 if(i){
2066 st->codec->extradata_size= i;
2067 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2068 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2069 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2070 }
2071 }
2072
2073 /* If we still have no information, try to open the codec and
2074 decompress the frame. We try to avoid that in most cases as
2075 it takes longer and uses more memory. For MPEG-4, we need to
2076 decompress for QuickTime. */
2077 if (!has_codec_parameters(st->codec) /*&&
2078 (st->codec->codec_id == CODEC_ID_FLV1 ||
2079 st->codec->codec_id == CODEC_ID_H264 ||
2080 st->codec->codec_id == CODEC_ID_H263 ||
2081 st->codec->codec_id == CODEC_ID_H261 ||
2082 st->codec->codec_id == CODEC_ID_VORBIS ||
2083 st->codec->codec_id == CODEC_ID_MJPEG ||
2084 st->codec->codec_id == CODEC_ID_PNG ||
2085 st->codec->codec_id == CODEC_ID_PAM ||
2086 st->codec->codec_id == CODEC_ID_PGM ||
2087 st->codec->codec_id == CODEC_ID_PGMYUV ||
2088 st->codec->codec_id == CODEC_ID_PBM ||
2089 st->codec->codec_id == CODEC_ID_PPM ||
2090 st->codec->codec_id == CODEC_ID_SHORTEN ||
2091 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2092 try_decode_frame(st, pkt->data, pkt->size);
2093
2094 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2095 break;
2096 }
2097 count++;
2098 }
2099
2100 // close codecs which were opened in try_decode_frame()
2101 for(i=0;i<ic->nb_streams;i++) {
2102 st = ic->streams[i];
2103 if(st->codec->codec)
2104 avcodec_close(st->codec);
2105 }
2106 for(i=0;i<ic->nb_streams;i++) {
2107 st = ic->streams[i];
2108 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2109 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
2110 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2111
2112 if(duration_count[i]
2113 && tb_unreliable(st->codec) /*&&
2114 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2115 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
2116 double best_error= 2*av_q2d(st->time_base);
2117 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2118
2119 for(j=1; j<MAX_STD_TIMEBASES; j++){
2120 double error= duration_error[i][j] * get_std_framerate(j);
2121 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2122 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2123 if(error < best_error){
2124 best_error= error;
2125 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
2126 }
2127 }
2128 }
2129
2130 if (!st->r_frame_rate.num){
2131 if( st->codec->time_base.den * (int64_t)st->time_base.num
2132 <= st->codec->time_base.num * (int64_t)st->time_base.den){
2133 st->r_frame_rate.num = st->codec->time_base.den;
2134 st->r_frame_rate.den = st->codec->time_base.num;
2135 }else{
2136 st->r_frame_rate.num = st->time_base.den;
2137 st->r_frame_rate.den = st->time_base.num;
2138 }
2139 }
2140 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2141 if(!st->codec->bits_per_sample)
2142 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
2143 }
2144 }
2145
2146 av_estimate_timings(ic, old_offset);
2147
2148 compute_chapters_end(ic);
2149
2150 #if 0
2151 /* correct DTS for B-frame streams with no timestamps */
2152 for(i=0;i<ic->nb_streams;i++) {
2153 st = ic->streams[i];
2154 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2155 if(b-frames){
2156 ppktl = &ic->packet_buffer;
2157 while(ppkt1){
2158 if(ppkt1->stream_index != i)
2159 continue;
2160 if(ppkt1->pkt->dts < 0)
2161 break;
2162 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2163 break;
2164 ppkt1->pkt->dts -= delta;
2165 ppkt1= ppkt1->next;
2166 }
2167 if(ppkt1)
2168 continue;
2169 st->cur_dts -= delta;
2170 }
2171 }
2172 }
2173 #endif
2174
2175 av_free(duration_error);
2176
2177 return ret;
2178 }
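
/* Usage sketch for av_find_stream_info(): a minimal demuxing loop, assuming
 * a hypothetical input file name; error handling is reduced to early returns.
 *
 *     AVFormatContext *ic;
 *     AVPacket pkt;
 *     if (av_open_input_file(&ic, "input.avi", NULL, 0, NULL) < 0)
 *         return -1;
 *     if (av_find_stream_info(ic) < 0) {
 *         av_close_input_file(ic);
 *         return -1;
 *     }
 *     dump_format(ic, 0, "input.avi", 0);
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // ... inspect pkt.stream_index, pkt.data, pkt.size ...
 *         av_free_packet(&pkt);
 *     }
 *     av_close_input_file(ic);
 */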
2179
2180 /*******************************************************/
2181
2182 int av_read_play(AVFormatContext *s)
2183 {
2184 if (s->iformat->read_play)
2185 return s->iformat->read_play(s);
2186 if (s->pb)
2187 return av_url_read_fpause(s->pb, 0);
2188 return AVERROR(ENOSYS);
2189 }
2190
2191 int av_read_pause(AVFormatContext *s)
2192 {
2193 if (s->iformat->read_pause)
2194 return s->iformat->read_pause(s);
2195 if (s->pb)
2196 return av_url_read_fpause(s->pb, 1);
2197 return AVERROR(ENOSYS);
2198 }
2199
2200 void av_close_input_stream(AVFormatContext *s)
2201 {
2202 int i;
2203 AVStream *st;
2204
2205 /* free previous packet */
2206 if (s->cur_st && s->cur_st->parser)
2207 av_free_packet(&s->cur_pkt);
2208
2209 if (s->iformat->read_close)
2210 s->iformat->read_close(s);
2211 for(i=0;i<s->nb_streams;i++) {
2212 /* free all data in a stream component */
2213 st = s->streams[i];
2214 if (st->parser) {
2215 av_parser_close(st->parser);
2216 }
2217 av_free(st->index_entries);
2218 av_free(st->codec->extradata);
2219 av_free(st->codec);
2220 av_free(st->filename);
2221 av_free(st->priv_data);
2222 av_free(st);
2223 }
2224 for(i=s->nb_programs-1; i>=0; i--) {
2225 av_freep(&s->programs[i]->provider_name);
2226 av_freep(&s->programs[i]->name);
2227 av_freep(&s->programs[i]->stream_index);
2228 av_freep(&s->programs[i]);
2229 }
2230 av_freep(&s->programs);
2231 flush_packet_queue(s);
2232 av_freep(&s->priv_data);
2233 while(s->nb_chapters--) {
2234 av_free(s->chapters[s->nb_chapters]->title);
2235 av_free(s->chapters[s->nb_chapters]);
2236 }
2237 av_freep(&s->chapters);
2238 av_free(s);
2239 }
2240
2241 void av_close_input_file(AVFormatContext *s)
2242 {
2243 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2244 av_close_input_stream(s);
2245 if (pb)
2246 url_fclose(pb);
2247 }
2248
2249 AVStream *av_new_stream(AVFormatContext *s, int id)
2250 {
2251 AVStream *st;
2252 int i;
2253
2254 if (s->nb_streams >= MAX_STREAMS)
2255 return NULL;
2256
2257 st = av_mallocz(sizeof(AVStream));
2258 if (!st)
2259 return NULL;
2260
2261 st->codec= avcodec_alloc_context();
2262 if (s->iformat) {
2263 /* no default bitrate if decoding */
2264 st->codec->bit_rate = 0;
2265 }
2266 st->index = s->nb_streams;
2267 st->id = id;
2268 st->start_time = AV_NOPTS_VALUE;
2269 st->duration = AV_NOPTS_VALUE;
2270 /* We set the current DTS to 0 so that formats without any timestamps
2271 but with durations get some timestamps; formats with some unknown
2272 timestamps have their first few packets buffered and the
2273 timestamps corrected before they are returned to the user. */
2274 st->cur_dts = 0;
2275 st->first_dts = AV_NOPTS_VALUE;
2276
2277 /* default pts setting is MPEG-like */
2278 av_set_pts_info(st, 33, 1, 90000);
2279 st->last_IP_pts = AV_NOPTS_VALUE;
2280 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2281 st->pts_buffer[i]= AV_NOPTS_VALUE;
2282
2283 s->streams[s->nb_streams++] = st;
2284 return st;
2285 }
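
/* Usage sketch for av_new_stream() on the muxing side; the output context
 * "oc" and the codec parameters below are purely illustrative and would
 * normally come from the encoder configuration:
 *
 *     AVStream *st = av_new_stream(oc, 0);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->codec->codec_type = CODEC_TYPE_VIDEO;
 *     st->codec->codec_id   = CODEC_ID_MPEG4;
 *     st->codec->width      = 640;
 *     st->codec->height     = 480;
 *     st->codec->time_base  = (AVRational){1, 25};
 *     // av_write_header() below rejects video streams with an unset
 *     // time base or unset dimensions, see its sanity checks.
 */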
2286
2287 AVProgram *av_new_program(AVFormatContext *ac, int id)
2288 {
2289 AVProgram *program=NULL;
2290 int i;
2291
2292 #ifdef DEBUG_SI
2293 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
2294 #endif
2295
2296 for(i=0; i<ac->nb_programs; i++)
2297 if(ac->programs[i]->id == id)
2298 program = ac->programs[i];
2299
2300 if(!program){
2301 program = av_mallocz(sizeof(AVProgram));
2302 if (!program)
2303 return NULL;
2304 dynarray_add(&ac->programs, &ac->nb_programs, program);
2305 program->discard = AVDISCARD_NONE;
2306 }
2307 program->id = id;
2308
2309 return program;
2310 }
2311
2312 void av_set_program_name(AVProgram *program, char *provider_name, char *name)
2313 {
2314 assert(!provider_name == !name);
2315 if(name){
2316 av_free(program->provider_name);
2317 av_free(program-> name);
2318 program->provider_name = av_strdup(provider_name);
2319 program-> name = av_strdup( name);
2320 }
2321 }
2322
2323 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2324 {
2325 AVChapter *chapter = NULL;
2326 int i;
2327
2328 for(i=0; i<s->nb_chapters; i++)
2329 if(s->chapters[i]->id == id)
2330 chapter = s->chapters[i];
2331
2332 if(!chapter){
2333 chapter= av_mallocz(sizeof(AVChapter));
2334 if(!chapter)
2335 return NULL;
2336 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2337 }
2338 av_free(chapter->title);
2339 chapter->title = av_strdup(title);
2340 chapter->id = id;
2341 chapter->time_base= time_base;
2342 chapter->start = start;
2343 chapter->end = end;
2344
2345 return chapter;
2346 }
2347
2348 /************************************************************/
2349 /* output media file */
2350
2351 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2352 {
2353 int ret;
2354
2355 if (s->oformat->priv_data_size > 0) {
2356 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2357 if (!s->priv_data)
2358 return AVERROR(ENOMEM);
2359 } else
2360 s->priv_data = NULL;
2361
2362 if (s->oformat->set_parameters) {
2363 ret = s->oformat->set_parameters(s, ap);
2364 if (ret < 0)
2365 return ret;
2366 }
2367 return 0;
2368 }
2369
2370 int av_write_header(AVFormatContext *s)
2371 {
2372 int ret, i;
2373 AVStream *st;
2374
2375 // some sanity checks
2376 for(i=0;i<s->nb_streams;i++) {
2377 st = s->streams[i];
2378
2379 switch (st->codec->codec_type) {
2380 case CODEC_TYPE_AUDIO:
2381 if(st->codec->sample_rate<=0){
2382 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2383 return -1;
2384 }
2385 break;
2386 case CODEC_TYPE_VIDEO:
2387 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2388 av_log(s, AV_LOG_ERROR, "time base not set\n");
2389 return -1;
2390 }
2391 if(st->codec->width<=0 || st->codec->height<=0){
2392 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2393 return -1;
2394 }
2395 break;
2396 }
2397
2398 if(s->oformat->codec_tag){
2399 if(st->codec->codec_tag){
2400 //FIXME
2401 //check that tag + id is in the table
2402 //if neither is in the table -> OK
2403 //if tag is in the table with another id -> FAIL
2404 //if id is in the table with another tag -> FAIL unless strict < ?
2405 }else
2406 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2407 }
2408 }
2409
2410 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2411 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2412 if (!s->priv_data)
2413 return AVERROR(ENOMEM);
2414 }
2415
2416 if(s->oformat->write_header){
2417 ret = s->oformat->write_header(s);
2418 if (ret < 0)
2419 return ret;
2420 }
2421
2422 /* init PTS generation */
2423 for(i=0;i<s->nb_streams;i++) {
2424 int64_t den = AV_NOPTS_VALUE;
2425 st = s->streams[i];
2426
2427 switch (st->codec->codec_type) {
2428 case CODEC_TYPE_AUDIO:
2429 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2430 break;
2431 case CODEC_TYPE_VIDEO:
2432 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2433 break;
2434 default:
2435 break;
2436 }
2437 if (den != AV_NOPTS_VALUE) {
2438 if (den <= 0)
2439 return AVERROR_INVALIDDATA;
2440 av_frac_init(&st->pts, 0, 0, den);
2441 }
2442 }
2443 return 0;
2444 }
2445
2446 //FIXME merge with compute_pkt_fields
2447 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2448 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2449 int num, den, frame_size, i;
2450
2451 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2452
2453 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2454 return -1;*/
2455
2456 /* duration field */
2457 if (pkt->duration == 0) {
2458 compute_frame_duration(&num, &den, st, NULL, pkt);
2459 if (den && num) {
2460 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2461 }
2462 }
2463
2464 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2465 pkt->pts= pkt->dts;
2466
2467 //XXX/FIXME this is a temporary hack until all encoders output pts
2468 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2469 pkt->dts=
2470 // pkt->pts= st->cur_dts;
2471 pkt->pts= st->pts.val;
2472 }
2473
2474 //calculate dts from pts
2475 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2476 st->pts_buffer[0]= pkt->pts;
2477 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2478 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2479 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2480 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2481
2482 pkt->dts= st->pts_buffer[0];
2483 }
2484
2485 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2486 av_log(st->codec, AV_LOG_ERROR, "error, non-monotonic timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2487 return -1;
2488 }
2489 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2490 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2491 return -1;
2492 }
2493
2494 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2495 st->cur_dts= pkt->dts;
2496 st->pts.val= pkt->dts;
2497
2498 /* update pts */
2499 switch (st->codec->codec_type) {
2500 case CODEC_TYPE_AUDIO:
2501 frame_size = get_audio_frame_size(st->codec, pkt->size);
2502
2503 /* HACK/FIXME: we skip the initial zero-size packets as they are most
2504 likely equal to the encoder delay, but it would be better if we
2505 had the real timestamps from the encoder */
2506 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2507 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2508 }
2509 break;
2510 case CODEC_TYPE_VIDEO:
2511 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2512 break;
2513 default:
2514 break;
2515 }
2516 return 0;
2517 }
2518
2519 static void truncate_ts(AVStream *st, AVPacket *pkt){
2520 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2521
2522 // if(pkt->dts < 0)
2523 // pkt->dts= 0; // this happens for low_delay=0 and B-frames; FIXME: needs further investigation into what we should do here
2524
2525 if (pkt->pts != AV_NOPTS_VALUE)
2526 pkt->pts &= pts_mask;
2527 if (pkt->dts != AV_NOPTS_VALUE)
2528 pkt->dts &= pts_mask;
2529 }
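
/* With the default MPEG-like setting of 33 pts_wrap_bits (see
 * av_set_pts_info() in av_new_stream() above), pts_mask is
 * (2LL << 32) - 1 = 0x1FFFFFFFF, i.e. timestamps are reduced
 * modulo 2^33 before being handed to the muxer. */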
2530
2531 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2532 {
2533 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2534
2535 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2536 return ret;
2537
2538 truncate_ts(s->streams[pkt->stream_index], pkt);
2539
2540 ret= s->oformat->write_packet(s, pkt);
2541 if(!ret)
2542 ret= url_ferror(s->pb);
2543 return ret;
2544 }
2545
2546 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2547 AVPacketList *pktl, **next_point, *this_pktl;
2548 int stream_count=0;
2549 int streams[MAX_STREAMS];
2550
2551 if(pkt){
2552 AVStream *st= s->streams[ pkt->stream_index];
2553
2554 // assert(pkt->destruct != av_destruct_packet); //FIXME
2555
2556 this_pktl = av_mallocz(sizeof(AVPacketList));
2557 this_pktl->pkt= *pkt;
2558 if(pkt->destruct == av_destruct_packet)
2559 pkt->destruct= NULL; // not shared -> must keep original from being freed
2560 else
2561 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2562
2563 next_point = &s->packet_buffer;
2564 while(*next_point){
2565 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2566 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2567 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2568 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2569 break;
2570 next_point= &(*next_point)->next;
2571 }
2572 this_pktl->next= *next_point;
2573 *next_point= this_pktl;
2574 }
2575
2576 memset(streams, 0, sizeof(streams));
2577 pktl= s->packet_buffer;
2578 while(pktl){
2579 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2580 if(streams[ pktl->pkt.stream_index ] == 0)
2581 stream_count++;
2582 streams[ pktl->pkt.stream_index ]++;
2583 pktl= pktl->next;
2584 }
2585
2586 if(stream_count && (s->nb_streams == stream_count || flush)){
2587 pktl= s->packet_buffer;
2588 *out= pktl->pkt;
2589
2590 s->packet_buffer= pktl->next;
2591 av_freep(&pktl);
2592 return 1;
2593 }else{
2594 av_init_packet(out);
2595 return 0;
2596 }
2597 }
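
/* The dts comparison in the insertion loop above cross-multiplies by the
 * time base factors instead of converting to floating point. For example,
 * with the buffered stream at 1/90000 and the new packet's stream at
 * 1/1000, left = 1*1000 and right = 1*90000, so a buffered dts of 180000
 * (2.0s) and a new dts of 1500 (1.5s) give 180000*1000 > 1500*90000 and
 * the new packet is inserted in front of the buffered one. */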
2598
2599 /**
2600 * Interleaves an AVPacket correctly so it can be muxed.
2601 * @param out the interleaved packet will be output here
2602 * @param in the input packet
2603 * @param flush 1 if no further packets are available as input and all
2604 * remaining packets should be output
2605 * @return 1 if a packet was output, 0 if no packet could be output,
2606 * < 0 if an error occurred
2607 */
2608 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2609 if(s->oformat->interleave_packet)
2610 return s->oformat->interleave_packet(s, out, in, flush);
2611 else
2612 return av_interleave_packet_per_dts(s, out, in, flush);
2613 }
2614
2615 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2616 AVStream *st= s->streams[ pkt->stream_index];
2617
2618 //FIXME/XXX/HACK drop zero-sized packets
2619 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2620 return 0;
2621
2622 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2623 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2624 return -1;
2625
2626 if(pkt->dts == AV_NOPTS_VALUE)
2627 return -1;
2628
2629 for(;;){
2630 AVPacket opkt;
2631 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2632 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2633 return ret;
2634
2635 truncate_ts(s->streams[opkt.stream_index], &opkt);
2636 ret= s->oformat->write_packet(s, &opkt);
2637
2638 av_free_packet(&opkt);
2639 pkt= NULL;
2640
2641 if(ret<0)
2642 return ret;
2643 if(url_ferror(s->pb))
2644 return url_ferror(s->pb);
2645 }
2646 }
2647
2648 int av_write_trailer(AVFormatContext *s)
2649 {
2650 int ret, i;
2651
2652 for(;;){
2653 AVPacket pkt;
2654 ret= av_interleave_packet(s, &pkt, NULL, 1);
2655 if(ret<0) //FIXME cleanup needed for ret<0 ?
2656 goto fail;
2657 if(!ret)
2658 break;
2659
2660 truncate_ts(s->streams[pkt.stream_index], &pkt);
2661 ret= s->oformat->write_packet(s, &pkt);
2662
2663 av_free_packet(&pkt);
2664
2665 if(ret<0)
2666 goto fail;
2667 if(url_ferror(s->pb))
2668 goto fail;
2669 }
2670
2671 if(s->oformat->write_trailer)
2672 ret = s->oformat->write_trailer(s);
2673 fail:
2674 if(ret == 0)
2675 ret=url_ferror(s->pb);
2676 for(i=0;i<s->nb_streams;i++)
2677 av_freep(&s->streams[i]->priv_data);
2678 av_freep(&s->priv_data);
2679 return ret;
2680 }
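
/* Usage sketch for the write side, assuming a hypothetical output context
 * "oc" whose streams were set up as shown after av_new_stream() above and
 * whose pb was opened by the caller; the packet source is a placeholder:
 *
 *     AVPacket pkt;
 *     if (av_set_parameters(oc, NULL) < 0)
 *         return -1;
 *     if (av_write_header(oc) < 0)
 *         return -1;
 *     while (get_next_encoded_packet(&pkt) >= 0) {   // hypothetical source
 *         if (av_interleaved_write_frame(oc, &pkt) < 0)
 *             break;
 *     }
 *     av_write_trailer(oc);
 */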
2681
2682 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2683 {
2684 int i, j;
2685 AVProgram *program=NULL;
2686 void *tmp;
2687
2688 for(i=0; i<ac->nb_programs; i++){
2689 if(ac->programs[i]->id != progid)
2690 continue;
2691 program = ac->programs[i];
2692 for(j=0; j<program->nb_stream_indexes; j++)
2693 if(program->stream_index[j] == idx)
2694 return;
2695
2696 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2697 if(!tmp)
2698 return;
2699 program->stream_index = tmp;
2700 program->stream_index[program->nb_stream_indexes++] = idx;
2701 return;
2702 }
2703 }
2704
2705 /* "user interface" functions */
2706 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2707 {
2708 char buf[256];
2709 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2710 AVStream *st = ic->streams[i];
2711 int g = ff_gcd(st->time_base.num, st->time_base.den);
2712 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2713 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2714 /* the PID is important information, so we display it */
2715 /* XXX: add a generic system */
2716 if (flags & AVFMT_SHOW_IDS)
2717 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2718 if (strlen(st->language) > 0)
2719 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2720 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2721 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2722 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2723 if(st->r_frame_rate.den && st->r_frame_rate.num)
2724 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
2725 /* else if(st->time_base.den && st->time_base.num)
2726 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
2727 else
2728 av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
2729 }
2730 av_log(NULL, AV_LOG_INFO, "\n");
2731 }
2732
2733 void dump_format(AVFormatContext *ic,
2734 int index,
2735 const char *url,
2736 int is_output)
2737 {
2738 int i;
2739
2740 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2741 is_output ? "Output" : "Input",
2742 index,
2743 is_output ? ic->oformat->name : ic->iformat->name,
2744 is_output ? "to" : "from", url);
2745 if (!is_output) {
2746 av_log(NULL, AV_LOG_INFO, " Duration: ");
2747 if (ic->duration != AV_NOPTS_VALUE) {
2748 int hours, mins, secs, us;
2749 secs = ic->duration / AV_TIME_BASE;
2750 us = ic->duration % AV_TIME_BASE;
2751 mins = secs / 60;
2752 secs %= 60;
2753 hours = mins / 60;
2754 mins %= 60;
2755 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2756 (100 * us) / AV_TIME_BASE);
2757 } else {
2758 av_log(NULL, AV_LOG_INFO, "N/A");
2759 }
2760 if (ic->start_time != AV_NOPTS_VALUE) {
2761 int secs, us;
2762 av_log(NULL, AV_LOG_INFO, ", start: ");
2763 secs = ic->start_time / AV_TIME_BASE;
2764 us = ic->start_time % AV_TIME_BASE;
2765 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2766 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2767 }
2768 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2769 if (ic->bit_rate) {
2770 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2771 } else {
2772 av_log(NULL, AV_LOG_INFO, "N/A");
2773 }
2774 av_log(NULL, AV_LOG_INFO, "\n");
2775 }
2776 if(ic->nb_programs) {
2777 int j, k;
2778 for(j=0; j<ic->nb_programs; j++) {
2779 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2780 ic->programs[j]->name ? ic->programs[j]->name : "");
2781 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2782 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2783 }
2784 } else
2785 for(i=0;i<ic->nb_streams;i++)
2786 dump_stream_format(ic, i, index, is_output);
2787 }
2788
2789 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2790 {
2791 return av_parse_video_frame_size(width_ptr, height_ptr, str);
2792 }
2793
2794 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2795 {
2796 AVRational frame_rate;
2797 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2798 *frame_rate_num= frame_rate.num;
2799 *frame_rate_den= frame_rate.den;
2800 return ret;
2801 }
2802
2803 /**
2804 * Gets the current time in microseconds.
2805 */
2806 int64_t av_gettime(void)
2807 {
2808 struct timeval tv;
2809 gettimeofday(&tv,NULL);
2810 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
2811 }
2812
2813 int64_t parse_date(const char *datestr, int duration)
2814 {
2815 const char *p;
2816 int64_t t;
2817 struct tm dt;
2818 int i;
2819 static const char *date_fmt[] = {
2820 "%Y-%m-%d",
2821 "%Y%m%d",
2822 };
2823 static const char *time_fmt[] = {
2824 "%H:%M:%S",
2825 "%H%M%S",
2826 };
2827 const char *q;
2828 int is_utc, len;
2829 char lastch;
2830 int negative = 0;
2831
2832 #undef time
2833 time_t now = time(0);
2834
2835 len = strlen(datestr);
2836 if (len > 0)
2837 lastch = datestr[len - 1];
2838 else
2839 lastch = '\0';
2840 is_utc = (lastch == 'z' || lastch == 'Z');
2841
2842 memset(&dt, 0, sizeof(dt));
2843
2844 p = datestr;
2845 q = NULL;
2846 if (!duration) {
2847 /* parse the year-month-day part */
2848 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2849 q = small_strptime(p, date_fmt[i], &dt);
2850 if (q) {
2851 break;
2852 }
2853 }
2854
2855 /* if the year-month-day part is missing, then take the
2856 * current year-month-day time */
2857 if (!q) {
2858 if (is_utc) {
2859 dt = *gmtime(&now);
2860 } else {
2861 dt = *localtime(&now);
2862 }
2863 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2864 } else {
2865 p = q;
2866 }
2867
2868 if (*p == 'T' || *p == 't' || *p == ' ')
2869 p++;
2870
2871 /* parse the hour-minute-second part */
2872 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2873 q = small_strptime(p, time_fmt[i], &dt);
2874 if (q) {
2875 break;
2876 }
2877 }
2878 } else {
2879 /* parse datestr as a duration */
2880 if (p[0] == '-') {
2881 negative = 1;
2882 ++p;
2883 }
2884 /* parse datestr as HH:MM:SS */
2885 q = small_strptime(p, time_fmt[0], &dt);
2886 if (!q) {
2887 /* parse datestr as S+ */
2888 dt.tm_sec = strtol(p, (char **)&q, 10);
2889 if (q == p)
2890 /* the parsing didn't succeed */
2891 return INT64_MIN;
2892 dt.tm_min = 0;
2893 dt.tm_hour = 0;
2894 }
2895 }
2896
2897 /* Now we have all the fields that we can get */
2898 if (!q) {
2899 return INT64_MIN;
2900 }
2901
2902 if (duration) {
2903 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2904 } else {
2905 dt.tm_isdst = -1; /* unknown */
2906 if (is_utc) {
2907 t = mktimegm(&dt);
2908 } else {
2909 t = mktime(&dt);
2910 }
2911 }
2912
2913 t *= 1000000;
2914
2915 /* parse the fractional-second part (".m..."), if any */
2916 if (*q == '.') {
2917 int val, n;
2918 q++;
2919 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2920 if (!isdigit(*q))
2921 break;
2922 val += n * (*q - '0');
2923 }
2924 t += val;
2925 }
2926 return negative ? -t : t;
2927 }
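
/* Examples: parse_date("2001-02-03 04:05:06", 0) returns that local time in
 * microseconds since the Unix epoch, parse_date("04:05:06.5", 1) parses a
 * duration and returns 4*3600+5*60+6 seconds plus 500000us = 14706500000,
 * and parse_date("-90", 1) returns -90000000. Unparsable input yields
 * INT64_MIN. */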
2928
2929 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2930 {
2931 const char *p;
2932 char tag[128], *q;
2933
2934 p = info;
2935 if (*p == '?')
2936 p++;
2937 for(;;) {
2938 q = tag;
2939 while (*p != '\0' && *p != '=' && *p != '&') {
2940 if ((q - tag) < sizeof(tag) - 1)
2941 *q++ = *p;
2942 p++;
2943 }
2944 *q = '\0';
2945 q = arg;
2946 if (*p == '=') {
2947 p++;
2948 while (*p != '&' && *p != '\0') {
2949 if ((q - arg) < arg_size - 1) {
2950 if (*p == '+')
2951 *q++ = ' ';
2952 else
2953 *q++ = *p;
2954 }
2955 p++;
2956 }
2957 *q = '\0';
2958 }
2959 if (!strcmp(tag, tag1))
2960 return 1;
2961 if (*p != '&')
2962 break;
2963 p++;
2964 }
2965 return 0;
2966 }
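
/* Example: with info = "?codec=mpeg4&bitrate=500+kb", calling
 * find_info_tag(buf, sizeof(buf), "bitrate", info) returns 1 and stores
 * "500 kb" in the caller-provided buf (a '+' in the value decodes to a
 * space); asking for a tag that is not present returns 0. */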
2967
2968 int av_get_frame_filename(char *buf, int buf_size,
2969 const char *path, int number)
2970 {
2971 const char *p;
2972 char *q, buf1[20], c;
2973 int nd, len, percentd_found;
2974
2975 q = buf;
2976 p = path;
2977 percentd_found = 0;
2978 for(;;) {
2979 c = *p++;
2980 if (c == '\0')
2981 break;
2982 if (c == '%') {
2983 do {
2984 nd = 0;
2985 while (isdigit(*p)) {
2986 nd = nd * 10 + *p++ - '0';
2987 }
2988 c = *p++;
2989 } while (isdigit(c));
2990
2991 switch(c) {
2992 case '%':
2993 goto addchar;
2994 case 'd':
2995 if (percentd_found)
2996 goto fail;
2997 percentd_found = 1;
2998 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2999 len = strlen(buf1);
3000 if ((q - buf + len) > buf_size - 1)
3001 goto fail;
3002 memcpy(q, buf1, len);
3003 q += len;
3004 break;
3005 default:
3006 goto fail;
3007 }
3008 } else {
3009 addchar:
3010 if ((q - buf) < buf_size - 1)
3011 *q++ = c;
3012 }
3013 }
3014 if (!percentd_found)
3015 goto fail;
3016 *q = '\0';
3017 return 0;
3018 fail:
3019 *q = '\0';
3020 return -1;
3021 }
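
/* Example: av_get_frame_filename(buf, sizeof(buf), "img%03d.png", 7) writes
 * "img007.png" into buf and returns 0; a path containing no "%d"/"%0Nd", or
 * more than one, makes the function return -1 ("%%" escapes a literal '%'). */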
3022
3023 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3024 {
3025 int len, i, j, c;
3026 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3027
3028 for(i=0;i<size;i+=16) {
3029 len = size - i;
3030 if (len > 16)
3031 len = 16;
3032 PRINT("%08x ", i);
3033 for(j=0;j<16;j++) {
3034 if (j < len)
3035 PRINT(" %02x", buf[i+j]);
3036 else
3037 PRINT(" ");
3038 }
3039 PRINT(" ");
3040 for(j=0;j<len;j++) {
3041 c = buf[i+j];
3042 if (c < ' ' || c > '~')
3043 c = '.';
3044 PRINT("%c", c);
3045 }
3046 PRINT("\n");
3047 }
3048 #undef PRINT
3049 }
3050
3051 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3052 {
3053 hex_dump_internal(NULL, f, 0, buf, size);
3054 }
3055
3056 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3057 {
3058 hex_dump_internal(avcl, NULL, level, buf, size);
3059 }
3060
3061 //FIXME needs to know the time_base
3062 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3063 {
3064 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3065 PRINT("stream #%d:\n", pkt->stream_index);
3066 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3067 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3068 /* DTS is _always_ valid after av_read_frame() */
3069 PRINT(" dts=");
3070 if (pkt->dts == AV_NOPTS_VALUE)
3071 PRINT("N/A");
3072 else
3073 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3074 /* PTS may not be known if B-frames are present. */
3075 PRINT(" pts=");
3076 if (pkt->pts == AV_NOPTS_VALUE)
3077 PRINT("N/A");
3078 else
3079 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3080 PRINT("\n");
3081 PRINT(" size=%d\n", pkt->size);
3082 #undef PRINT
3083 if (dump_payload)
3084 av_hex_dump(f, pkt->data, pkt->size);
3085 }
3086
3087 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3088 {
3089 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3090 }
3091
3092 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3093 {
3094 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
3095 }
3096
3097 void url_split(char *proto, int proto_size,
3098 char *authorization, int authorization_size,
3099 char *hostname, int hostname_size,
3100 int *port_ptr,
3101 char *path, int path_size,
3102 const char *url)
3103 {
3104 const char *p, *ls, *at, *col, *brk;
3105
3106 if (port_ptr) *port_ptr = -1;
3107 if (proto_size > 0) proto[0] = 0;
3108 if (authorization_size > 0) authorization[0] = 0;
3109 if (hostname_size > 0) hostname[0] = 0;
3110 if (path_size > 0) path[0] = 0;
3111
3112 /* parse protocol */
3113 if ((p = strchr(url, ':'))) {
3114 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3115 p++; /* skip ':' */
3116 if (*p == '/') p++;
3117 if (*p == '/') p++;
3118 } else {
3119 /* no protocol means plain filename */
3120 av_strlcpy(path, url, path_size);
3121 return;
3122 }
3123
3124 /* separate path from hostname */
3125 ls = strchr(p, '/');
3126 if(!ls)
3127 ls = strchr(p, '?');
3128 if(ls)
3129 av_strlcpy(path, ls, path_size);
3130 else
3131 ls = &p[strlen(p)]; // XXX