ffmpeg: merge input_codecs into input_streams.
[libav.git] / ffmpeg.c
1 /*
2 * ffmpeg main
3 * Copyright (c) 2000-2003 Fabrice Bellard
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <ctype.h>
24 #include <string.h>
25 #include <math.h>
26 #include <stdlib.h>
27 #include <errno.h>
28 #include <signal.h>
29 #include <limits.h>
30 #include <unistd.h>
31 #include "libavformat/avformat.h"
32 #include "libavdevice/avdevice.h"
33 #include "libswscale/swscale.h"
34 #include "libavutil/opt.h"
35 #include "libavcodec/audioconvert.h"
36 #include "libavutil/audioconvert.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavformat/os_support.h"
48
49 #if CONFIG_AVFILTER
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/vsrc_buffer.h"
53 #endif
54
55 #if HAVE_SYS_RESOURCE_H
56 #include <sys/types.h>
57 #include <sys/time.h>
58 #include <sys/resource.h>
59 #elif HAVE_GETPROCESSTIMES
60 #include <windows.h>
61 #endif
62 #if HAVE_GETPROCESSMEMORYINFO
63 #include <windows.h>
64 #include <psapi.h>
65 #endif
66
67 #if HAVE_SYS_SELECT_H
68 #include <sys/select.h>
69 #endif
70
71 #include <time.h>
72
73 #include "cmdutils.h"
74
75 #include "libavutil/avassert.h"
76
77 const char program_name[] = "ffmpeg";
78 const int program_birth_year = 2000;
79
80 /* select an input stream for an output stream */
81 typedef struct StreamMap {
82 int file_index;
83 int stream_index;
84 int sync_file_index;
85 int sync_stream_index;
86 } StreamMap;
87
88 /**
89 * select an input file for an output file
90 */
91 typedef struct MetadataMap {
92 int file; //< file index
93 char type; //< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
94 int index; //< stream/chapter/program number
95 } MetadataMap;
96
97 typedef struct ChapterMap {
98 int in_file;
99 int out_file;
100 } ChapterMap;
101
102 static const OptionDef options[];
103
104 #define MAX_FILES 100
105 #define MAX_STREAMS 1024 /* arbitrary sanity check value */
106
107 #define FFM_PACKET_SIZE 4096 //XXX a duplicate of the line in ffm.h
108
109 static const char *last_asked_format = NULL;
110 static int64_t input_files_ts_offset[MAX_FILES];
111 static double *input_files_ts_scale[MAX_FILES] = {NULL};
112 static int nb_input_files_ts_scale[MAX_FILES] = {0};
113
114 static AVFormatContext *output_files[MAX_FILES];
115 static AVDictionary *output_opts[MAX_FILES];
116 static int nb_output_files = 0;
117
118 static StreamMap *stream_maps = NULL;
119 static int nb_stream_maps;
120
121 /* first item specifies output metadata, second is input */
122 static MetadataMap (*meta_data_maps)[2] = NULL;
123 static int nb_meta_data_maps;
124 static int metadata_global_autocopy = 1;
125 static int metadata_streams_autocopy = 1;
126 static int metadata_chapters_autocopy = 1;
127
128 static ChapterMap *chapter_maps = NULL;
129 static int nb_chapter_maps;
130
131 /* indexed by output file stream index */
132 static int *streamid_map = NULL;
133 static int nb_streamid_map = 0;
134
135 static int frame_width = 0;
136 static int frame_height = 0;
137 static float frame_aspect_ratio = 0;
138 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
139 static enum AVSampleFormat audio_sample_fmt = AV_SAMPLE_FMT_NONE;
140 static int max_frames[4] = {INT_MAX, INT_MAX, INT_MAX, INT_MAX};
141 static AVRational frame_rate;
142 static float video_qscale = 0;
143 static uint16_t *intra_matrix = NULL;
144 static uint16_t *inter_matrix = NULL;
145 static const char *video_rc_override_string=NULL;
146 static int video_disable = 0;
147 static int video_discard = 0;
148 static char *video_codec_name = NULL;
149 static unsigned int video_codec_tag = 0;
150 static char *video_language = NULL;
151 static int same_quality = 0;
152 static int do_deinterlace = 0;
153 static int top_field_first = -1;
154 static int me_threshold = 0;
155 static int intra_dc_precision = 8;
156 static int loop_input = 0;
157 static int loop_output = AVFMT_NOOUTPUTLOOP;
158 static int qp_hist = 0;
159 #if CONFIG_AVFILTER
160 static char *vfilters = NULL;
161 #endif
162
163 static int intra_only = 0;
164 static int audio_sample_rate = 0;
165 static int64_t channel_layout = 0;
166 #define QSCALE_NONE -99999
167 static float audio_qscale = QSCALE_NONE;
168 static int audio_disable = 0;
169 static int audio_channels = 0;
170 static char *audio_codec_name = NULL;
171 static unsigned int audio_codec_tag = 0;
172 static char *audio_language = NULL;
173
174 static int subtitle_disable = 0;
175 static char *subtitle_codec_name = NULL;
176 static char *subtitle_language = NULL;
177 static unsigned int subtitle_codec_tag = 0;
178
179 static int data_disable = 0;
180 static char *data_codec_name = NULL;
181 static unsigned int data_codec_tag = 0;
182
183 static float mux_preload= 0.5;
184 static float mux_max_delay= 0.7;
185
186 static int64_t recording_time = INT64_MAX;
187 static int64_t start_time = 0;
188 static int64_t recording_timestamp = 0;
189 static int64_t input_ts_offset = 0;
190 static int file_overwrite = 0;
191 static AVDictionary *metadata;
192 static int do_benchmark = 0;
193 static int do_hex_dump = 0;
194 static int do_pkt_dump = 0;
195 static int do_psnr = 0;
196 static int do_pass = 0;
197 static char *pass_logfilename_prefix = NULL;
198 static int audio_stream_copy = 0;
199 static int video_stream_copy = 0;
200 static int subtitle_stream_copy = 0;
201 static int data_stream_copy = 0;
202 static int video_sync_method= -1;
203 static int audio_sync_method= 0;
204 static float audio_drift_threshold= 0.1;
205 static int copy_ts= 0;
206 static int copy_tb;
207 static int opt_shortest = 0;
208 static char *vstats_filename;
209 static FILE *vstats_file;
210 static int opt_programid = 0;
211 static int copy_initial_nonkeyframes = 0;
212
213 static int rate_emu = 0;
214
215 static int audio_volume = 256;
216
217 static int exit_on_error = 0;
218 static int using_stdin = 0;
219 static int verbose = 1;
220 static int thread_count= 1;
221 static int64_t video_size = 0;
222 static int64_t audio_size = 0;
223 static int64_t extra_size = 0;
224 static int nb_frames_dup = 0;
225 static int nb_frames_drop = 0;
226 static int input_sync;
227 static uint64_t limit_filesize = 0;
228 static int force_fps = 0;
229 static char *forced_key_frames = NULL;
230
231 static float dts_delta_threshold = 10;
232
233 static int64_t timer_start;
234
235 static uint8_t *audio_buf;
236 static uint8_t *audio_out;
237 static unsigned int allocated_audio_out_size, allocated_audio_buf_size;
238
239 static short *samples;
240
241 static AVBitStreamFilterContext *video_bitstream_filters=NULL;
242 static AVBitStreamFilterContext *audio_bitstream_filters=NULL;
243 static AVBitStreamFilterContext *subtitle_bitstream_filters=NULL;
244
245 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
246
247 struct InputStream;
248
249 typedef struct OutputStream {
250 int file_index; /* file index */
251 int index; /* stream index in the output file */
252 int source_index; /* InputStream index */
253 AVStream *st; /* stream in the output file */
254 int encoding_needed; /* true if encoding needed for this stream */
255 int frame_number;
256 /* input pts and corresponding output pts
257 for A/V sync */
258 //double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
259 struct InputStream *sync_ist; /* input stream to sync against */
260 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
261 AVBitStreamFilterContext *bitstream_filters;
262 AVCodec *enc;
263
264 /* video only */
265 int video_resample;
266 AVFrame pict_tmp; /* temporary image for resampling */
267 struct SwsContext *img_resample_ctx; /* for image resampling */
268 int resample_height;
269 int resample_width;
270 int resample_pix_fmt;
271 AVRational frame_rate;
272
273 float frame_aspect_ratio;
274
275 /* forced key frames */
276 int64_t *forced_kf_pts;
277 int forced_kf_count;
278 int forced_kf_index;
279
280 /* audio only */
281 int audio_resample;
282 ReSampleContext *resample; /* for audio resampling */
283 int resample_sample_fmt;
284 int resample_channels;
285 int resample_sample_rate;
286 int reformat_pair;
287 AVAudioConvert *reformat_ctx;
288 AVFifoBuffer *fifo; /* for compression: one audio fifo per codec */
289 FILE *logfile;
290
291 #if CONFIG_AVFILTER
292 AVFilterContext *output_video_filter;
293 AVFilterContext *input_video_filter;
294 AVFilterBufferRef *picref;
295 char *avfilter;
296 AVFilterGraph *graph;
297 #endif
298
299 int sws_flags;
300 } OutputStream;
301
302 static OutputStream **output_streams_for_file[MAX_FILES] = { NULL };
303 static int nb_output_streams_for_file[MAX_FILES] = { 0 };
304
305 typedef struct InputStream {
306 int file_index;
307 AVStream *st;
308 int discard; /* true if stream data should be discarded */
309 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
310 int64_t sample_index; /* current sample */
311 AVCodec *dec;
312
313 int64_t start; /* time when read started */
314 int64_t next_pts; /* synthetic pts for cases where pkt.pts
315 is not defined */
316 int64_t pts; /* current pts */
317 PtsCorrectionContext pts_ctx;
318 int is_start; /* is 1 at the start and after a discontinuity */
319 int showed_multi_packet_warning;
320 int is_past_recording_time;
321 #if CONFIG_AVFILTER
322 AVFrame *filter_frame;
323 int has_filter_frame;
324 #endif
325 } InputStream;
326
327 typedef struct InputFile {
328 AVFormatContext *ctx;
329 int eof_reached; /* true if eof reached */
330 int ist_index; /* index of first stream in ist_table */
331 int buffer_size; /* current total buffer size */
332 } InputFile;
333
334 static InputStream *input_streams = NULL;
335 static int nb_input_streams = 0;
336 static InputFile *input_files = NULL;
337 static int nb_input_files = 0;
338
339 #if CONFIG_AVFILTER
340
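/* Build the filter chain for one output stream: a "buffer" source fed with
 * decoded frames, an optional "scale" filter when output and input sizes
 * differ, the user-supplied -vf chain (if any), and an ffsink output from
 * which filtered frames are pulled back for encoding. */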
341 static int configure_video_filters(InputStream *ist, OutputStream *ost)
342 {
343 AVFilterContext *last_filter, *filter;
344 /** filter graph containing all filters including input & output */
345 AVCodecContext *codec = ost->st->codec;
346 AVCodecContext *icodec = ist->st->codec;
347 FFSinkContext ffsink_ctx = { .pix_fmt = codec->pix_fmt };
348 AVRational sample_aspect_ratio;
349 char args[255];
350 int ret;
351
352 ost->graph = avfilter_graph_alloc();
353
354 if (ist->st->sample_aspect_ratio.num){
355 sample_aspect_ratio = ist->st->sample_aspect_ratio;
356 }else
357 sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
358
359 snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
360 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
361 sample_aspect_ratio.num, sample_aspect_ratio.den);
362
363 ret = avfilter_graph_create_filter(&ost->input_video_filter, avfilter_get_by_name("buffer"),
364 "src", args, NULL, ost->graph);
365 if (ret < 0)
366 return ret;
367 ret = avfilter_graph_create_filter(&ost->output_video_filter, &ffsink,
368 "out", NULL, &ffsink_ctx, ost->graph);
369 if (ret < 0)
370 return ret;
371 last_filter = ost->input_video_filter;
372
373 if (codec->width != icodec->width || codec->height != icodec->height) {
374 snprintf(args, 255, "%d:%d:flags=0x%X",
375 codec->width,
376 codec->height,
377 ost->sws_flags);
378 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
379 NULL, args, NULL, ost->graph)) < 0)
380 return ret;
381 if ((ret = avfilter_link(last_filter, 0, filter, 0)) < 0)
382 return ret;
383 last_filter = filter;
384 }
385
386 snprintf(args, sizeof(args), "flags=0x%X", ost->sws_flags);
387 ost->graph->scale_sws_opts = av_strdup(args);
388
389 if (ost->avfilter) {
390 AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
391 AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
392
393 outputs->name = av_strdup("in");
394 outputs->filter_ctx = last_filter;
395 outputs->pad_idx = 0;
396 outputs->next = NULL;
397
398 inputs->name = av_strdup("out");
399 inputs->filter_ctx = ost->output_video_filter;
400 inputs->pad_idx = 0;
401 inputs->next = NULL;
402
403 if ((ret = avfilter_graph_parse(ost->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
404 return ret;
405 av_freep(&ost->avfilter);
406 } else {
407 if ((ret = avfilter_link(last_filter, 0, ost->output_video_filter, 0)) < 0)
408 return ret;
409 }
410
411 if ((ret = avfilter_graph_config(ost->graph, NULL)) < 0)
412 return ret;
413
414 codec->width = ost->output_video_filter->inputs[0]->w;
415 codec->height = ost->output_video_filter->inputs[0]->h;
416 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
417 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
418 av_d2q(ost->frame_aspect_ratio*codec->height/codec->width, 255) :
419 ost->output_video_filter->inputs[0]->sample_aspect_ratio;
420
421 return 0;
422 }
423 #endif /* CONFIG_AVFILTER */
424
425 static void term_exit(void)
426 {
427 av_log(NULL, AV_LOG_QUIET, "");
428 }
429
430 static volatile int received_sigterm = 0;
431 static volatile int received_nb_signals = 0;
432
433 static void
434 sigterm_handler(int sig)
435 {
436 received_sigterm = sig;
437 received_nb_signals++;
438 term_exit();
439 }
440
441 static void term_init(void)
442 {
443 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
444 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
445 #ifdef SIGXCPU
446 signal(SIGXCPU, sigterm_handler);
447 #endif
448 }
449
450 static int decode_interrupt_cb(void)
451 {
452 return received_nb_signals > 1;
453 }
454
455 static int ffmpeg_exit(int ret)
456 {
457 int i;
458
459 /* close files */
460 for(i=0;i<nb_output_files;i++) {
461 AVFormatContext *s = output_files[i];
462 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
463 avio_close(s->pb);
464 avformat_free_context(s);
465 av_free(output_streams_for_file[i]);
466 av_dict_free(&output_opts[i]);
467 }
468 for(i=0;i<nb_input_files;i++) {
469 av_close_input_file(input_files[i].ctx);
470 av_free(input_files_ts_scale[i]);
471 }
472
473 av_free(intra_matrix);
474 av_free(inter_matrix);
475
476 if (vstats_file)
477 fclose(vstats_file);
478 av_free(vstats_filename);
479
480 av_free(streamid_map);
481 av_free(stream_maps);
482 av_free(meta_data_maps);
483
484 av_freep(&input_streams);
485 av_freep(&input_files);
486
487 av_free(video_codec_name);
488 av_free(audio_codec_name);
489 av_free(subtitle_codec_name);
490 av_free(data_codec_name);
491
492 uninit_opts();
493 av_free(audio_buf);
494 av_free(audio_out);
495 allocated_audio_buf_size= allocated_audio_out_size= 0;
496 av_free(samples);
497
498 #if CONFIG_AVFILTER
499 avfilter_uninit();
500 #endif
501
502 if (received_sigterm) {
503 fprintf(stderr,
504 "Received signal %d: terminating.\n",
505 (int) received_sigterm);
506 exit (255);
507 }
508
509 exit(ret); /* not all OS-es handle main() return value */
510 return ret;
511 }
512
513 static void assert_avoptions(AVDictionary *m)
514 {
515 AVDictionaryEntry *t;
516 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
517 av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
518 ffmpeg_exit(1);
519 }
520 }
521
522 /* similar to ff_dynarray_add() and av_fast_realloc() */
523 static void *grow_array(void *array, int elem_size, int *size, int new_size)
524 {
525 if (new_size >= INT_MAX / elem_size) {
526 fprintf(stderr, "Array too big.\n");
527 ffmpeg_exit(1);
528 }
529 if (*size < new_size) {
530 uint8_t *tmp = av_realloc(array, new_size*elem_size);
531 if (!tmp) {
532 fprintf(stderr, "Could not alloc buffer.\n");
533 ffmpeg_exit(1);
534 }
535 memset(tmp + *size*elem_size, 0, (new_size-*size) * elem_size);
536 *size = new_size;
537 return tmp;
538 }
539 return array;
540 }
541
542 static void choose_sample_fmt(AVStream *st, AVCodec *codec)
543 {
544 if(codec && codec->sample_fmts){
545 const enum AVSampleFormat *p= codec->sample_fmts;
546 for(; *p!=-1; p++){
547 if(*p == st->codec->sample_fmt)
548 break;
549 }
550 if (*p == -1) {
551 av_log(NULL, AV_LOG_WARNING,
552 "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
553 av_get_sample_fmt_name(st->codec->sample_fmt),
554 codec->name,
555 av_get_sample_fmt_name(codec->sample_fmts[0]));
556 st->codec->sample_fmt = codec->sample_fmts[0];
557 }
558 }
559 }
560
561 /**
562 * Update the requested input sample format based on the output sample format.
563 * This is currently only used to request float output from decoders which
564 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
565 * Ideally this will be removed in the future when decoders do not do format
566 * conversion and only output in their native format.
567 */
568 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
569 AVCodecContext *enc)
570 {
571 /* if sample formats match or a decoder sample format has already been
572 requested, just return */
573 if (enc->sample_fmt == dec->sample_fmt ||
574 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
575 return;
576
577 /* if decoder supports more than one output format */
578 if (dec_codec && dec_codec->sample_fmts &&
579 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
580 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
581 const enum AVSampleFormat *p;
582 int min_dec = INT_MAX, min_inc = INT_MAX;
583
584 /* find a matching sample format in the encoder */
585 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
586 if (*p == enc->sample_fmt) {
587 dec->request_sample_fmt = *p;
588 return;
589 } else if (*p > enc->sample_fmt) {
590 min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
591 } else
592 min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
593 }
594
595 /* if none match, provide the one that matches quality closest */
596 dec->request_sample_fmt = min_inc != INT_MAX ? enc->sample_fmt + min_inc :
597 enc->sample_fmt - min_dec;
598 }
599 }
600
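/* pick the supported sample rate closest to the requested one, warning
 * when an exact match is not available */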
601 static void choose_sample_rate(AVStream *st, AVCodec *codec)
602 {
603 if(codec && codec->supported_samplerates){
604 const int *p= codec->supported_samplerates;
605 int best=0;
606 int best_dist=INT_MAX;
607 for(; *p; p++){
608 int dist= abs(st->codec->sample_rate - *p);
609 if(dist < best_dist){
610 best_dist= dist;
611 best= *p;
612 }
613 }
614 if(best_dist){
615 av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported, using closest supported (%d)\n", best);
616 }
617 st->codec->sample_rate= best;
618 }
619 }
620
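/* if the encoder does not support the requested pixel format, fall back to
 * the first format it does support (with relaxed JPEG-range lists for
 * MJPEG/LJPEG under non-strict compliance) and warn about the change */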
621 static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
622 {
623 if(codec && codec->pix_fmts){
624 const enum PixelFormat *p= codec->pix_fmts;
625 if(st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL){
626 if(st->codec->codec_id==CODEC_ID_MJPEG){
627 p= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE};
628 }else if(st->codec->codec_id==CODEC_ID_LJPEG){
629 p= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE};
630 }
631 }
632 for(; *p!=-1; p++){
633 if(*p == st->codec->pix_fmt)
634 break;
635 }
636 if (*p == -1) {
637 if(st->codec->pix_fmt != PIX_FMT_NONE)
638 av_log(NULL, AV_LOG_WARNING,
639 "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
640 av_pix_fmt_descriptors[st->codec->pix_fmt].name,
641 codec->name,
642 av_pix_fmt_descriptors[codec->pix_fmts[0]].name);
643 st->codec->pix_fmt = codec->pix_fmts[0];
644 }
645 }
646 }
647
648 static OutputStream *new_output_stream(AVFormatContext *oc, int file_idx)
649 {
650 int idx = oc->nb_streams - 1;
651 OutputStream *ost;
652
653 output_streams_for_file[file_idx] =
654 grow_array(output_streams_for_file[file_idx],
655 sizeof(*output_streams_for_file[file_idx]),
656 &nb_output_streams_for_file[file_idx],
657 oc->nb_streams);
658 ost = output_streams_for_file[file_idx][idx] =
659 av_mallocz(sizeof(OutputStream));
660 if (!ost) {
661 fprintf(stderr, "Could not alloc output stream\n");
662 ffmpeg_exit(1);
663 }
664 ost->file_index = file_idx;
665 ost->index = idx;
666
667 ost->sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
668 return ost;
669 }
670
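/* used when the output is an ffserver feed: open the feed to learn the
 * stream layout the server expects and clone those streams into the
 * output context */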
671 static int read_ffserver_streams(AVFormatContext *s, const char *filename)
672 {
673 int i, err;
674 AVFormatContext *ic = NULL;
675 int nopts = 0;
676
677 err = avformat_open_input(&ic, filename, NULL, NULL);
678 if (err < 0)
679 return err;
680 /* copy stream format */
681 s->nb_streams = 0;
682 s->streams = av_mallocz(sizeof(AVStream *) * ic->nb_streams);
683 for(i=0;i<ic->nb_streams;i++) {
684 AVStream *st;
685 AVCodec *codec;
686
687 s->nb_streams++;
688
689 // FIXME: a more elegant solution is needed
690 st = av_mallocz(sizeof(AVStream));
691 memcpy(st, ic->streams[i], sizeof(AVStream));
692 st->info = NULL;
693 st->codec = avcodec_alloc_context();
694 if (!st->codec) {
695 print_error(filename, AVERROR(ENOMEM));
696 ffmpeg_exit(1);
697 }
698 avcodec_copy_context(st->codec, ic->streams[i]->codec);
699 s->streams[i] = st;
700
701 codec = avcodec_find_encoder(st->codec->codec_id);
702 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
703 if (audio_stream_copy) {
704 st->stream_copy = 1;
705 } else
706 choose_sample_fmt(st, codec);
707 } else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
708 if (video_stream_copy) {
709 st->stream_copy = 1;
710 } else
711 choose_pixel_fmt(st, codec);
712 }
713
714 if(st->codec->flags & CODEC_FLAG_BITEXACT)
715 nopts = 1;
716
717 new_output_stream(s, nb_output_files);
718 }
719
720 if (!nopts)
721 s->timestamp = av_gettime();
722
723 av_close_input_file(ic);
724 return 0;
725 }
726
727 static double
728 get_sync_ipts(const OutputStream *ost)
729 {
730 const InputStream *ist = ost->sync_ist;
731 return (double)(ist->pts - start_time)/AV_TIME_BASE;
732 }
733
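/* run the packet through the registered bitstream filters (if any) and
 * hand it to the muxer via av_interleaved_write_frame() */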
734 static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){
735 int ret;
736
737 while(bsfc){
738 AVPacket new_pkt= *pkt;
739 int a= av_bitstream_filter_filter(bsfc, avctx, NULL,
740 &new_pkt.data, &new_pkt.size,
741 pkt->data, pkt->size,
742 pkt->flags & AV_PKT_FLAG_KEY);
743 if(a>0){
744 av_free_packet(pkt);
745 new_pkt.destruct= av_destruct_packet;
746 } else if(a<0){
747 fprintf(stderr, "%s failed for stream %d, codec %s",
748 bsfc->filter->name, pkt->stream_index,
749 avctx->codec ? avctx->codec->name : "copy");
750 print_error("", a);
751 if (exit_on_error)
752 ffmpeg_exit(1);
753 }
754 *pkt= new_pkt;
755
756 bsfc= bsfc->next;
757 }
758
759 ret= av_interleaved_write_frame(s, pkt);
760 if(ret < 0){
761 print_error("av_interleaved_write_frame()", ret);
762 ffmpeg_exit(1);
763 }
764 }
765
766 #define MAX_AUDIO_PACKET_SIZE (128 * 1024)
767
768 static void do_audio_out(AVFormatContext *s,
769 OutputStream *ost,
770 InputStream *ist,
771 unsigned char *buf, int size)
772 {
773 uint8_t *buftmp;
774 int64_t audio_out_size, audio_buf_size;
775 int64_t allocated_for_size= size;
776
777 int size_out, frame_bytes, ret, resample_changed;
778 AVCodecContext *enc= ost->st->codec;
779 AVCodecContext *dec= ist->st->codec;
780 int osize = av_get_bytes_per_sample(enc->sample_fmt);
781 int isize = av_get_bytes_per_sample(dec->sample_fmt);
782 const int coded_bps = av_get_bits_per_sample(enc->codec->id);
783
784 need_realloc:
785 audio_buf_size= (allocated_for_size + isize*dec->channels - 1) / (isize*dec->channels);
786 audio_buf_size= (audio_buf_size*enc->sample_rate + dec->sample_rate) / dec->sample_rate;
787 audio_buf_size= audio_buf_size*2 + 10000; //safety factors for the deprecated resampling API
788 audio_buf_size= FFMAX(audio_buf_size, enc->frame_size);
789 audio_buf_size*= osize*enc->channels;
790
791 audio_out_size= FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
792 if(coded_bps > 8*osize)
793 audio_out_size= audio_out_size * coded_bps / (8*osize);
794 audio_out_size += FF_MIN_BUFFER_SIZE;
795
796 if(audio_out_size > INT_MAX || audio_buf_size > INT_MAX){
797 fprintf(stderr, "Buffer sizes too large\n");
798 ffmpeg_exit(1);
799 }
800
801 av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
802 av_fast_malloc(&audio_out, &allocated_audio_out_size, audio_out_size);
803 if (!audio_buf || !audio_out){
804 fprintf(stderr, "Out of memory in do_audio_out\n");
805 ffmpeg_exit(1);
806 }
807
808 if (enc->channels != dec->channels || enc->sample_rate != dec->sample_rate)
809 ost->audio_resample = 1;
810
811 resample_changed = ost->resample_sample_fmt != dec->sample_fmt ||
812 ost->resample_channels != dec->channels ||
813 ost->resample_sample_rate != dec->sample_rate;
814
815 if ((ost->audio_resample && !ost->resample) || resample_changed) {
816 if (resample_changed) {
817 av_log(NULL, AV_LOG_INFO, "Input stream #%d.%d frame changed from rate:%d fmt:%s ch:%d to rate:%d fmt:%s ch:%d\n",
818 ist->file_index, ist->st->index,
819 ost->resample_sample_rate, av_get_sample_fmt_name(ost->resample_sample_fmt), ost->resample_channels,
820 dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt), dec->channels);
821 ost->resample_sample_fmt = dec->sample_fmt;
822 ost->resample_channels = dec->channels;
823 ost->resample_sample_rate = dec->sample_rate;
824 if (ost->resample)
825 audio_resample_close(ost->resample);
826 }
827 /* if audio_sync_method is >1 the resampler is needed for audio drift compensation */
828 if (audio_sync_method <= 1 &&
829 ost->resample_sample_fmt == enc->sample_fmt &&
830 ost->resample_channels == enc->channels &&
831 ost->resample_sample_rate == enc->sample_rate) {
832 ost->resample = NULL;
833 ost->audio_resample = 0;
834 } else if (ost->audio_resample) {
835 if (dec->sample_fmt != AV_SAMPLE_FMT_S16)
836 fprintf(stderr, "Warning, using s16 intermediate sample format for resampling\n");
837 ost->resample = av_audio_resample_init(enc->channels, dec->channels,
838 enc->sample_rate, dec->sample_rate,
839 enc->sample_fmt, dec->sample_fmt,
840 16, 10, 0, 0.8);
841 if (!ost->resample) {
842 fprintf(stderr, "Can not resample %d channels @ %d Hz to %d channels @ %d Hz\n",
843 dec->channels, dec->sample_rate,
844 enc->channels, enc->sample_rate);
845 ffmpeg_exit(1);
846 }
847 }
848 }
849
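/* MAKE_SFMT_PAIR packs the encoder/decoder sample formats into one integer
 * so a format change can be detected with a single comparison before
 * (re)creating the sample format conversion context */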
850 #define MAKE_SFMT_PAIR(a,b) ((a)+AV_SAMPLE_FMT_NB*(b))
851 if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt &&
852 MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt)!=ost->reformat_pair) {
853 if (ost->reformat_ctx)
854 av_audio_convert_free(ost->reformat_ctx);
855 ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1,
856 dec->sample_fmt, 1, NULL, 0);
857 if (!ost->reformat_ctx) {
858 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
859 av_get_sample_fmt_name(dec->sample_fmt),
860 av_get_sample_fmt_name(enc->sample_fmt));
861 ffmpeg_exit(1);
862 }
863 ost->reformat_pair=MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);
864 }
865
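/* Audio A/V sync: delta is measured in output samples -- the position the
 * stream should be at (input pts scaled to the encoder rate) minus the
 * samples already emitted (sync_opts) and those still queued in the fifo
 * (16-bit samples assumed). Large gaps are corrected by dropping input or
 * inserting silence; smaller drift is handed to the resampler when
 * audio_sync_method > 1. */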
866 if(audio_sync_method){
867 double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts
868 - av_fifo_size(ost->fifo)/(enc->channels * 2);
869 double idelta= delta*dec->sample_rate / enc->sample_rate;
870 int byte_delta= ((int)idelta)*2*dec->channels;
871
872 //FIXME resample delay
873 if(fabs(delta) > 50){
874 if(ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate){
875 if(byte_delta < 0){
876 byte_delta= FFMAX(byte_delta, -size);
877 size += byte_delta;
878 buf -= byte_delta;
879 if(verbose > 2)
880 fprintf(stderr, "discarding %d audio samples\n", (int)-delta);
881 if(!size)
882 return;
883 ist->is_start=0;
884 }else{
885 static uint8_t *input_tmp= NULL;
886 input_tmp= av_realloc(input_tmp, byte_delta + size);
887
888 if(byte_delta > allocated_for_size - size){
889 allocated_for_size= byte_delta + (int64_t)size;
890 goto need_realloc;
891 }
892 ist->is_start=0;
893
894 memset(input_tmp, 0, byte_delta);
895 memcpy(input_tmp + byte_delta, buf, size);
896 buf= input_tmp;
897 size += byte_delta;
898 if(verbose > 2)
899 fprintf(stderr, "adding %d audio samples of silence\n", (int)delta);
900 }
901 }else if(audio_sync_method>1){
902 int comp= av_clip(delta, -audio_sync_method, audio_sync_method);
903 av_assert0(ost->audio_resample);
904 if(verbose > 2)
905 fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
906 // fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2));
907 av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
908 }
909 }
910 }else
911 ost->sync_opts= lrintf(get_sync_ipts(ost) * enc->sample_rate)
912 - av_fifo_size(ost->fifo)/(enc->channels * 2); //FIXME wrong
913
914 if (ost->audio_resample) {
915 buftmp = audio_buf;
916 size_out = audio_resample(ost->resample,
917 (short *)buftmp, (short *)buf,
918 size / (dec->channels * isize));
919 size_out = size_out * enc->channels * osize;
920 } else {
921 buftmp = buf;
922 size_out = size;
923 }
924
925 if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt) {
926 const void *ibuf[6]= {buftmp};
927 void *obuf[6]= {audio_buf};
928 int istride[6]= {isize};
929 int ostride[6]= {osize};
930 int len= size_out/istride[0];
931 if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
932 printf("av_audio_convert() failed\n");
933 if (exit_on_error)
934 ffmpeg_exit(1);
935 return;
936 }
937 buftmp = audio_buf;
938 size_out = len*osize;
939 }
940
941 /* now encode as many frames as possible */
942 if (enc->frame_size > 1) {
943 /* output resampled raw samples */
944 if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
945 fprintf(stderr, "av_fifo_realloc2() failed\n");
946 ffmpeg_exit(1);
947 }
948 av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);
949
950 frame_bytes = enc->frame_size * osize * enc->channels;
951
952 while (av_fifo_size(ost->fifo) >= frame_bytes) {
953 AVPacket pkt;
954 av_init_packet(&pkt);
955
956 av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
957
958 //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
959
960 ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
961 (short *)audio_buf);
962 if (ret < 0) {
963 fprintf(stderr, "Audio encoding failed\n");
964 ffmpeg_exit(1);
965 }
966 audio_size += ret;
967 pkt.stream_index= ost->index;
968 pkt.data= audio_out;
969 pkt.size= ret;
970 if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
971 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
972 pkt.flags |= AV_PKT_FLAG_KEY;
973 write_frame(s, &pkt, enc, ost->bitstream_filters);
974
975 ost->sync_opts += enc->frame_size;
976 }
977 } else {
978 AVPacket pkt;
979 av_init_packet(&pkt);
980
981 ost->sync_opts += size_out / (osize * enc->channels);
982
983 /* output a pcm frame */
984 /* determine the size of the coded buffer */
985 size_out /= osize;
986 if (coded_bps)
987 size_out = size_out*coded_bps/8;
988
989 if(size_out > audio_out_size){
990 fprintf(stderr, "Internal error, buffer size too small\n");
991 ffmpeg_exit(1);
992 }
993
994 //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
995 ret = avcodec_encode_audio(enc, audio_out, size_out,
996 (short *)buftmp);
997 if (ret < 0) {
998 fprintf(stderr, "Audio encoding failed\n");
999 ffmpeg_exit(1);
1000 }
1001 audio_size += ret;
1002 pkt.stream_index= ost->index;
1003 pkt.data= audio_out;
1004 pkt.size= ret;
1005 if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1006 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1007 pkt.flags |= AV_PKT_FLAG_KEY;
1008 write_frame(s, &pkt, enc, ost->bitstream_filters);
1009 }
1010 }
1011
1012 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
1013 {
1014 AVCodecContext *dec;
1015 AVPicture *picture2;
1016 AVPicture picture_tmp;
1017 uint8_t *buf = 0;
1018
1019 dec = ist->st->codec;
1020
1021 /* deinterlace : must be done before any resize */
1022 if (do_deinterlace) {
1023 int size;
1024
1025 /* create temporary picture */
1026 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
1027 buf = av_malloc(size);
1028 if (!buf)
1029 return;
1030
1031 picture2 = &picture_tmp;
1032 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
1033
1034 if(avpicture_deinterlace(picture2, picture,
1035 dec->pix_fmt, dec->width, dec->height) < 0) {
1036 /* if error, do not deinterlace */
1037 fprintf(stderr, "Deinterlacing failed\n");
1038 av_free(buf);
1039 buf = NULL;
1040 picture2 = picture;
1041 }
1042 } else {
1043 picture2 = picture;
1044 }
1045
1046 if (picture != picture2)
1047 *picture = *picture2;
1048 *bufp = buf;
1049 }
1050
1051 /* we begin to correct av delay at this threshold */
1052 #define AV_DELAY_MAX 0.100
1053
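/* encode one decoded subtitle and send it to the muxer; DVB subtitles are
 * written as two packets, one to display the subtitle and one to clear it */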
1054 static void do_subtitle_out(AVFormatContext *s,
1055 OutputStream *ost,
1056 InputStream *ist,
1057 AVSubtitle *sub,
1058 int64_t pts)
1059 {
1060 static uint8_t *subtitle_out = NULL;
1061 int subtitle_out_max_size = 1024 * 1024;
1062 int subtitle_out_size, nb, i;
1063 AVCodecContext *enc;
1064 AVPacket pkt;
1065
1066 if (pts == AV_NOPTS_VALUE) {
1067 fprintf(stderr, "Subtitle packets must have a pts\n");
1068 if (exit_on_error)
1069 ffmpeg_exit(1);
1070 return;
1071 }
1072
1073 enc = ost->st->codec;
1074
1075 if (!subtitle_out) {
1076 subtitle_out = av_malloc(subtitle_out_max_size);
1077 }
1078
1079 /* Note: DVB subtitles need one packet to draw them and another
1080 packet to clear them */
1081 /* XXX: signal it in the codec context ? */
1082 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
1083 nb = 2;
1084 else
1085 nb = 1;
1086
1087 for(i = 0; i < nb; i++) {
1088 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
1089 // start_display_time is required to be 0
1090 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){1, 1000}, AV_TIME_BASE_Q);
1091 sub->end_display_time -= sub->start_display_time;
1092 sub->start_display_time = 0;
1093 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1094 subtitle_out_max_size, sub);
1095 if (subtitle_out_size < 0) {
1096 fprintf(stderr, "Subtitle encoding failed\n");
1097 ffmpeg_exit(1);
1098 }
1099
1100 av_init_packet(&pkt);
1101 pkt.stream_index = ost->index;
1102 pkt.data = subtitle_out;
1103 pkt.size = subtitle_out_size;
1104 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
1105 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
1106 /* XXX: the pts correction is handled here. Maybe handling
1107 it in the codec would be better */
1108 if (i == 0)
1109 pkt.pts += 90 * sub->start_display_time;
1110 else
1111 pkt.pts += 90 * sub->end_display_time;
1112 }
1113 write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
1114 }
1115 }
1116
1117 static int bit_buffer_size= 1024*256;
1118 static uint8_t *bit_buffer= NULL;
1119
1120 static void do_video_out(AVFormatContext *s,
1121 OutputStream *ost,
1122 InputStream *ist,
1123 AVFrame *in_picture,
1124 int *frame_size, float quality)
1125 {
1126 int nb_frames, i, ret, resample_changed;
1127 AVFrame *final_picture, *formatted_picture;
1128 AVCodecContext *enc, *dec;
1129 double sync_ipts;
1130
1131 enc = ost->st->codec;
1132 dec = ist->st->codec;
1133
1134 sync_ipts = get_sync_ipts(ost) / av_q2d(enc->time_base);
1135
1136 /* by default, we output a single frame */
1137 nb_frames = 1;
1138
1139 *frame_size = 0;
1140
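/* Video A/V sync: vdelta is the gap, in output frames, between where this
 * picture should land (sync_ipts) and the current output frame counter
 * (sync_opts). A picture that is more than about one frame late is dropped;
 * one that is early is duplicated to fill the gap (or the counter is simply
 * resynced for variable-frame-rate output). */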
1141 if(video_sync_method){
1142 double vdelta = sync_ipts - ost->sync_opts;
1143 //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1144 if (vdelta < -1.1)
1145 nb_frames = 0;
1146 else if (video_sync_method == 2 || (video_sync_method<0 && (s->oformat->flags & AVFMT_VARIABLE_FPS))){
1147 if(vdelta<=-0.6){
1148 nb_frames=0;
1149 }else if(vdelta>0.6)
1150 ost->sync_opts= lrintf(sync_ipts);
1151 }else if (vdelta > 1.1)
1152 nb_frames = lrintf(vdelta);
1153 //fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, get_sync_ipts(ost), nb_frames);
1154 if (nb_frames == 0){
1155 ++nb_frames_drop;
1156 if (verbose>2)
1157 fprintf(stderr, "*** drop!\n");
1158 }else if (nb_frames > 1) {
1159 nb_frames_dup += nb_frames - 1;
1160 if (verbose>2)
1161 fprintf(stderr, "*** %d dup!\n", nb_frames-1);
1162 }
1163 }else
1164 ost->sync_opts= lrintf(sync_ipts);
1165
1166 nb_frames= FFMIN(nb_frames, max_frames[AVMEDIA_TYPE_VIDEO] - ost->frame_number);
1167 if (nb_frames <= 0)
1168 return;
1169
1170 formatted_picture = in_picture;
1171 final_picture = formatted_picture;
1172
1173 resample_changed = ost->resample_width != dec->width ||
1174 ost->resample_height != dec->height ||
1175 ost->resample_pix_fmt != dec->pix_fmt;
1176
1177 if (resample_changed) {
1178 av_log(NULL, AV_LOG_INFO,
1179 "Input stream #%d.%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1180 ist->file_index, ist->st->index,
1181 ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
1182 dec->width , dec->height , av_get_pix_fmt_name(dec->pix_fmt));
1183 if(!ost->video_resample)
1184 ffmpeg_exit(1);
1185 }
1186
1187 #if !CONFIG_AVFILTER
1188 if (ost->video_resample) {
1189 final_picture = &ost->pict_tmp;
1190 if (resample_changed) {
1191 /* initialize a new scaler context */
1192 sws_freeContext(ost->img_resample_ctx);
1193 ost->img_resample_ctx = sws_getContext(
1194 ist->st->codec->width,
1195 ist->st->codec->height,
1196 ist->st->codec->pix_fmt,
1197 ost->st->codec->width,
1198 ost->st->codec->height,
1199 ost->st->codec->pix_fmt,
1200 ost->sws_flags, NULL, NULL, NULL);
1201 if (ost->img_resample_ctx == NULL) {
1202 fprintf(stderr, "Cannot get resampling context\n");
1203 ffmpeg_exit(1);
1204 }
1205 }
1206 sws_scale(ost->img_resample_ctx, formatted_picture->data, formatted_picture->linesize,
1207 0, ost->resample_height, final_picture->data, final_picture->linesize);
1208 }
1209 #endif
1210
1211 /* duplicates frame if needed */
1212 for(i=0;i<nb_frames;i++) {
1213 AVPacket pkt;
1214 av_init_packet(&pkt);
1215 pkt.stream_index= ost->index;
1216
1217 if (s->oformat->flags & AVFMT_RAWPICTURE) {
1218 /* raw pictures are written as an AVPicture structure to
1219 avoid any copies. We temporarily support the older
1220 method. */
1221 AVFrame* old_frame = enc->coded_frame;
1222 enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
1223 pkt.data= (uint8_t *)final_picture;
1224 pkt.size= sizeof(AVPicture);
1225 pkt.pts= av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
1226 pkt.flags |= AV_PKT_FLAG_KEY;
1227
1228 write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
1229 enc->coded_frame = old_frame;
1230 } else {
1231 AVFrame big_picture;
1232
1233 big_picture= *final_picture;
1234 /* better than nothing: use input picture interlaced
1235 settings */
1236 big_picture.interlaced_frame = in_picture->interlaced_frame;
1237 if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
1238 if(top_field_first == -1)
1239 big_picture.top_field_first = in_picture->top_field_first;
1240 else
1241 big_picture.top_field_first = top_field_first;
1242 }
1243
1244 /* handle sameq here. This is not correct because it may
1245 not be a global option */
1246 big_picture.quality = quality;
1247 if(!me_threshold)
1248 big_picture.pict_type = 0;
1249 // big_picture.pts = AV_NOPTS_VALUE;
1250 big_picture.pts= ost->sync_opts;
1251 // big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
1252 //av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
1253 if (ost->forced_kf_index < ost->forced_kf_count &&
1254 big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1255 big_picture.pict_type = AV_PICTURE_TYPE_I;
1256 ost->forced_kf_index++;
1257 }
1258 ret = avcodec_encode_video(enc,
1259 bit_buffer, bit_buffer_size,
1260 &big_picture);
1261 if (ret < 0) {
1262 fprintf(stderr, "Video encoding failed\n");
1263 ffmpeg_exit(1);
1264 }
1265
1266 if(ret>0){
1267 pkt.data= bit_buffer;
1268 pkt.size= ret;
1269 if(enc->coded_frame->pts != AV_NOPTS_VALUE)
1270 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1271 /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n",
1272 pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
1273 pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
1274
1275 if(enc->coded_frame->key_frame)
1276 pkt.flags |= AV_PKT_FLAG_KEY;
1277 write_frame(s, &pkt, ost->st->codec, ost->bitstream_filters);
1278 *frame_size = ret;
1279 video_size += ret;
1280 //fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
1281 // enc->frame_number-1, ret, enc->pict_type);
1282 /* if two pass, output log */
1283 if (ost->logfile && enc->stats_out) {
1284 fprintf(ost->logfile, "%s", enc->stats_out);
1285 }
1286 }
1287 }
1288 ost->sync_opts++;
1289 ost->frame_number++;
1290 }
1291 }
1292
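/* convert a normalized mean squared error (error / (width*height*255^2))
 * into a PSNR value in dB */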
1293 static double psnr(double d){
1294 return -10.0*log(d)/log(10.0);
1295 }
1296
1297 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
1298 int frame_size)
1299 {
1300 AVCodecContext *enc;
1301 int frame_number;
1302 double ti1, bitrate, avg_bitrate;
1303
1304 /* this is executed just the first time do_video_stats is called */
1305 if (!vstats_file) {
1306 vstats_file = fopen(vstats_filename, "w");
1307 if (!vstats_file) {
1308 perror("fopen");
1309 ffmpeg_exit(1);
1310 }
1311 }
1312
1313 enc = ost->st->codec;
1314 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1315 frame_number = ost->frame_number;
1316 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
1317 if (enc->flags&CODEC_FLAG_PSNR)
1318 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
1319
1320 fprintf(vstats_file,"f_size= %6d ", frame_size);
1321 /* compute pts value */
1322 ti1 = ost->sync_opts * av_q2d(enc->time_base);
1323 if (ti1 < 0.01)
1324 ti1 = 0.01;
1325
1326 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1327 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1328 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1329 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1330 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
1331 }
1332 }
1333
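/* print the periodic progress line (frame count, fps, quantizer, size,
 * bitrate, dup/drop counts) at most every 0.5 seconds, plus a final
 * video/audio size and muxing overhead summary on the last report */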
1334 static void print_report(AVFormatContext **output_files,
1335 OutputStream **ost_table, int nb_ostreams,
1336 int is_last_report)
1337 {
1338 char buf[1024];
1339 OutputStream *ost;
1340 AVFormatContext *oc;
1341 int64_t total_size;
1342 AVCodecContext *enc;
1343 int frame_number, vid, i;
1344 double bitrate, ti1, pts;
1345 static int64_t last_time = -1;
1346 static int qp_histogram[52];
1347
1348 if (!is_last_report) {
1349 int64_t cur_time;
1350 /* display the report every 0.5 seconds */
1351 cur_time = av_gettime();
1352 if (last_time == -1) {
1353 last_time = cur_time;
1354 return;
1355 }
1356 if ((cur_time - last_time) < 500000)
1357 return;
1358 last_time = cur_time;
1359 }
1360
1361
1362 oc = output_files[0];
1363
1364 total_size = avio_size(oc->pb);
1365 if(total_size<0) // FIXME improve avio_size() so it works with non seekable output too
1366 total_size= avio_tell(oc->pb);
1367
1368 buf[0] = '\0';
1369 ti1 = 1e10;
1370 vid = 0;
1371 for(i=0;i<nb_ostreams;i++) {
1372 float q = -1;
1373 ost = ost_table[i];
1374 enc = ost->st->codec;
1375 if (!ost->st->stream_copy && enc->coded_frame)
1376 q = enc->coded_frame->quality/(float)FF_QP2LAMBDA;
1377 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1378 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1379 }
1380 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1381 float t = (av_gettime()-timer_start) / 1000000.0;
1382
1383 frame_number = ost->frame_number;
1384 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1385 frame_number, (t>1)?(int)(frame_number/t+0.5) : 0, q);
1386 if(is_last_report)
1387 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1388 if(qp_hist){
1389 int j;
1390 int qp = lrintf(q);
1391 if(qp>=0 && qp<FF_ARRAY_ELEMS(qp_histogram))
1392 qp_histogram[qp]++;
1393 for(j=0; j<32; j++)
1394 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j]+1)/log(2)));
1395 }
1396 if (enc->flags&CODEC_FLAG_PSNR){
1397 int j;
1398 double error, error_sum=0;
1399 double scale, scale_sum=0;
1400 char type[3]= {'Y','U','V'};
1401 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1402 for(j=0; j<3; j++){
1403 if(is_last_report){
1404 error= enc->error[j];
1405 scale= enc->width*enc->height*255.0*255.0*frame_number;
1406 }else{
1407 error= enc->coded_frame->error[j];
1408 scale= enc->width*enc->height*255.0*255.0;
1409 }
1410 if(j) scale/=4;
1411 error_sum += error;
1412 scale_sum += scale;
1413 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
1414 }
1415 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
1416 }
1417 vid = 1;
1418 }
1419 /* compute min output value */
1420 pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1421 if ((pts < ti1) && (pts > 0))
1422 ti1 = pts;
1423 }
1424 if (ti1 < 0.01)
1425 ti1 = 0.01;
1426
1427 if (verbose > 0 || is_last_report) {
1428 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1429
1430 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1431 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1432 (double)total_size / 1024, ti1, bitrate);
1433
1434 if (nb_frames_dup || nb_frames_drop)
1435 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1436 nb_frames_dup, nb_frames_drop);
1437
1438 if (verbose >= 0)
1439 fprintf(stderr, "%s \r", buf);
1440
1441 fflush(stderr);
1442 }
1443
1444 if (is_last_report && verbose >= 0){
1445 int64_t raw= audio_size + video_size + extra_size;
1446 fprintf(stderr, "\n");
1447 fprintf(stderr, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1448 video_size/1024.0,
1449 audio_size/1024.0,
1450 extra_size/1024.0,
1451 100.0*(total_size - raw)/raw
1452 );
1453 }
1454 }
1455
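/* unsigned 8-bit PCM is centered on 0x80, so "silence" is 0x80 there and
 * 0x00 for every other sample format */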
1456 static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
1457 {
1458 int fill_char = 0x00;
1459 if (sample_fmt == AV_SAMPLE_FMT_U8)
1460 fill_char = 0x80;
1461 memset(buf, fill_char, size);
1462 }
1463
1464 /* pkt = NULL means EOF (needed to flush decoder buffers) */
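/* Decode (when needed) one input packet and feed the result to every output
 * stream connected to this input: encode audio/video/subtitles, or pass the
 * packet through with rescaled timestamps when stream copy is requested. */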
1465 static int output_packet(InputStream *ist, int ist_index,
1466 OutputStream **ost_table, int nb_ostreams,
1467 const AVPacket *pkt)
1468 {
1469 AVFormatContext *os;
1470 OutputStream *ost;
1471 int ret, i;
1472 int got_output;
1473 AVFrame picture;
1474 void *buffer_to_free = NULL;
1475 static unsigned int samples_size= 0;
1476 AVSubtitle subtitle, *subtitle_to_free;
1477 int64_t pkt_pts = AV_NOPTS_VALUE;
1478 #if CONFIG_AVFILTER
1479 int frame_available;
1480 #endif
1481 float quality;
1482
1483 AVPacket avpkt;
1484 int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
1485
1486 if(ist->next_pts == AV_NOPTS_VALUE)
1487 ist->next_pts= ist->pts;
1488
1489 if (pkt == NULL) {
1490 /* EOF handling */
1491 av_init_packet(&avpkt);
1492 avpkt.data = NULL;
1493 avpkt.size = 0;
1494 goto handle_eof;
1495 } else {
1496 avpkt = *pkt;
1497 }
1498
1499 if(pkt->dts != AV_NOPTS_VALUE)
1500 ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1501 if(pkt->pts != AV_NOPTS_VALUE)
1502 pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
1503
1504 //while we have more to decode or while the decoder did output something on EOF
1505 while (avpkt.size > 0 || (!pkt && got_output)) {
1506 uint8_t *data_buf, *decoded_data_buf;
1507 int data_size, decoded_data_size;
1508 handle_eof:
1509 ist->pts= ist->next_pts;
1510
1511 if(avpkt.size && avpkt.size != pkt->size &&
1512 ((!ist->showed_multi_packet_warning && verbose>0) || verbose>1)){
1513 fprintf(stderr, "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1514 ist->showed_multi_packet_warning=1;
1515 }
1516
1517 /* decode the packet if needed */
1518 decoded_data_buf = NULL; /* fail safe */
1519 decoded_data_size= 0;
1520 data_buf = avpkt.data;
1521 data_size = avpkt.size;
1522 subtitle_to_free = NULL;
1523 if (ist->decoding_needed) {
1524 switch(ist->st->codec->codec_type) {
1525 case AVMEDIA_TYPE_AUDIO:{
1526 if(pkt && samples_size < FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
1527 samples_size = FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE);
1528 av_free(samples);
1529 samples= av_malloc(samples_size);
1530 }
1531 decoded_data_size= samples_size;
1532 /* XXX: could avoid copy if PCM 16 bits with same
1533 endianness as CPU */
1534 ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
1535 &avpkt);
1536 if (ret < 0)
1537 goto fail_decode;
1538 avpkt.data += ret;
1539 avpkt.size -= ret;
1540 data_size = ret;
1541 got_output = decoded_data_size > 0;
1542 /* Some bug in the mpeg audio decoder gives */
1543 /* decoded_data_size < 0; it seems to be an overflow */
1544 if (!got_output) {
1545 /* no audio frame */
1546 continue;
1547 }
1548 decoded_data_buf = (uint8_t *)samples;
1549 ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
1550 (ist->st->codec->sample_rate * ist->st->codec->channels);
1551 break;}
1552 case AVMEDIA_TYPE_VIDEO:
1553 decoded_data_size = (ist->st->codec->width * ist->st->codec->height * 3) / 2;
1554 /* XXX: allocate picture correctly */
1555 avcodec_get_frame_defaults(&picture);
1556 avpkt.pts = pkt_pts;
1557 avpkt.dts = ist->pts;
1558 pkt_pts = AV_NOPTS_VALUE;
1559
1560 ret = avcodec_decode_video2(ist->st->codec,
1561 &picture, &got_output, &avpkt);
1562 quality = same_quality ? picture.quality : 0;
1563 if (ret < 0)
1564 goto fail_decode;
1565 if (!got_output) {
1566 /* no picture yet */
1567 goto discard_packet;
1568 }
1569 ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, picture.pkt_pts, picture.pkt_dts);
1570 if (ist->st->codec->time_base.num != 0) {
1571 int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
1572 ist->next_pts += ((int64_t)AV_TIME_BASE *
1573 ist->st->codec->time_base.num * ticks) /
1574 ist->st->codec->time_base.den;
1575 }
1576 avpkt.size = 0;
1577 buffer_to_free = NULL;
1578 pre_process_video_frame(ist, (AVPicture *)&picture, &buffer_to_free);
1579 break;
1580 case AVMEDIA_TYPE_SUBTITLE:
1581 ret = avcodec_decode_subtitle2(ist->st->codec,
1582 &subtitle, &got_output, &avpkt);
1583 if (ret < 0)
1584 goto fail_decode;
1585 if (!got_output) {
1586 goto discard_packet;
1587 }
1588 subtitle_to_free = &subtitle;
1589 avpkt.size = 0;
1590 break;
1591 default:
1592 goto fail_decode;
1593 }
1594 } else {
1595 switch(ist->st->codec->codec_type) {
1596 case AVMEDIA_TYPE_AUDIO:
1597 ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
1598 ist->st->codec->sample_rate;
1599 break;
1600 case AVMEDIA_TYPE_VIDEO:
1601 if (ist->st->codec->time_base.num != 0) {
1602 int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
1603 ist->next_pts += ((int64_t)AV_TIME_BASE *
1604 ist->st->codec->time_base.num * ticks) /
1605 ist->st->codec->time_base.den;
1606 }
1607 break;
1608 }
1609 ret = avpkt.size;
1610 avpkt.size = 0;
1611 }
1612
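/* push the decoded video frame into the filter graph of every output stream
 * fed by this input, carrying the sample aspect ratio along with it */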
1613 #if CONFIG_AVFILTER
1614 if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1615 for (i = 0; i < nb_ostreams; i++) {
1616 ost = ost_table[i];
1617 if (ost->input_video_filter && ost->source_index == ist_index) {
1618 AVRational sar;
1619 if (ist->st->sample_aspect_ratio.num)
1620 sar = ist->st->sample_aspect_ratio;
1621 else
1622 sar = ist->st->codec->sample_aspect_ratio;
1623 // add it to be filtered
1624 av_vsrc_buffer_add_frame(ost->input_video_filter, &picture,
1625 ist->pts,
1626 sar);
1627 }
1628 }
1629 }
1630 #endif
1631
1632 // preprocess audio (volume)
1633 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1634 if (audio_volume != 256) {
1635 short *volp;
1636 volp = samples;
1637 for(i=0;i<(decoded_data_size / sizeof(short));i++) {
1638 int v = ((*volp) * audio_volume + 128) >> 8;
1639 if (v < -32768) v = -32768;
1640 if (v > 32767) v = 32767;
1641 *volp++ = v;
1642 }
1643 }
1644 }
1645
1646 /* frame rate emulation */
1647 if (rate_emu) {
1648 int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
1649 int64_t now = av_gettime() - ist->start;
1650 if (pts > now)
1651 usleep(pts - now);
1652 }
1653 /* if the output time is reached then transcode the raw format,
1654 encode packets and output them */
1655 if (start_time == 0 || ist->pts >= start_time)
1656 for(i=0;i<nb_ostreams;i++) {
1657 int frame_size;
1658
1659 ost = ost_table[i];
1660 if (ost->source_index == ist_index) {
1661 #if CONFIG_AVFILTER
1662 frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
1663 !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]);
1664 while (frame_available) {
1665 AVRational ist_pts_tb;
1666 if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter)
1667 get_filtered_video_frame(ost->output_video_filter, &picture, &ost->picref, &ist_pts_tb);
1668 if (ost->picref)
1669 ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
1670 #endif
1671 os = output_files[ost->file_index];
1672
1673 /* set the input output pts pairs */
1674 //ost->sync_ipts = (double)(ist->pts + input_files_ts_offset[ist->file_index] - start_time)/ AV_TIME_BASE;
1675
1676 if (ost->encoding_needed) {
1677 av_assert0(ist->decoding_needed);
1678 switch(ost->st->codec->codec_type) {
1679 case AVMEDIA_TYPE_AUDIO:
1680 do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
1681 break;
1682 case AVMEDIA_TYPE_VIDEO:
1683 #if CONFIG_AVFILTER
1684 if (ost->picref->video && !ost->frame_aspect_ratio)
1685 ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
1686 #endif
1687 do_video_out(os, ost, ist, &picture, &frame_size,
1688 same_quality ? quality : ost->st->codec->global_quality);
1689 if (vstats_filename && frame_size)
1690 do_video_stats(os, ost, frame_size);
1691 break;
1692 case AVMEDIA_TYPE_SUBTITLE:
1693 do_subtitle_out(os, ost, ist, &subtitle,
1694 pkt->pts);
1695 break;
1696 default:
1697 abort();
1698 }
1699 } else {
1700 AVFrame avframe; //FIXME/XXX remove this
1701 AVPacket opkt;
1702 int64_t ost_tb_start_time= av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1703
1704 av_init_packet(&opkt);
1705
1706 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
1707 #if !CONFIG_AVFILTER
1708 continue;
1709 #else
1710 goto cont;
1711 #endif
1712
1713 /* no re-encoding needed: output the packet directly */
1714 /* force the input stream PTS */
1715
1716 avcodec_get_frame_defaults(&avframe);
1717 ost->st->codec->coded_frame= &avframe;
1718 avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY;
1719
1720 if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1721 audio_size += data_size;
1722 else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1723 video_size += data_size;
1724 ost->sync_opts++;
1725 }
1726
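/* rescale the packet timestamps from the input stream time base to the
 * output stream time base, shifted by the requested start time */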
1727 opkt.stream_index= ost->index;
1728 if(pkt->pts != AV_NOPTS_VALUE)
1729 opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1730 else
1731 opkt.pts= AV_NOPTS_VALUE;
1732
1733 if (pkt->dts == AV_NOPTS_VALUE)
1734 opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
1735 else
1736 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1737 opkt.dts -= ost_tb_start_time;
1738
1739 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1740 opkt.flags= pkt->flags;
1741
1742 //FIXME remove the following 2 lines: they shall be replaced by the bitstream filters
1743 if( ost->st->codec->codec_id != CODEC_ID_H264
1744 && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
1745 && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
1746 ) {
1747 if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY))
1748 opkt.destruct= av_destruct_packet;
1749 } else {
1750 opkt.data = data_buf;
1751 opkt.size = data_size;
1752 }
1753
1754 write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters);
1755 ost->st->codec->frame_number++;
1756 ost->frame_number++;
1757 av_free_packet(&opkt);
1758 }
1759 #if CONFIG_AVFILTER
1760 cont:
1761 frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
1762 ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
1763 if (ost->picref)
1764 avfilter_unref_buffer(ost->picref);
1765 }
1766 #endif
1767 }
1768 }
1769
1770 av_free(buffer_to_free);
1771 /* XXX: allocate the subtitles in the codec ? */
1772 if (subtitle_to_free) {
1773 avsubtitle_free(subtitle_to_free);
1774 subtitle_to_free = NULL;
1775 }
1776 }
1777 discard_packet:
1778 if (pkt == NULL) {
1779 /* EOF handling */
1780
1781 for(i=0;i<nb_ostreams;i++) {
1782 ost = ost_table[i];
1783 if (ost->source_index == ist_index) {
1784 AVCodecContext *enc= ost->st->codec;
1785 os = output_files[ost->file_index];
1786
1787 if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
1788 continue;
1789 if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
1790 continue;
1791
1792 if (ost->encoding_needed) {
1793 for(;;) {
1794 AVPacket pkt;
1795 int fifo_bytes;
1796 av_init_packet(&pkt);
1797 pkt.stream_index= ost->index;
1798
1799 switch(ost->st->codec->codec_type) {
1800 case AVMEDIA_TYPE_AUDIO:
1801 fifo_bytes = av_fifo_size(ost->fifo);
1802 ret = 0;
1803 /* encode any samples remaining in fifo */
1804 if (fifo_bytes > 0) {
1805 int osize = av_get_bytes_per_sample(enc->sample_fmt);
1806 int fs_tmp = enc->frame_size;
1807
1808 av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
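/* if the encoder accepts a short final frame, shrink frame_size to the
   remaining samples; otherwise pad the tail with silence up to a full frame */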
1809 if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
1810 enc->frame_size = fifo_bytes / (osize * enc->channels);
1811 } else { /* pad */
1812 int frame_bytes = enc->frame_size*osize*enc->channels;
1813 if (allocated_audio_buf_size < frame_bytes)
1814 ffmpeg_exit(1);
1815 generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
1816 }
1817
1818 ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf);
1819 pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
1820 ost->st->time_base.num, enc->sample_rate);
1821 enc->frame_size = fs_tmp;
1822 }
1823 if(ret <= 0) {
1824 ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
1825 }
1826 if (ret < 0) {
1827 fprintf(stderr, "Audio encoding failed\n");
1828 ffmpeg_exit(1);
1829 }
1830 audio_size += ret;
1831 pkt.flags |= AV_PKT_FLAG_KEY;
1832 break;
1833 case AVMEDIA_TYPE_VIDEO:
1834 ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
1835 if (ret < 0) {
1836 fprintf(stderr, "Video encoding failed\n");
1837 ffmpeg_exit(1);
1838 }
1839 video_size += ret;
1840 if(enc->coded_frame && enc->coded_frame->key_frame)
1841 pkt.flags |= AV_PKT_FLAG_KEY;
1842 if (ost->logfile && enc->stats_out) {
1843 fprintf(ost->logfile, "%s", enc->stats_out);
1844 }
1845 break;
1846 default:
1847 ret=-1;
1848 }
1849
1850 if(ret<=0)
1851 break;
1852 pkt.data= bit_buffer;
1853 pkt.size= ret;
1854 if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1855 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1856 write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
1857 }
1858 }
1859 }
1860 }
1861 }
1862
1863 return 0;
1864 fail_decode:
1865 return -1;
1866 }
1867
1868 static void print_sdp(AVFormatContext **avc, int n)
1869 {
1870 char sdp[2048];
1871
1872 av_sdp_create(avc, n, sdp, sizeof(sdp));
1873 printf("SDP:\n%s\n", sdp);
1874 fflush(stdout);
1875 }
1876
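/* Copy the chapters of input file 'infile' into output file 'outfile',
 * shifting them by the output start time and clipping them to the recording
 * time; chapters falling entirely outside that window are dropped. */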
1877 static int copy_chapters(int infile, int outfile)
1878 {
1879 AVFormatContext *is = input_files[infile].ctx;
1880 AVFormatContext *os = output_files[outfile];
1881 int i;
1882
1883 for (i = 0; i < is->nb_chapters; i++) {
1884 AVChapter *in_ch = is->chapters[i], *out_ch;
1885 int64_t ts_off = av_rescale_q(start_time - input_files_ts_offset[infile],
1886 AV_TIME_BASE_Q, in_ch->time_base);
1887 int64_t rt = (recording_time == INT64_MAX) ? INT64_MAX :
1888 av_rescale_q(recording_time, AV_TIME_BASE_Q, in_ch->time_base);
1889
1890
1891 if (in_ch->end < ts_off)
1892 continue;
1893 if (rt != INT64_MAX && in_ch->start > rt + ts_off)
1894 break;
1895
1896 out_ch = av_mallocz(sizeof(AVChapter));
1897 if (!out_ch)
1898 return AVERROR(ENOMEM);
1899
1900 out_ch->id = in_ch->id;
1901 out_ch->time_base = in_ch->time_base;
1902 out_ch->start = FFMAX(0, in_ch->start - ts_off);
1903 out_ch->end = FFMIN(rt, in_ch->end - ts_off);
1904
1905 if (metadata_chapters_autocopy)
1906 av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
1907
1908 os->nb_chapters++;
1909 os->chapters = av_realloc(os->chapters, sizeof(AVChapter)*os->nb_chapters);
1910 if (!os->chapters)
1911 return AVERROR(ENOMEM);
1912 os->chapters[os->nb_chapters - 1] = out_ch;
1913 }
1914 return 0;
1915 }
1916
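/* Parse the comma-separated timestamp list given with -force_key_frames and
 * store it in ost->forced_kf_pts, rescaled to the encoder time base. */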
1917 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1918 AVCodecContext *avctx)
1919 {
1920 char *p;
1921 int n = 1, i;
1922 int64_t t;
1923
1924 for (p = kf; *p; p++)
1925 if (*p == ',')
1926 n++;
1927 ost->forced_kf_count = n;
1928 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
1929 if (!ost->forced_kf_pts) {
1930 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
1931 ffmpeg_exit(1);
1932 }
1933 for (i = 0; i < n; i++) {
1934 p = i ? strchr(p, ',') + 1 : kf;
1935 t = parse_time_or_die("force_key_frames", p, 1);
1936 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
1937 }
1938 }
1939
1940 /*
1941 * The following code is the main loop of the file converter
1942 */
1943 static int transcode(AVFormatContext **output_files,
1944 int nb_output_files,
1945 InputFile *input_files,
1946 int nb_input_files,
1947 StreamMap *stream_maps, int nb_stream_maps)
1948 {
1949 int ret = 0, i, j, k, n, nb_ostreams = 0;
1950 AVFormatContext *is, *os;
1951 AVCodecContext *codec, *icodec;
1952 OutputStream *ost, **ost_table = NULL;
1953 InputStream *ist;
1954 char error[1024];
1955 int want_sdp = 1;
1956 uint8_t no_packet[MAX_FILES]={0};
1957 int no_packet_count=0;
1958
1959 if (rate_emu)
1960 for (i = 0; i < nb_input_streams; i++)
1961 input_streams[i].start = av_gettime();
1962
1963 /* output stream init */
1964 nb_ostreams = 0;
1965 for(i=0;i<nb_output_files;i++) {
1966 os = output_files[i];
1967 if (!os->nb_streams && !(os->oformat->flags & AVFMT_NOSTREAMS)) {
1968 av_dump_format(output_files[i], i, output_files[i]->filename, 1);
1969 fprintf(stderr, "Output file #%d does not contain any stream\n", i);
1970 ret = AVERROR(EINVAL);
1971 goto fail;
1972 }
1973 nb_ostreams += os->nb_streams;
1974 }
1975 if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) {
1976 fprintf(stderr, "Number of stream maps must match number of output streams\n");
1977 ret = AVERROR(EINVAL);
1978 goto fail;
1979 }
1980
1981 /* Sanity check the mapping args -- do the input files & streams exist? */
1982 for(i=0;i<nb_stream_maps;i++) {
1983 int fi = stream_maps[i].file_index;
1984 int si = stream_maps[i].stream_index;
1985
1986 if (fi < 0 || fi > nb_input_files - 1 ||
1987 si < 0 || si > input_files[fi].ctx->nb_streams - 1) {
1988 fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si);
1989 ret = AVERROR(EINVAL);
1990 goto fail;
1991 }
1992 fi = stream_maps[i].sync_file_index;
1993 si = stream_maps[i].sync_stream_index;
1994 if (fi < 0 || fi > nb_input_files - 1 ||
1995 si < 0 || si > input_files[fi].ctx->nb_streams - 1) {
1996 fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si);
1997 ret = AVERROR(EINVAL);
1998 goto fail;
1999 }
2000 }
2001
2002 ost_table = av_mallocz(sizeof(OutputStream *) * nb_ostreams);
2003 if (!ost_table)
2004 goto fail;
2005 n = 0;
2006 for(k=0;k<nb_output_files;k++) {
2007 os = output_files[k];
2008 for(i=0;i<os->nb_streams;i++,n++) {
2009 int found;
2010 ost = ost_table[n] = output_streams_for_file[k][i];
2011 ost->st = os->streams[i];
2012 if (nb_stream_maps > 0) {
2013 ost->source_index = input_files[stream_maps[n].file_index].ist_index +
2014 stream_maps[n].stream_index;
2015
2016 /* Sanity check that the stream types match */
2017 if (input_streams[ost->source_index].st->codec->codec_type != ost->st->codec->codec_type) {
2018 int i= ost->file_index;
2019 av_dump_format(output_files[i], i, output_files[i]->filename, 1);
2020 fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n",
2021 stream_maps[n].file_index, stream_maps[n].stream_index,
2022 ost->file_index, ost->index);
2023 ffmpeg_exit(1);
2024 }
2025
2026 } else {
2027 int best_nb_frames=-1;
2028 /* get the corresponding input stream index: among unused streams of the right type, pick the one with the most frames seen during probing */
2029 found = 0;
2030 for (j = 0; j < nb_input_streams; j++) {
2031 int skip=0;
2032 ist = &input_streams[j];
2033 if(opt_programid){
2034 int pi,si;
2035 AVFormatContext *f = input_files[ist->file_index].ctx;
2036 skip=1;
2037 for(pi=0; pi<f->nb_programs; pi++){
2038 AVProgram *p= f->programs[pi];
2039 if(p->id == opt_programid)
2040 for(si=0; si<p->nb_stream_indexes; si++){
2041 if(f->streams[ p->stream_index[si] ] == ist->st)
2042 skip=0;
2043 }
2044 }
2045 }
2046 if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip &&
2047 ist->st->codec->codec_type == ost->st->codec->codec_type) {
2048 if(best_nb_frames < ist->st->codec_info_nb_frames){
2049 best_nb_frames= ist->st->codec_info_nb_frames;
2050 ost->source_index = j;
2051 found = 1;
2052 }
2053 }
2054 }
2055
2056 if (!found) {
2057 if(! opt_programid) {
2058 /* try again and reuse existing stream */
2059 for (j = 0; j < nb_input_streams; j++) {
2060 ist = &input_streams[j];
2061 if ( ist->st->codec->codec_type == ost->st->codec->codec_type
2062 && ist->st->discard != AVDISCARD_ALL) {
2063 ost->source_index = j;
2064 found = 1;
2065 }
2066 }
2067 }
2068 if (!found) {
2069 int i= ost->file_index;
2070 av_dump_format(output_files[i], i, output_files[i]->filename, 1);
2071 fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n",
2072 ost->file_index, ost->index);
2073 ffmpeg_exit(1);
2074 }
2075 }
2076 }
2077 ist = &input_streams[ost->source_index];
2078 ist->discard = 0;
2079 ost->sync_ist = (nb_stream_maps > 0) ?
2080 &input_streams[input_files[stream_maps[n].sync_file_index].ist_index +
2081 stream_maps[n].sync_stream_index] : ist;
2082 }
2083 }
2084
2085 /* for each output stream, we compute the right encoding parameters */
2086 for(i=0;i<nb_ostreams;i++) {
2087 ost = ost_table[i];
2088 os = output_files[ost->file_index];
2089 ist = &input_streams[ost->source_index];
2090
2091 codec = ost->st->codec;
2092 icodec = ist->st->codec;
2093
2094 if (metadata_streams_autocopy)
2095 av_dict_copy(&ost->st->metadata, ist->st->metadata,
2096 AV_DICT_DONT_OVERWRITE);
2097
2098 ost->st->disposition = ist->st->disposition;
2099 codec->bits_per_raw_sample= icodec->bits_per_raw_sample;
2100 codec->chroma_sample_location = icodec->chroma_sample_location;
2101
2102 if (ost->st->stream_copy) {
2103 uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2104
2105 if (extra_size > INT_MAX)
2106 goto fail;
2107
2108 /* if stream_copy is selected, no need to decode or encode */
2109 codec->codec_id = icodec->codec_id;
2110 codec->codec_type = icodec->codec_type;
2111
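/* reuse the input codec tag unless the output container maps this
   codec id to a different tag */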
2112 if(!codec->codec_tag){
2113 if( !os->oformat->codec_tag
2114 || av_codec_get_id (os->oformat->codec_tag, icodec->codec_tag) == codec->codec_id
2115 || av_codec_get_tag(os->oformat->codec_tag, icodec->codec_id) <= 0)
2116 codec->codec_tag = icodec->codec_tag;
2117 }
2118
2119 codec->bit_rate = icodec->bit_rate;
2120 codec->rc_max_rate = icodec->rc_max_rate;
2121 codec->rc_buffer_size = icodec->rc_buffer_size;
2122 codec->extradata= av_mallocz(extra_size);
2123 if (!codec->extradata)
2124 goto fail;
2125 memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2126 codec->extradata_size= icodec->extradata_size;
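/* pick the output time base: use the codec time base (scaled by
   ticks_per_frame) when the container time base is finer than both a codec
   tick and 1/500 s, unless copy_tb forces the container value */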
2127 if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){
2128 codec->time_base = icodec->time_base;
2129 codec->time_base.num *= icodec->ticks_per_frame;
2130 av_reduce(&codec->time_base.num, &codec->time_base.den,
2131 codec->time_base.num, codec->time_base.den, INT_MAX);
2132 }else
2133 codec->time_base = ist->st->time_base;
2134 switch(codec->codec_type) {
2135 case AVMEDIA_TYPE_AUDIO:
2136 if(audio_volume != 256) {
2137 fprintf(stderr,"-acodec copy and -vol are incompatible (frames are not decoded)\n");
2138 ffmpeg_exit(1);
2139 }
2140 codec->channel_layout = icodec->channel_layout;
2141 codec->sample_rate = icodec->sample_rate;
2142 codec->channels = icodec->channels;
2143 codec->frame_size = icodec->frame_size;
2144 codec->audio_service_type = icodec->audio_service_type;
2145 codec->block_align= icodec->block_align;
2146 if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3)
2147 codec->block_align= 0;
2148 if(codec->codec_id == CODEC_ID_AC3)
2149 codec->block_align= 0;
2150 break;
2151 case AVMEDIA_TYPE_VIDEO:
2152 codec->pix_fmt = icodec->pix_fmt;
2153 codec->width = icodec->width;
2154 codec->height = icodec->height;
2155 codec->has_b_frames = icodec->has_b_frames;
2156 if (!codec->sample_aspect_ratio.num) {
2157 codec->sample_aspect_ratio =
2158 ost->st->sample_aspect_ratio =
2159 ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2160 ist->st->codec->sample_aspect_ratio.num ?
2161 ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2162 }
2163 break;
2164 case AVMEDIA_TYPE_SUBTITLE:
2165 codec->width = icodec->width;
2166 codec->height = icodec->height;
2167 break;
2168 case AVMEDIA_TYPE_DATA:
2169 break;
2170 default:
2171 abort();
2172 }
2173 } else {
2174 if (!ost->enc)
2175 ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
2176 switch(codec->codec_type) {
2177 case AVMEDIA_TYPE_AUDIO:
2178 ost->fifo= av_fifo_alloc(1024);
2179 if(!ost->fifo)
2180 goto fail;
2181 ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE);
2182 if (!codec->sample_rate) {
2183 codec->sample_rate = icodec->sample_rate;
2184 if (icodec->lowres)
2185 codec->sample_rate >>= icodec->lowres;
2186 }
2187 choose_sample_rate(ost->st, ost->enc);
2188 codec->time_base = (AVRational){1, codec->sample_rate};
2189 if (!codec->channels)
2190 codec->channels = icodec->channels;
2191 if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels)
2192 codec->channel_layout = 0;
2193 ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1;
2194 icodec->request_channels = codec->channels;
2195 ist->decoding_needed = 1;
2196 ost->encoding_needed = 1;
2197 ost->resample_sample_fmt = icodec->sample_fmt;
2198 ost->resample_sample_rate = icodec->sample_rate;
2199 ost->resample_channels = icodec->channels;
2200 break;
2201 case AVMEDIA_TYPE_VIDEO:
2202 if (codec->pix_fmt == PIX_FMT_NONE)
2203 codec->pix_fmt = icodec->pix_fmt;
2204 choose_pixel_fmt(ost->st, ost->enc);
2205
2206 if (ost->st->codec->pix_fmt == PIX_FMT_NONE) {
2207 fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\n");
2208 ffmpeg_exit(1);
2209 }
2210 ost->video_resample = codec->width != icodec->width ||
2211 codec->height != icodec->height ||
2212 codec->pix_fmt != icodec->pix_fmt;
2213 if (ost->video_resample) {
2214 #if !CONFIG_AVFILTER
2215 avcodec_get_frame_defaults(&ost->pict_tmp);
2216 if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
2217 codec->width, codec->height)) {
2218 fprintf(stderr, "Cannot allocate temp picture, check pix fmt\n");
2219 ffmpeg_exit(1);
2220 }
2221 ost->img_resample_ctx = sws_getContext(
2222 icodec->width,
2223 icodec->height,
2224 icodec->pix_fmt,
2225 codec->width,
2226 codec->height,
2227 codec->pix_fmt,
2228 ost->sws_flags, NULL, NULL, NULL);
2229 if (ost->img_resample_ctx == NULL) {
2230 fprintf(stderr, "Cannot get resampling context\n");
2231 ffmpeg_exit(1);
2232 }
2233 #endif
2234 codec->bits_per_raw_sample= 0;
2235 }
2236 if (!codec->width || !codec->height) {
2237 codec->width = icodec->width;
2238 codec->height = icodec->height;
2239 }
2240 ost->resample_height = icodec->height;
2241 ost->resample_width = icodec->width;
2242 ost->resample_pix_fmt= icodec->pix_fmt;
2243 ost->encoding_needed = 1;
2244 ist->decoding_needed = 1;
2245
2246 if (!ost->frame_rate.num)
2247 ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25,1};
2248 if (ost->enc && ost->enc->supported_framerates && !force_fps) {
2249 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2250 ost->frame_rate = ost->enc->supported_framerates[idx];
2251 }
2252 codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
2253
2254 #if CONFIG_AVFILTER
2255 if (configure_video_filters(ist, ost)) {
2256 fprintf(stderr, "Error opening filters!\n");
2257 exit(1);
2258 }
2259 #endif
2260 break;
2261 case AVMEDIA_TYPE_SUBTITLE:
2262 ost->encoding_needed = 1;
2263 ist->decoding_needed = 1;
2264 break;
2265 default:
2266 abort();
2267 break;
2268 }
2269 /* two-pass mode: pass 1 writes the rate-control stats to a log file, pass 2 reads them back into stats_in */
2270 if (ost->encoding_needed &&
2271 (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2272 char logfilename[1024];
2273 FILE *f;
2274
2275 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2276 pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
2277 i);
2278 if (codec->flags & CODEC_FLAG_PASS1) {
2279 f = fopen(logfilename, "wb");
2280 if (!f) {
2281 fprintf(stderr, "Cannot write log file '%s' for pass-1 encoding: %s\n", logfilename, strerror(errno));
2282 ffmpeg_exit(1);
2283 }
2284 ost->logfile = f;
2285 } else {
2286 char *logbuffer;
2287 size_t logbuffer_size;
2288 if (read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2289 fprintf(stderr, "Error reading log file '%s' for pass-2 encoding\n", logfilename);
2290 ffmpeg_exit(1);
2291 }
2292 codec->stats_in = logbuffer;
2293 }
2294 }
2295 }
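/* make sure the shared encode buffer can hold the largest possible video frame */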
2296 if(codec->codec_type == AVMEDIA_TYPE_VIDEO){
2297 int size= codec->width * codec->height;
2298 bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 200);
2299 }
2300 }
2301
2302 if (!bit_buffer)
2303 bit_buffer = av_malloc(bit_buffer_size);
2304 if (!bit_buffer) {
2305 fprintf(stderr, "Cannot allocate %d bytes output buffer\n",
2306 bit_buffer_size);
2307 ret = AVERROR(ENOMEM);
2308 goto fail;
2309 }
2310
2311 /* open each encoder */
2312 for(i=0;i<nb_ostreams;i++) {
2313 ost = ost_table[i];
2314 if (ost->encoding_needed) {
2315 AVCodec *codec = ost->enc;
2316 AVCodecContext *dec = input_streams[ost->source_index].st->codec;
2317 if (!codec) {
2318 snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d.%d",
2319 ost->st->codec->codec_id, ost->file_index, ost->index);
2320 ret = AVERROR(EINVAL);
2321 goto dump_format;
2322 }
2323 if (dec->subtitle_header) {
2324 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
2325 if (!ost->st->codec->subtitle_header) {
2326 ret = AVERROR(ENOMEM);
2327 goto dump_format;
2328 }
2329 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2330 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2331 }
2332 if (avcodec_open(ost->st->codec, codec) < 0) {
2333 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2334 ost->file_index, ost->index);
2335 ret = AVERROR(EINVAL);
2336 goto dump_format;
2337 }
2338 extra_size += ost->st->codec->extradata_size;
2339 }
2340 }
2341
2342 /* open each decoder */
2343 for (i = 0; i < nb_input_streams; i++) {
2344 ist = &input_streams[i];
2345 if (ist->decoding_needed) {
2346 AVCodec *codec = ist->dec;
2347 if (!codec)
2348 codec = avcodec_find_decoder(ist->st->codec->codec_id);
2349 if (!codec) {
2350 snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d",
2351 ist->st->codec->codec_id, ist->file_index, ist->st->index);
2352 ret = AVERROR(EINVAL);
2353 goto dump_format;
2354 }
2355
2356 /* update requested sample format for the decoder based on the
2357 corresponding encoder sample format */
2358 for (j = 0; j < nb_ostreams; j++) {
2359 ost = ost_table[j];
2360 if (ost->source_index == i) {
2361 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
2362 break;
2363 }
2364 }
2365
2366 if (avcodec_open(ist->st->codec, codec) < 0) {
2367 snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d",
2368 ist->file_index, ist->st->index);
2369 ret = AVERROR(EINVAL);
2370 goto dump_format;
2371 }
2372 //if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2373 // ist->st->codec->flags |= CODEC_FLAG_REPEAT_FIELD;
2374 }
2375 }
2376
2377 /* init pts */
2378 for (i = 0; i < nb_input_streams; i++) {
2379 AVStream *st;
2380 ist = &input_streams[i];
2381 st= ist->st;
2382 ist->pts = st->avg_frame_rate.num ? - st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0;
2383 ist->next_pts = AV_NOPTS_VALUE;
2384 init_pts_correction(&ist->pts_ctx);
2385 ist->is_start = 1;
2386 }
2387
2388 /* set metadata information from the input file if required */
2389 for (i=0;i<nb_meta_data_maps;i++) {
2390 AVFormatContext *files[2];
2391 AVDictionary **meta[2];
2392 int j;
2393
2394 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
2395 if ((index) < 0 || (index) >= (nb_elems)) {\
2396 snprintf(error, sizeof(error), "Invalid %s index %d while processing metadata maps\n",\
2397 (desc), (index));\
2398 ret = AVERROR(EINVAL);\
2399 goto dump_format;\
2400 }
2401
2402 int out_file_index = meta_data_maps[i][0].file;
2403 int in_file_index = meta_data_maps[i][1].file;
2404 if (in_file_index < 0 || out_file_index < 0)
2405 continue;
2406 METADATA_CHECK_INDEX(out_file_index, nb_output_files, "output file")
2407 METADATA_CHECK_INDEX(in_file_index, nb_input_files, "input file")
2408
2409 files[0] = output_files[out_file_index];
2410 files[1] = input_files[in_file_index].ctx;
2411
2412 for (j = 0; j < 2; j++) {
2413 MetadataMap *map = &meta_data_maps[i][j];
2414
2415 switch (map->type) {
2416 case 'g':
2417 meta[j] = &files[j]->metadata;
2418 break;
2419 case 's':
2420 METADATA_CHECK_INDEX(map->index, files[j]->nb_streams, "stream")
2421 meta[j] = &files[j]->streams[map->index]->metadata;
2422 break;
2423 case 'c':
2424 METADATA_CHECK_INDEX(map->index, files[j]->nb_chapters, "chapter")
2425 meta[j] = &files[j]->chapters[map->index]->metadata;
2426 break;
2427 case 'p':
2428 METADATA_CHECK_INDEX(map->index, files[j]->nb_programs, "program")
2429 meta[j] = &files[j]->programs[map->index]->metadata;
2430 break;
2431 }
2432 }
2433
2434 av_dict_copy(meta[0], *meta[1], AV_DICT_DONT_OVERWRITE);
2435 }
2436
2437 /* copy global metadata by default */
2438 if (metadata_global_autocopy) {
2439
2440 for (i = 0; i < nb_output_files; i++)
2441 av_dict_copy(&output_files[i]->metadata, input_files[0].ctx->metadata,
2442 AV_DICT_DONT_OVERWRITE);
2443 }
2444
2445 /* copy chapters according to chapter maps */
2446 for (i = 0; i < nb_chapter_maps; i++) {
2447 int infile = chapter_maps[i].in_file;
2448 int outfile = chapter_maps[i].out_file;
2449
2450 if (infile < 0 || outfile < 0)
2451 continue;
2452 if (infile >= nb_input_files) {
2453 snprintf(error, sizeof(error), "Invalid input file index %d in chapter mapping.\n", infile);
2454 ret = AVERROR(EINVAL);
2455 goto dump_format;
2456 }
2457 if (outfile >= nb_output_files) {
2458 snprintf(error, sizeof(error), "Invalid output file index %d in chapter mapping.\n",outfile);
2459 ret = AVERROR(EINVAL);
2460 goto dump_format;
2461 }
2462 copy_chapters(infile, outfile);
2463 }
2464
2465 /* copy chapters from the first input file that has them */
2466 if (!nb_chapter_maps)
2467 for (i = 0; i < nb_input_files; i++) {
2468 if (!input_files[i].ctx->nb_chapters)
2469 continue;
2470
2471 for (j = 0; j < nb_output_files; j++)
2472 if ((ret = copy_chapters(i, j)) < 0)
2473 goto dump_format;
2474 break;
2475 }
2476
2477 /* open files and write file headers */
2478 for(i=0;i<nb_output_files;i++) {
2479 os = output_files[i];
2480 if (avformat_write_header(os, &output_opts[i]) < 0) {
2481 snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
2482 ret = AVERROR(EINVAL);
2483 goto dump_format;
2484 }
2485 assert_avoptions(output_opts[i]);
2486 if (strcmp(output_files[i]->oformat->name, "rtp")) {
2487 want_sdp = 0;
2488 }
2489 }
2490
2491 dump_format:
2492 /* dump the file output parameters - cannot be done before in case
2493 of stream copy */
2494 for(i=0;i<nb_output_files;i++) {
2495 av_dump_format(output_files[i], i, output_files[i]->filename, 1);
2496 }
2497
2498 /* dump the stream mapping */
2499 if (verbose >= 0) {
2500 fprintf(stderr, "Stream mapping:\n");
2501 for(i=0;i<nb_ostreams;i++) {
2502 ost = ost_table[i];
2503 fprintf(stderr, " Stream #%d.%d -> #%d.%d",
2504 input_streams[ost->source_index].file_index,
2505 input_streams[ost->source_index].st->index,
2506 ost->file_index,
2507 ost->index);
2508 if (ost->sync_ist != &input_streams[ost->source_index])
2509 fprintf(stderr, " [sync #%d.%d]",
2510 ost->sync_ist->file_index,
2511 ost->sync_ist->st->index);
2512 fprintf(stderr, "\n");
2513 }
2514 }
2515
2516 if (ret) {
2517 fprintf(stderr, "%s\n", error);
2518 goto fail;
2519 }
2520
2521 if (want_sdp) {
2522 print_sdp(output_files, nb_output_files);
2523 }
2524
2525 if (verbose >= 0)
2526 fprintf(stderr, "Press ctrl-c to stop encoding\n");
2527 term_init();
2528
2529 timer_start = av_gettime();
2530
2531 for(; received_sigterm == 0;) {
2532 int file_index, ist_index;
2533 AVPacket pkt;
2534 double ipts_min;
2535 double opts_min;
2536
2537 redo:
2538 ipts_min= 1e100;
2539 opts_min= 1e100;
2540
2541 /* select the stream that we must read now by looking at the
2542 smallest output pts */
2543 file_index = -1;
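/* when input_sync is set, the file with the smallest input pts is read
   next; otherwise the one with the smallest output pts */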
2544 for(i=0;i<nb_ostreams;i++) {
2545 double ipts, opts;
2546 ost = ost_table[i];
2547 os = output_files[ost->file_index];
2548 ist = &input_streams[ost->source_index];
2549 if(ist->is_past_recording_time || no_packet[ist->file_index])
2550 continue;
2551 opts = ost->st->pts.val * av_q2d(ost->st->time_base);
2552 ipts = (double)ist->pts;
2553 if (!input_files[ist->file_index].eof_reached){
2554 if(ipts < ipts_min) {
2555 ipts_min = ipts;
2556 if(input_sync ) file_index = ist->file_index;
2557 }
2558 if(opts < opts_min) {
2559 opts_min = opts;
2560 if(!input_sync) file_index = ist->file_index;
2561 }
2562 }
2563 if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){
2564 file_index= -1;
2565 break;
2566 }
2567 }
2568 /* if none was selected, we are finished unless some inputs only returned EAGAIN; in that case retry after a short sleep */
2569 if (file_index < 0) {
2570 if(no_packet_count){
2571 no_packet_count=0;
2572 memset(no_packet, 0, sizeof(no_packet));
2573 usleep(10000);
2574 continue;
2575 }
2576 break;
2577 }
2578
2579 /* finish if the file size limit is reached */
2580 if (limit_filesize != 0 && limit_filesize <= avio_tell(output_files[0]->pb))
2581 break;
2582
2583 /* read a frame from it and output it in the fifo */
2584 is = input_files[file_index].ctx;
2585 ret= av_read_frame(is, &pkt);
2586 if(ret == AVERROR(EAGAIN)){
2587 no_packet[file_index]=1;
2588 no_packet_count++;
2589 continue;
2590 }
2591 if (ret < 0) {
2592 input_files[file_index].eof_reached = 1;
2593 if (opt_shortest)
2594 break;
2595 else
2596 continue;
2597 }
2598
2599 no_packet_count=0;
2600 memset(no_packet, 0, sizeof(no_packet));
2601
2602 if (do_pkt_dump) {
2603 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2604 is->streams[pkt.stream_index]);
2605 }
2606 /* the following test is needed in case new streams appear
2607 dynamically in the stream: we ignore them */
2608 if (pkt.stream_index >= input_files[file_index].ctx->nb_streams)
2609 goto discard_packet;
2610 ist_index = input_files[file_index].ist_index + pkt.stream_index;
2611 ist = &input_streams[ist_index];
2612 if (ist->discard)
2613 goto discard_packet;
2614
2615 if (pkt.dts != AV_NOPTS_VALUE)
2616 pkt.dts += av_rescale_q(input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ist->st->time_base);
2617 if (pkt.pts != AV_NOPTS_VALUE)
2618 pkt.pts += av_rescale_q(input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ist->st->time_base);
2619
2620 if (pkt.stream_index < nb_input_files_ts_scale[file_index]
2621 && input_files_ts_scale[file_index][pkt.stream_index]){
2622 if(pkt.pts != AV_NOPTS_VALUE)
2623 pkt.pts *= input_files_ts_scale[file_index][pkt.stream_index];
2624 if(pkt.dts != AV_NOPTS_VALUE)
2625 pkt.dts *= input_files_ts_scale[file_index][pkt.stream_index];
2626 }
2627
2628 // fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files_ts_offset[ist->file_index], ist->st->codec->codec_type);
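/* for formats with discontinuous timestamps, fold large dts jumps into the
   per-file timestamp offset so that the converted timestamps stay continuous */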
2629 if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE
2630 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
2631 int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2632 int64_t delta= pkt_dts - ist->next_pts;
2633 if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1<ist->pts)&& !copy_ts){
2634 input_files_ts_offset[ist->file_index]-= delta;
2635 if (verbose > 2)
2636 fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, input_files_ts_offset[ist->file_index]);
2637 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2638 if(pkt.pts != AV_NOPTS_VALUE)
2639 pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2640 }
2641 }
2642
2643 /* finish if the recording time is exhausted */
2644 if (recording_time != INT64_MAX &&
2645 av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) >= 0) {
2646 ist->is_past_recording_time = 1;
2647 goto discard_packet;
2648 }
2649
2650 //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
2651 if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
2652
2653 if (verbose >= 0)
2654 fprintf(stderr, "Error while decoding stream #%d.%d\n",
2655 ist->file_index, ist->st->index);
2656 if (exit_on_error)
2657 ffmpeg_exit(1);
2658 av_free_packet(&pkt);
2659 goto redo;
2660 }
2661
2662 discard_packet:
2663 av_free_packet(&pkt);
2664
2665 /* dump report using the first video and audio output streams */
2666 print_report(output_files, ost_table, nb_ostreams, 0);
2667 }
2668
2669 /* at the end of stream, we must flush the decoder buffers */
2670 for (i = 0; i < nb_input_streams; i++) {
2671 ist = &input_streams[i];
2672 if (ist->decoding_needed) {
2673 output_packet(ist, i, ost_table, nb_ostreams, NULL);
2674 }
2675 }
2676
2677 term_exit();
2678
2679 /* write the trailer if needed and close file */
2680 for(i=0;i<nb_output_files;i++) {
2681 os = output_files[i];
2682 av_write_trailer(os);
2683 }
2684
2685 /* dump report by using the first video and audio streams */
2686 print_report(output_files, ost_table, nb_ostreams, 1);
2687
2688 /* close each encoder */
2689 for(i=0;i<nb_ostreams;i++) {
2690 ost = ost_table[i];
2691 if (ost->encoding_needed) {
2692 av_freep(&ost->st->codec->stats_in);
2693 avcodec_close(ost->st->codec);
2694 }
2695 #if CONFIG_AVFILTER
2696 avfilter_graph_free(&ost->graph);
2697 #endif
2698 }
2699
2700 /* close each decoder */
2701 for (i = 0; i < nb_input_streams; i++) {
2702 ist = &input_streams[i];
2703 if (ist->decoding_needed) {
2704 avcodec_close(ist->st->codec);
2705 }
2706 }
2707
2708 /* finished ! */
2709 ret = 0;
2710
2711 fail:
2712 av_freep(&bit_buffer);
2713
2714 if (ost_table) {
2715 for(i=0;i<nb_ostreams;i++) {
2716 ost = ost_table[i];
2717 if (ost) {
2718 if (ost->st->stream_copy)
2719 av_freep(&ost->st->codec->extradata);
2720 if (ost->logfile) {
2721 fclose(ost->logfile);
2722 ost->logfile = NULL;
2723 }
2724 av_fifo_free(ost->fifo); /* works even if fifo is not
2725 initialized but set to zero */
2726 av_freep(&ost->st->codec->subtitle_header);
2727 av_free(ost->pict_tmp.data[0]);
2728 av_free(ost->forced_kf_pts);
2729 if (ost->video_resample)
2730 sws_freeContext(ost->img_resample_ctx);
2731 if (ost->resample)
2732 audio_resample_close(ost->resample);
2733 if (ost->reformat_ctx)
2734 av_audio_convert_free(ost->reformat_ctx);
2735 av_free(ost);
2736 }
2737 }
2738 av_free(ost_table);
2739 }
2740 return ret;
2741 }
2742
2743 static int opt_format(const char *opt, const char *arg)
2744 {
2745 last_asked_format = arg;
2746 return 0;
2747 }
2748
2749 static int opt_video_rc_override_string(const char *opt, const char *arg)
2750 {
2751 video_rc_override_string = arg;
2752 return 0;
2753 }
2754
2755 static int opt_me_threshold(const char *opt, const char *arg)
2756 {
2757 me_threshold = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2758 return 0;
2759 }
2760
2761 static int opt_verbose(const char *opt, const char *arg)
2762 {
2763 verbose = parse_number_or_die(opt, arg, OPT_INT64, -10, 10);
2764 return 0;
2765 }
2766
2767 static int opt_frame_rate(const char *opt, const char *arg)
2768 {
2769 if (av_parse_video_rate(&frame_rate, arg) < 0) {
2770 fprintf(stderr, "Incorrect value for %s: %s\n", opt, arg);
2771 ffmpeg_exit(1);
2772 }
2773 return 0;
2774 }
2775
2776 static int opt_bitrate(const char *opt, const char *arg)
2777 {
2778 int codec_type = opt[0]=='a' ? AVMEDIA_TYPE_AUDIO : AVMEDIA_TYPE_VIDEO;
2779
2780 opt_default(opt, arg);
2781
2782 if (av_get_int(avcodec_opts[codec_type], "b", NULL) < 1000)
2783 fprintf(stderr, "WARNING: The bitrate parameter is set too low. It takes bits/s as argument, not kbits/s\n");
2784
2785 return 0;
2786 }
2787
2788 static int opt_frame_crop(const char *opt, const char *arg)
2789 {
2790 fprintf(stderr, "Option '%s' has been removed, use the crop filter instead\n", opt);
2791 return AVERROR(EINVAL);
2792 }
2793
2794 static int opt_frame_size(const char *opt, const char *arg)
2795 {
2796 if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2797 fprintf(stderr, "Incorrect frame size\n");
2798 return AVERROR(EINVAL);
2799 }
2800 return 0;
2801 }
2802
2803 static int opt_pad(const char *opt, const char *arg) {
2804 fprintf(stderr, "Option '%s' has been removed, use the pad filter instead\n", opt);
2805 return -1;
2806 }
2807
2808 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2809 {
2810 if (strcmp(arg, "list")) {
2811 frame_pix_fmt = av_get_pix_fmt(arg);
2812 if (frame_pix_fmt == PIX_FMT_NONE) {
2813 fprintf(stderr, "Unknown pixel format requested: %s\n", arg);
2814 return AVERROR(EINVAL);
2815 }
2816 } else {
2817 show_pix_fmts();
2818 ffmpeg_exit(0);
2819 }
2820 return 0;
2821 }
2822
2823 static int opt_frame_aspect_ratio(const char *opt, const char *arg)
2824 {
2825 int x = 0, y = 0;
2826 double ar = 0;
2827 const char *p;
2828 char *end;
2829
2830 p = strchr(arg, ':');
2831 if (p) {
2832 x = strtol(arg, &end, 10);
2833 if (end == p)
2834 y = strtol(end+1, &end, 10);
2835 if (x > 0 && y > 0)
2836 ar = (double)x / (double)y;
2837 } else
2838 ar = strtod(arg, NULL);
2839
2840 if (!ar) {
2841 fprintf(stderr, "Incorrect aspect ratio specification.\n");
2842 return AVERROR(EINVAL);
2843 }
2844 frame_aspect_ratio = ar;
2845 return 0;
2846 }
2847
2848 static int opt_metadata(const char *opt, const char *arg)
2849 {
2850 char *mid= strchr(arg, '=');
2851
2852 if(!mid){
2853 fprintf(stderr, "Missing =\n");
2854 ffmpeg_exit(1);
2855 }
2856 *mid++= 0;
2857
2858 av_dict_set(&metadata, arg, mid, 0);
2859
2860 return 0;
2861 }
2862
2863 static int opt_qscale(const char *opt, const char *arg)
2864 {
2865 video_qscale = parse_number_or_die(opt, arg, OPT_FLOAT, 0, 255);
2866 if (video_qscale == 0) {
2867 fprintf(stderr, "qscale must be > 0.0 and <= 255\n");
2868 return AVERROR(EINVAL);
2869 }
2870 return 0;
2871 }
2872
2873 static int opt_top_field_first(const char *opt, const char *arg)
2874 {
2875 top_field_first = parse_number_or_die(opt, arg, OPT_INT, 0, 1);
2876 return 0;
2877 }
2878
2879 static int opt_thread_count(const char *opt, const char *arg)
2880 {
2881 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2882 #if !HAVE_THREADS
2883 if (verbose >= 0)
2884 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2885 #endif
2886 return 0;
2887 }
2888
2889 static int opt_audio_sample_fmt(const char *opt, const char *arg)
2890 {
2891 if (strcmp(arg, "list")) {
2892 audio_sample_fmt = av_get_sample_fmt(arg);
2893 if (audio_sample_fmt == AV_SAMPLE_FMT_NONE) {
2894 av_log(NULL, AV_LOG_ERROR, "Invalid sample format '%s'\n", arg);
2895 return AVERROR(EINVAL);
2896 }
2897 } else {
2898 int i;
2899 char fmt_str[128];
2900 for (i = -1; i < AV_SAMPLE_FMT_NB; i++)
2901 printf("%s\n", av_get_sample_fmt_string(fmt_str, sizeof(fmt_str), i));
2902 ffmpeg_exit(0);
2903 }
2904 return 0;
2905 }
2906
2907 static int opt_audio_rate(const char *opt, const char *arg)
2908 {
2909 audio_sample_rate = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2910 return 0;
2911 }
2912
2913 static int opt_audio_channels(const char *opt, const char *arg)
2914 {
2915 audio_channels = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2916 return 0;
2917 }
2918
2919 static int opt_video_channel(const char *opt, const char *arg)
2920 {
2921 av_log(NULL, AV_LOG_WARNING, "This option is deprecated, use -channel.\n");
2922 opt_default("channel", arg);
2923 return 0;
2924 }
2925
2926 static int opt_video_standard(const char *opt, const char *arg)
2927 {
2928 av_log(NULL, AV_LOG_WARNING, "This option is deprecated, use -standard.\n");
2929 opt_default("standard", arg);
2930 return 0;
2931 }
2932
2933 static int opt_codec(int *pstream_copy, char **pcodec_name,
2934 int codec_type, const char *arg)
2935 {
2936 av_freep(pcodec_name);
2937 if (!strcmp(arg, "copy")) {
2938 *pstream_copy = 1;
2939 } else {
2940 *pcodec_name = av_strdup(arg);
2941 }
2942 return 0;
2943 }
2944
2945 static int opt_audio_codec(const char *opt, const char *arg)
2946 {
2947 return opt_codec(&audio_stream_copy, &audio_codec_name, AVMEDIA_TYPE_AUDIO, arg);
2948 }
2949
2950 static int opt_video_codec(const char *opt, const char *arg)
2951 {
2952 return opt_codec(&video_stream_copy, &video_codec_name, AVMEDIA_TYPE_VIDEO, arg);
2953 }
2954
2955 static int opt_subtitle_codec(const char *opt, const char *arg)
2956 {
2957 return opt_codec(&subtitle_stream_copy, &subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, arg);
2958 }
2959
2960 static int opt_data_codec(const char *opt, const char *arg)
2961 {
2962 return opt_codec(&data_stream_copy, &data_codec_name, AVMEDIA_TYPE_DATA, arg);
2963 }
2964
2965 static int opt_codec_tag(const char *opt, const char *arg)
2966 {
2967 char *tail;
2968 uint32_t *codec_tag;
2969
2970 codec_tag = !strcmp(opt, "atag") ? &audio_codec_tag :
2971 !strcmp(opt, "vtag") ? &video_codec_tag :
2972 !strcmp(opt, "stag") ? &subtitle_codec_tag : NULL;
2973 if (!codec_tag)
2974 return -1;
2975
2976 *codec_tag = strtol(arg, &tail, 0);
2977 if (!tail || *tail)
2978 *codec_tag = AV_RL32(arg);
2979
2980 return 0;
2981 }
2982
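/* Parse a -map argument: an input file index and stream index, optionally
 * followed by a sync file/stream pair; without an explicit sync reference
 * the mapped stream is synced to itself. */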
2983 static int opt_map(const char *opt, const char *arg)
2984 {
2985 StreamMap *m;
2986 char *p;
2987
2988 stream_maps = grow_array(stream_maps, sizeof(*stream_maps), &nb_stream_maps, nb_stream_maps + 1);
2989 m = &stream_maps[nb_stream_maps-1];
2990
2991 m->file_index = strtol(arg, &p, 0);
2992 if (*p)
2993 p++;
2994
2995 m->stream_index = strtol(p, &p, 0);
2996 if (*p) {
2997 p++;
2998 m->sync_file_index = strtol(p, &p, 0);
2999 if (*p)
3000 p++;
3001 m->sync_stream_index = strtol(p, &p, 0);
3002 } else {
3003 m->sync_file_index = m->file_index;
3004 m->sync_stream_index = m->stream_index;
3005 }
3006 return 0;
3007 }
3008
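/* Parse the optional ',type[index]' part of a -map_metadata specifier:
 * (g)lobal, (s)tream, (c)hapter or (p)rogram; with no suffix the map
 * refers to global metadata. */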
3009 static void parse_meta_type(char *arg, char *type, int *index, char **endptr)
3010 {
3011 *endptr = arg;
3012 if (*arg == ',') {
3013 *type = *(++arg);
3014 switch (*arg) {
3015 case 'g':
3016 break;
3017 case 's':
3018 case 'c':
3019 case 'p':
3020 *index = strtol(++arg, endptr, 0);
3021 break;
3022 default:
3023 fprintf(stderr, "Invalid metadata type %c.\n", *arg);
3024 ffmpeg_exit(1);
3025 }
3026 } else
3027 *type = 'g';
3028 }
3029
3030 static int opt_map_metadata(const char *opt, const char *arg)
3031 {
3032 MetadataMap *m, *m1;
3033 char *p;
3034
3035 meta_data_maps = grow_array(meta_data_maps, sizeof(*meta_data_maps),
3036 &nb_meta_data_maps, nb_meta_data_maps + 1);
3037
3038 m = &meta_data_maps[nb_meta_data_maps - 1][0];
3039 m->file = strtol(arg, &p, 0);
3040 parse_meta_type(p, &m->type, &m->index, &p);
3041 if (*p)
3042 p++;
3043
3044 m1 = &meta_data_maps[nb_meta_data_maps - 1][1];
3045 m1->file = strtol(p, &p, 0);
3046 parse_meta_type(p, &m1->type, &m1->index, &p);
3047
3048 if (m->type == 'g' || m1->type == 'g')
3049 metadata_global_autocopy = 0;
3050 if (m->type == 's' || m1->type == 's')
3051 metadata_streams_autocopy = 0;
3052 if (m->type == 'c' || m1->type == 'c')
3053 metadata_chapters_autocopy = 0;
3054
3055 return 0;
3056 }
3057
3058 static int opt_map_meta_data(const char *opt, const char *arg)
3059 {
3060 fprintf(stderr, "-map_meta_data is deprecated and will be removed soon. "
3061 "Use -map_metadata instead.\n");
3062 return opt_map_metadata(opt, arg);
3063 }
3064
3065 static int opt_map_chapters(const char *opt, const char *arg)
3066 {
3067 ChapterMap *c;
3068 char *p;
3069
3070 chapter_maps = grow_array(chapter_maps, sizeof(*chapter_maps), &nb_chapter_maps,
3071 nb_chapter_maps + 1);
3072 c = &chapter_maps[nb_chapter_maps - 1];
3073 c->out_file = strtol(arg, &p, 0);
3074 if (*p)
3075 p++;
3076
3077 c->in_file = strtol(p, &p, 0);
3078 return 0;
3079 }
3080
3081 static int opt_input_ts_scale(const char *opt, const char *arg)
3082 {
3083 unsigned int stream;
3084 double scale;
3085 char *p;
3086
3087 stream = strtol(arg, &p, 0);
3088 if (*p)
3089 p++;
3090 scale= strtod(p, &p);
3091
3092 if(stream >= MAX_STREAMS)
3093 ffmpeg_exit(1);
3094
3095 input_files_ts_scale[nb_input_files] = grow_array(input_files_ts_scale[nb_input_files], sizeof(*input_files_ts_scale[nb_input_files]), &nb_input_files_ts_scale[nb_input_files], stream + 1);
3096 input_files_ts_scale[nb_input_files][stream]= scale;
3097 return 0;
3098 }
3099
3100 static int opt_recording_time(const char *opt, const char *arg)
3101 {
3102 recording_time = parse_time_or_die(opt, arg, 1);
3103 return 0;
3104 }
3105
3106 static int opt_start_time(const char *opt, const char *arg)
3107 {
3108 start_time = parse_time_or_die(opt, arg, 1);
3109 return 0;
3110 }
3111
3112 static int opt_recording_timestamp(const char *opt, const char *arg)
3113 {
3114 recording_timestamp = parse_time_or_die(opt, arg, 0) / 1000000;
3115 return 0;
3116 }
3117
3118 static int opt_input_ts_offset(const char *opt, const char *arg)
3119 {
3120 input_ts_offset = parse_time_or_die(opt, arg, 1);
3121 return 0;
3122 }
3123
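/* Resolve a codec name to its id, aborting if the codec is unknown, of the
 * wrong media type, or experimental while the strictness level does not
 * allow experimental codecs. */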
3124 static enum CodecID find_codec_or_die(const char *name, int type, int encoder, int strict)
3125 {
3126 const char *codec_string = encoder ? "encoder" : "decoder";
3127 AVCodec *codec;
3128
3129 if(!name)
3130 return CODEC_ID_NONE;
3131 codec = encoder ?
3132 avcodec_find_encoder_by_name(name) :
3133 avcodec_find_decoder_by_name(name);
3134 if(!codec) {
3135 fprintf(stderr, "Unknown %s '%s'\n", codec_string, name);
3136 ffmpeg_exit(1);
3137 }
3138 if(codec->type != type) {
3139 fprintf(stderr, "Invalid %s type '%s'\n", codec_string, name);
3140 ffmpeg_exit(1);
3141 }
3142 if(codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
3143 strict > FF_COMPLIANCE_EXPERIMENTAL) {
3144 fprintf(stderr, "%s '%s' is experimental and might produce bad "
3145 "results.\nAdd '-strict experimental' if you want to use it.\n",
3146 codec_string, codec->name);
3147 codec = encoder ?
3148 avcodec_find_encoder(codec->id) :
3149 avcodec_find_decoder(codec->id);
3150 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
3151 fprintf(stderr, "Or use the non experimental %s '%s'.\n",
3152 codec_string, codec->name);
3153 ffmpeg_exit(1);
3154 }
3155 return codec->id;
3156 }
3157
3158 static int opt_input_file(const char *opt, const char *filename)
3159 {
3160 AVFormatContext *ic;
3161 AVInputFormat *file_iformat = NULL;
3162 int err, i, ret, rfps, rfps_base;
3163 int64_t timestamp;
3164 uint8_t buf[128];
3165
3166 if (last_asked_format) {
3167 if (!(file_iformat = av_find_input_format(last_asked_format))) {