Fix heap overflow with -async.
[libav.git] / ffmpeg.c
1 /*
2 * FFmpeg main
3 * Copyright (c) 2000-2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /* needed for usleep() */
23 #define _XOPEN_SOURCE 600
24
25 #include "config.h"
26 #include <ctype.h>
27 #include <string.h>
28 #include <math.h>
29 #include <stdlib.h>
30 #include <errno.h>
31 #include <signal.h>
32 #include <limits.h>
33 #include <unistd.h>
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/opt.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavcodec/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/avstring.h"
42 #include "libavformat/os_support.h"
43
44 #if HAVE_SYS_RESOURCE_H
45 #include <sys/types.h>
46 #include <sys/resource.h>
47 #elif HAVE_GETPROCESSTIMES
48 #include <windows.h>
49 #endif
50
51 #if HAVE_SYS_SELECT_H
52 #include <sys/select.h>
53 #endif
54
55 #if HAVE_TERMIOS_H
56 #include <fcntl.h>
57 #include <sys/ioctl.h>
58 #include <sys/time.h>
59 #include <termios.h>
60 #elif HAVE_CONIO_H
61 #include <conio.h>
62 #endif
63 #undef time //needed because HAVE_AV_CONFIG_H is defined on top
64 #include <time.h>
65
66 #include "cmdutils.h"
67
68 #undef NDEBUG
69 #include <assert.h>
70
71 #undef exit
72
73 const char program_name[] = "FFmpeg";
74 const int program_birth_year = 2000;
75
76 /* select an input stream for an output stream */
77 typedef struct AVStreamMap {
78 int file_index;
79 int stream_index;
80 int sync_file_index;
81 int sync_stream_index;
82 } AVStreamMap;
83
84 /** select an input file for an output file */
85 typedef struct AVMetaDataMap {
86 int out_file;
87 int in_file;
88 } AVMetaDataMap;
89
90 static const OptionDef options[];
91
92 #define MAX_FILES 20
93
94 static const char *last_asked_format = NULL;
95 static AVFormatContext *input_files[MAX_FILES];
96 static int64_t input_files_ts_offset[MAX_FILES];
97 static double input_files_ts_scale[MAX_FILES][MAX_STREAMS];
98 static AVCodec *input_codecs[MAX_FILES*MAX_STREAMS];
99 static int nb_input_files = 0;
100 static int nb_icodecs;
101
102 static AVFormatContext *output_files[MAX_FILES];
103 static AVCodec *output_codecs[MAX_FILES*MAX_STREAMS];
104 static int nb_output_files = 0;
105 static int nb_ocodecs;
106
107 static AVStreamMap stream_maps[MAX_FILES*MAX_STREAMS];
108 static int nb_stream_maps;
109
110 static AVMetaDataMap meta_data_maps[MAX_FILES];
111 static int nb_meta_data_maps;
112
113 static int frame_width = 0;
114 static int frame_height = 0;
115 static float frame_aspect_ratio = 0;
116 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
117 static enum SampleFormat audio_sample_fmt = SAMPLE_FMT_NONE;
118 static int frame_padtop = 0;
119 static int frame_padbottom = 0;
120 static int frame_padleft = 0;
121 static int frame_padright = 0;
122 static int padcolor[3] = {16,128,128}; /* default to black */
123 static int frame_topBand = 0;
124 static int frame_bottomBand = 0;
125 static int frame_leftBand = 0;
126 static int frame_rightBand = 0;
127 static int max_frames[4] = {INT_MAX, INT_MAX, INT_MAX, INT_MAX};
128 static AVRational frame_rate;
129 static float video_qscale = 0;
130 static uint16_t *intra_matrix = NULL;
131 static uint16_t *inter_matrix = NULL;
132 static const char *video_rc_override_string=NULL;
133 static int video_disable = 0;
134 static int video_discard = 0;
135 static char *video_codec_name = NULL;
136 static int video_codec_tag = 0;
137 static char *video_language = NULL;
138 static int same_quality = 0;
139 static int do_deinterlace = 0;
140 static int top_field_first = -1;
141 static int me_threshold = 0;
142 static int intra_dc_precision = 8;
143 static int loop_input = 0;
144 static int loop_output = AVFMT_NOOUTPUTLOOP;
145 static int qp_hist = 0;
146
147 static int intra_only = 0;
148 static int audio_sample_rate = 44100;
149 static int64_t channel_layout = 0;
150 #define QSCALE_NONE -99999
151 static float audio_qscale = QSCALE_NONE;
152 static int audio_disable = 0;
153 static int audio_channels = 1;
154 static char *audio_codec_name = NULL;
155 static int audio_codec_tag = 0;
156 static char *audio_language = NULL;
157
158 static int subtitle_disable = 0;
159 static char *subtitle_codec_name = NULL;
160 static char *subtitle_language = NULL;
161 static int subtitle_codec_tag = 0;
162
163 static float mux_preload= 0.5;
164 static float mux_max_delay= 0.7;
165
166 static int64_t recording_time = INT64_MAX;
167 static int64_t start_time = 0;
168 static int64_t rec_timestamp = 0;
169 static int64_t input_ts_offset = 0;
170 static int file_overwrite = 0;
171 static int metadata_count;
172 static AVMetadataTag *metadata;
173 static int do_benchmark = 0;
174 static int do_hex_dump = 0;
175 static int do_pkt_dump = 0;
176 static int do_psnr = 0;
177 static int do_pass = 0;
178 static char *pass_logfilename_prefix = NULL;
179 static int audio_stream_copy = 0;
180 static int video_stream_copy = 0;
181 static int subtitle_stream_copy = 0;
182 static int video_sync_method= -1;
183 static int audio_sync_method= 0;
184 static float audio_drift_threshold= 0.1;
185 static int copy_ts= 0;
186 static int opt_shortest = 0;
187 static int video_global_header = 0;
188 static char *vstats_filename;
189 static FILE *vstats_file;
190 static int opt_programid = 0;
191 static int copy_initial_nonkeyframes = 0;
192
193 static int rate_emu = 0;
194
195 static int video_channel = 0;
196 static char *video_standard;
197
198 static int audio_volume = 256;
199
200 static int exit_on_error = 0;
201 static int using_stdin = 0;
202 static int verbose = 1;
203 static int thread_count= 1;
204 static int q_pressed = 0;
205 static int64_t video_size = 0;
206 static int64_t audio_size = 0;
207 static int64_t extra_size = 0;
208 static int nb_frames_dup = 0;
209 static int nb_frames_drop = 0;
210 static int input_sync;
211 static uint64_t limit_filesize = 0;
212 static int force_fps = 0;
213
214 static int pgmyuv_compatibility_hack=0;
215 static float dts_delta_threshold = 10;
216
217 static unsigned int sws_flags = SWS_BICUBIC;
218
219 static int64_t timer_start;
220
221 static uint8_t *audio_buf;
222 static uint8_t *audio_out;
223 unsigned int allocated_audio_out_size, allocated_audio_buf_size;
224
225 static short *samples;
226
227 static AVBitStreamFilterContext *video_bitstream_filters=NULL;
228 static AVBitStreamFilterContext *audio_bitstream_filters=NULL;
229 static AVBitStreamFilterContext *subtitle_bitstream_filters=NULL;
230 static AVBitStreamFilterContext *bitstream_filters[MAX_FILES][MAX_STREAMS];
231
232 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
233
234 struct AVInputStream;
235
236 typedef struct AVOutputStream {
237 int file_index; /* file index */
238 int index; /* stream index in the output file */
239 int source_index; /* AVInputStream index */
240 AVStream *st; /* stream in the output file */
241 int encoding_needed; /* true if encoding needed for this stream */
242 int frame_number;
243 /* input pts and corresponding output pts
244 for A/V sync */
245 //double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
246 struct AVInputStream *sync_ist; /* input stream to sync against */
247 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
248 /* video only */
249 int video_resample;
250 AVFrame pict_tmp; /* temporary image for resampling */
251 struct SwsContext *img_resample_ctx; /* for image resampling */
252 int resample_height;
253 int resample_width;
254 int resample_pix_fmt;
255
256 /* full frame size of first frame */
257 int original_height;
258 int original_width;
259
260 /* cropping area sizes */
261 int video_crop;
262 int topBand;
263 int bottomBand;
264 int leftBand;
265 int rightBand;
266
267 /* cropping area of first frame */
268 int original_topBand;
269 int original_bottomBand;
270 int original_leftBand;
271 int original_rightBand;
272
273 /* padding area sizes */
274 int video_pad;
275 int padtop;
276 int padbottom;
277 int padleft;
278 int padright;
279
280 /* audio only */
281 int audio_resample;
282 ReSampleContext *resample; /* for audio resampling */
283 int reformat_pair;
284 AVAudioConvert *reformat_ctx;
285 AVFifoBuffer *fifo; /* for compression: one audio fifo per codec */
286 FILE *logfile;
287 } AVOutputStream;
288
289 typedef struct AVInputStream {
290 int file_index;
291 int index;
292 AVStream *st;
293 int discard; /* true if stream data should be discarded */
294 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
295 int64_t sample_index; /* current sample */
296
297 int64_t start; /* time when read started */
298 int64_t next_pts; /* synthetic pts for cases where pkt.pts
299 is not defined */
300 int64_t pts; /* current pts */
301 int is_start; /* is 1 at the start and after a discontinuity */
302 } AVInputStream;
303
304 typedef struct AVInputFile {
305 int eof_reached; /* true if eof reached */
306 int ist_index; /* index of first stream in ist_table */
307 int buffer_size; /* current total buffer size */
308 int nb_streams; /* nb streams we are aware of */
309 } AVInputFile;
310
311 #if HAVE_TERMIOS_H
312
313 /* init terminal so that we can grab keys */
314 static struct termios oldtty;
315 #endif
316
317 static void term_exit(void)
318 {
319 #if HAVE_TERMIOS_H
320 tcsetattr (0, TCSANOW, &oldtty);
321 #endif
322 }
323
324 static volatile int received_sigterm = 0;
325
326 static void
327 sigterm_handler(int sig)
328 {
329 received_sigterm = sig;
330 term_exit();
331 }
332
333 static void term_init(void)
334 {
335 #if HAVE_TERMIOS_H
336 struct termios tty;
337
338 tcgetattr (0, &tty);
339 oldtty = tty;
340
341 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
342 |INLCR|IGNCR|ICRNL|IXON);
343 tty.c_oflag |= OPOST;
344 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
345 tty.c_cflag &= ~(CSIZE|PARENB);
346 tty.c_cflag |= CS8;
347 tty.c_cc[VMIN] = 1;
348 tty.c_cc[VTIME] = 0;
349
350 tcsetattr (0, TCSANOW, &tty);
351 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
352 #endif
353
354 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
355 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
356 #ifdef SIGXCPU
357 signal(SIGXCPU, sigterm_handler);
358 #endif
359
360 /*
361 register a function to be called at normal program termination
362 */
363 atexit(term_exit);
364 #if CONFIG_BEOS_NETSERVER
365 fcntl(0, F_SETFL, fcntl(0, F_GETFL) | O_NONBLOCK);
366 #endif
367 }
368
369 /* read a key without blocking */
370 static int read_key(void)
371 {
372 #if HAVE_TERMIOS_H
373 int n = 1;
374 unsigned char ch;
375 #if !CONFIG_BEOS_NETSERVER
376 struct timeval tv;
377 fd_set rfds;
378
379 FD_ZERO(&rfds);
380 FD_SET(0, &rfds);
381 tv.tv_sec = 0;
382 tv.tv_usec = 0;
383 n = select(1, &rfds, NULL, NULL, &tv);
384 #endif
385 if (n > 0) {
386 n = read(0, &ch, 1);
387 if (n == 1)
388 return ch;
389
390 return n;
391 }
392 #elif HAVE_CONIO_H
393 if(kbhit())
394 return(getch());
395 #endif
396 return -1;
397 }
398
399 static int decode_interrupt_cb(void)
400 {
401 return q_pressed || (q_pressed = read_key() == 'q');
402 }
403
404 static int av_exit(int ret)
405 {
406 int i;
407
408 /* close files */
409 for(i=0;i<nb_output_files;i++) {
410 /* maybe av_close_output_file ??? */
411 AVFormatContext *s = output_files[i];
412 int j;
413 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
414 url_fclose(s->pb);
415 for(j=0;j<s->nb_streams;j++) {
416 av_metadata_free(&s->streams[j]->metadata);
417 av_free(s->streams[j]->codec);
418 av_free(s->streams[j]);
419 }
420 for(j=0;j<s->nb_programs;j++) {
421 av_metadata_free(&s->programs[j]->metadata);
422 }
423 for(j=0;j<s->nb_chapters;j++) {
424 av_metadata_free(&s->chapters[j]->metadata);
425 }
426 av_metadata_free(&s->metadata);
427 av_free(s);
428 }
429 for(i=0;i<nb_input_files;i++)
430 av_close_input_file(input_files[i]);
431
432 av_free(intra_matrix);
433 av_free(inter_matrix);
434
435 if (vstats_file)
436 fclose(vstats_file);
437 av_free(vstats_filename);
438
439 av_free(opt_names);
440
441 av_free(video_codec_name);
442 av_free(audio_codec_name);
443 av_free(subtitle_codec_name);
444
445 av_free(video_standard);
446
447 #if CONFIG_POWERPC_PERF
448 void powerpc_display_perf_report(void);
449 powerpc_display_perf_report();
450 #endif /* CONFIG_POWERPC_PERF */
451
452 for (i=0;i<CODEC_TYPE_NB;i++)
453 av_free(avcodec_opts[i]);
454 av_free(avformat_opts);
455 av_free(sws_opts);
456 av_free(audio_buf);
457 av_free(audio_out);
458 allocated_audio_buf_size= allocated_audio_out_size= 0;
459 av_free(samples);
460
461 if (received_sigterm) {
462 fprintf(stderr,
463 "Received signal %d: terminating.\n",
464 (int) received_sigterm);
465 exit (255);
466 }
467
468 exit(ret); /* not all OS-es handle main() return value */
469 return ret;
470 }
471
472 static int read_ffserver_streams(AVFormatContext *s, const char *filename)
473 {
474 int i, err;
475 AVFormatContext *ic;
476 int nopts = 0;
477
478 err = av_open_input_file(&ic, filename, NULL, FFM_PACKET_SIZE, NULL);
479 if (err < 0)
480 return err;
481 /* copy stream format */
482 s->nb_streams = ic->nb_streams;
483 for(i=0;i<ic->nb_streams;i++) {
484 AVStream *st;
485
486 // FIXME: a more elegant solution is needed
487 st = av_mallocz(sizeof(AVStream));
488 memcpy(st, ic->streams[i], sizeof(AVStream));
489 st->codec = avcodec_alloc_context();
490 if (!st->codec) {
491 print_error(filename, AVERROR(ENOMEM));
492 av_exit(1);
493 }
494 memcpy(st->codec, ic->streams[i]->codec, sizeof(AVCodecContext));
495 s->streams[i] = st;
496
497 if (st->codec->codec_type == CODEC_TYPE_AUDIO && audio_stream_copy)
498 st->stream_copy = 1;
499 else if (st->codec->codec_type == CODEC_TYPE_VIDEO && video_stream_copy)
500 st->stream_copy = 1;
501
502 if(!st->codec->thread_count)
503 st->codec->thread_count = 1;
504 if(st->codec->thread_count>1)
505 avcodec_thread_init(st->codec, st->codec->thread_count);
506
507 if(st->codec->flags & CODEC_FLAG_BITEXACT)
508 nopts = 1;
509 }
510
511 if (!nopts)
512 s->timestamp = av_gettime();
513
514 av_close_input_file(ic);
515 return 0;
516 }
517
518 static double
519 get_sync_ipts(const AVOutputStream *ost)
520 {
521 const AVInputStream *ist = ost->sync_ist;
522 return (double)(ist->pts - start_time)/AV_TIME_BASE;
523 }
524
525 static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){
526 int ret;
527
528 while(bsfc){
529 AVPacket new_pkt= *pkt;
530 int a= av_bitstream_filter_filter(bsfc, avctx, NULL,
531 &new_pkt.data, &new_pkt.size,
532 pkt->data, pkt->size,
533 pkt->flags & PKT_FLAG_KEY);
534 if(a>0){
535 av_free_packet(pkt);
536 new_pkt.destruct= av_destruct_packet;
537 } else if(a<0){
538 fprintf(stderr, "%s failed for stream %d, codec %s",
539 bsfc->filter->name, pkt->stream_index,
540 avctx->codec ? avctx->codec->name : "copy");
541 print_error("", a);
542 if (exit_on_error)
543 av_exit(1);
544 }
545 *pkt= new_pkt;
546
547 bsfc= bsfc->next;
548 }
549
550 ret= av_interleaved_write_frame(s, pkt);
551 if(ret < 0){
552 print_error("av_interleaved_write_frame()", ret);
553 av_exit(1);
554 }
555 }
556
557 #define MAX_AUDIO_PACKET_SIZE (128 * 1024)
558
559 static void do_audio_out(AVFormatContext *s,
560 AVOutputStream *ost,
561 AVInputStream *ist,
562 unsigned char *buf, int size)
563 {
564 uint8_t *buftmp;
565 int64_t audio_out_size, audio_buf_size;
566 int64_t allocated_for_size= size;
567
568 int size_out, frame_bytes, ret;
569 AVCodecContext *enc= ost->st->codec;
570 AVCodecContext *dec= ist->st->codec;
571 int osize= av_get_bits_per_sample_format(enc->sample_fmt)/8;
572 int isize= av_get_bits_per_sample_format(dec->sample_fmt)/8;
573 const int coded_bps = av_get_bits_per_sample(enc->codec->id);
574
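/* The buffer sizes below are recomputed from allocated_for_size each time we
   come back to this label; the -async silence-padding branch further down jumps
   back here when it needs more room, so audio_buf/audio_out are regrown instead
   of overrun (the heap overflow with -async referenced in the commit message). */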
575 need_realloc:
576 audio_buf_size= (allocated_for_size + isize*dec->channels - 1) / (isize*dec->channels);
577 audio_buf_size= (audio_buf_size*enc->sample_rate + dec->sample_rate) / dec->sample_rate;
578 audio_buf_size= audio_buf_size*2 + 10000; //safety factors for the deprecated resampling API
579 audio_buf_size*= osize*enc->channels;
580
581 audio_out_size= FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
582 if(coded_bps > 8*osize)
583 audio_out_size= audio_out_size * coded_bps / (8*osize);
584 audio_out_size += FF_MIN_BUFFER_SIZE;
585
586 if(audio_out_size > INT_MAX || audio_buf_size > INT_MAX){
587 fprintf(stderr, "Buffer sizes too large\n");
588 av_exit(1);
589 }
590
591 av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
592 av_fast_malloc(&audio_out, &allocated_audio_out_size, audio_out_size);
593 if (!audio_buf || !audio_out){
594 fprintf(stderr, "Out of memory in do_audio_out\n");
595 av_exit(1);
596 }
597
598 if (enc->channels != dec->channels)
599 ost->audio_resample = 1;
600
601 if (ost->audio_resample && !ost->resample) {
602 if (dec->sample_fmt != SAMPLE_FMT_S16)
603 fprintf(stderr, "Warning, using s16 intermediate sample format for resampling\n");
604 ost->resample = av_audio_resample_init(enc->channels, dec->channels,
605 enc->sample_rate, dec->sample_rate,
606 enc->sample_fmt, dec->sample_fmt,
607 16, 10, 0, 0.8);
608 if (!ost->resample) {
609 fprintf(stderr, "Can not resample %d channels @ %d Hz to %d channels @ %d Hz\n",
610 dec->channels, dec->sample_rate,
611 enc->channels, enc->sample_rate);
612 av_exit(1);
613 }
614 }
615
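/* Cache key for the sample-format converter: the (encoder, decoder) format pair
   is packed into one value so the reformat context below is only rebuilt when
   either format changes. */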
616 #define MAKE_SFMT_PAIR(a,b) ((a)+SAMPLE_FMT_NB*(b))
617 if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt &&
618 MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt)!=ost->reformat_pair) {
619 if (ost->reformat_ctx)
620 av_audio_convert_free(ost->reformat_ctx);
621 ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1,
622 dec->sample_fmt, 1, NULL, 0);
623 if (!ost->reformat_ctx) {
624 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
625 avcodec_get_sample_fmt_name(dec->sample_fmt),
626 avcodec_get_sample_fmt_name(enc->sample_fmt));
627 av_exit(1);
628 }
629 ost->reformat_pair=MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);
630 }
631
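/* Audio drift correction (audio_sync_method, set with -async): compare the
   expected output sample position against the input timestamp and either drop
   input samples, prepend silence, or let the resampler compensate. */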
632 if(audio_sync_method){
633 double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts
634 - av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2);
635 double idelta= delta*ist->st->codec->sample_rate / enc->sample_rate;
636 int byte_delta= ((int)idelta)*2*ist->st->codec->channels;
637
638 //FIXME resample delay
639 if(fabs(delta) > 50){
640 if(ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate){
641 if(byte_delta < 0){
642 byte_delta= FFMAX(byte_delta, -size);
643 size += byte_delta;
644 buf -= byte_delta;
645 if(verbose > 2)
646 fprintf(stderr, "discarding %d audio samples\n", (int)-delta);
647 if(!size)
648 return;
649 ist->is_start=0;
650 }else{
651 static uint8_t *input_tmp= NULL;
652 input_tmp= av_realloc(input_tmp, byte_delta + size);
653
654 if(byte_delta > allocated_for_size - size){
655 allocated_for_size= byte_delta + (int64_t)size;
656 goto need_realloc;
657 }
658 ist->is_start=0;
659
660 memset(input_tmp, 0, byte_delta);
661 memcpy(input_tmp + byte_delta, buf, size);
662 buf= input_tmp;
663 size += byte_delta;
664 if(verbose > 2)
665 fprintf(stderr, "adding %d audio samples of silence\n", (int)delta);
666 }
667 }else if(audio_sync_method>1){
668 int comp= av_clip(delta, -audio_sync_method, audio_sync_method);
669 assert(ost->audio_resample);
670 if(verbose > 2)
671 fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
672 // fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2));
673 av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
674 }
675 }
676 }else
677 ost->sync_opts= lrintf(get_sync_ipts(ost) * enc->sample_rate)
678 - av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2); //FIXME wrong
679
680 if (ost->audio_resample) {
681 buftmp = audio_buf;
682 size_out = audio_resample(ost->resample,
683 (short *)buftmp, (short *)buf,
684 size / (ist->st->codec->channels * isize));
685 size_out = size_out * enc->channels * osize;
686 } else {
687 buftmp = buf;
688 size_out = size;
689 }
690
691 if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt) {
692 const void *ibuf[6]= {buftmp};
693 void *obuf[6]= {audio_buf};
694 int istride[6]= {isize};
695 int ostride[6]= {osize};
696 int len= size_out/istride[0];
697 if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
698 printf("av_audio_convert() failed\n");
699 if (exit_on_error)
700 av_exit(1);
701 return;
702 }
703 buftmp = audio_buf;
704 size_out = len*osize;
705 }
706
707 /* now encode as many frames as possible */
708 if (enc->frame_size > 1) {
709 /* output resampled raw samples */
710 if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
711 fprintf(stderr, "av_fifo_realloc2() failed\n");
712 av_exit(1);
713 }
714 av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);
715
716 frame_bytes = enc->frame_size * osize * enc->channels;
717
718 while (av_fifo_size(ost->fifo) >= frame_bytes) {
719 AVPacket pkt;
720 av_init_packet(&pkt);
721
722 av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
723
724 //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
725
726 ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
727 (short *)audio_buf);
728 if (ret < 0) {
729 fprintf(stderr, "Audio encoding failed\n");
730 av_exit(1);
731 }
732 audio_size += ret;
733 pkt.stream_index= ost->index;
734 pkt.data= audio_out;
735 pkt.size= ret;
736 if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
737 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
738 pkt.flags |= PKT_FLAG_KEY;
739 write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
740
741 ost->sync_opts += enc->frame_size;
742 }
743 } else {
744 AVPacket pkt;
745 av_init_packet(&pkt);
746
747 ost->sync_opts += size_out / (osize * enc->channels);
748
749 /* output a pcm frame */
750 /* determine the size of the coded buffer */
751 size_out /= osize;
752 if (coded_bps)
753 size_out = size_out*coded_bps/8;
754
755 if(size_out > audio_out_size){
756 fprintf(stderr, "Internal error, buffer size too small\n");
757 av_exit(1);
758 }
759
760 //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
761 ret = avcodec_encode_audio(enc, audio_out, size_out,
762 (short *)buftmp);
763 if (ret < 0) {
764 fprintf(stderr, "Audio encoding failed\n");
765 av_exit(1);
766 }
767 audio_size += ret;
768 pkt.stream_index= ost->index;
769 pkt.data= audio_out;
770 pkt.size= ret;
771 if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
772 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
773 pkt.flags |= PKT_FLAG_KEY;
774 write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
775 }
776 }
777
778 static void pre_process_video_frame(AVInputStream *ist, AVPicture *picture, void **bufp)
779 {
780 AVCodecContext *dec;
781 AVPicture *picture2;
782 AVPicture picture_tmp;
783 uint8_t *buf = 0;
784
785 dec = ist->st->codec;
786
787 /* deinterlace : must be done before any resize */
788 if (do_deinterlace) {
789 int size;
790
791 /* create temporary picture */
792 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
793 buf = av_malloc(size);
794 if (!buf)
795 return;
796
797 picture2 = &picture_tmp;
798 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
799
800 if(avpicture_deinterlace(picture2, picture,
801 dec->pix_fmt, dec->width, dec->height) < 0) {
802 /* if error, do not deinterlace */
803 fprintf(stderr, "Deinterlacing failed\n");
804 av_free(buf);
805 buf = NULL;
806 picture2 = picture;
807 }
808 } else {
809 picture2 = picture;
810 }
811
812 if (picture != picture2)
813 *picture = *picture2;
814 *bufp = buf;
815 }
816
817 /* we begin to correct av delay at this threshold */
818 #define AV_DELAY_MAX 0.100
819
820 static void do_subtitle_out(AVFormatContext *s,
821 AVOutputStream *ost,
822 AVInputStream *ist,
823 AVSubtitle *sub,
824 int64_t pts)
825 {
826 static uint8_t *subtitle_out = NULL;
827 int subtitle_out_max_size = 1024 * 1024;
828 int subtitle_out_size, nb, i;
829 AVCodecContext *enc;
830 AVPacket pkt;
831
832 if (pts == AV_NOPTS_VALUE) {
833 fprintf(stderr, "Subtitle packets must have a pts\n");
834 if (exit_on_error)
835 av_exit(1);
836 return;
837 }
838
839 enc = ost->st->codec;
840
841 if (!subtitle_out) {
842 subtitle_out = av_malloc(subtitle_out_max_size);
843 }
844
845 /* Note: DVB subtitles need one packet to draw them and another
846 packet to clear them */
847 /* XXX: signal it in the codec context ? */
848 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
849 nb = 2;
850 else
851 nb = 1;
852
853 for(i = 0; i < nb; i++) {
854 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
855 // start_display_time is required to be 0
856 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){1, 1000}, AV_TIME_BASE_Q);
857 sub->end_display_time -= sub->start_display_time;
858 sub->start_display_time = 0;
859 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
860 subtitle_out_max_size, sub);
861 if (subtitle_out_size < 0) {
862 fprintf(stderr, "Subtitle encoding failed\n");
863 av_exit(1);
864 }
865
866 av_init_packet(&pkt);
867 pkt.stream_index = ost->index;
868 pkt.data = subtitle_out;
869 pkt.size = subtitle_out_size;
870 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
871 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
872 /* XXX: the pts correction is handled here. Maybe handling
873 it in the codec would be better */
874 if (i == 0)
875 pkt.pts += 90 * sub->start_display_time;
876 else
877 pkt.pts += 90 * sub->end_display_time;
878 }
879 write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
880 }
881 }
882
883 static int bit_buffer_size= 1024*256;
884 static uint8_t *bit_buffer= NULL;
885
886 static void do_video_out(AVFormatContext *s,
887 AVOutputStream *ost,
888 AVInputStream *ist,
889 AVFrame *in_picture,
890 int *frame_size)
891 {
892 int nb_frames, i, ret;
893 int64_t topBand, bottomBand, leftBand, rightBand;
894 AVFrame *final_picture, *formatted_picture, *resampling_dst, *padding_src;
895 AVFrame picture_crop_temp, picture_pad_temp;
896 AVCodecContext *enc, *dec;
897
898 avcodec_get_frame_defaults(&picture_crop_temp);
899 avcodec_get_frame_defaults(&picture_pad_temp);
900
901 enc = ost->st->codec;
902 dec = ist->st->codec;
903
904 /* by default, we output a single frame */
905 nb_frames = 1;
906
907 *frame_size = 0;
908
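/* Video sync (-vsync): compare the output frame counter (sync_opts) against the
   input timestamp and drop or duplicate frames to keep the two in step. */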
909 if(video_sync_method){
910 double vdelta;
911 vdelta = get_sync_ipts(ost) / av_q2d(enc->time_base) - ost->sync_opts;
912 //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
913 if (vdelta < -1.1)
914 nb_frames = 0;
915 else if (video_sync_method == 2 || (video_sync_method<0 && (s->oformat->flags & AVFMT_VARIABLE_FPS))){
916 if(vdelta<=-0.6){
917 nb_frames=0;
918 }else if(vdelta>0.6)
919 ost->sync_opts= lrintf(get_sync_ipts(ost) / av_q2d(enc->time_base));
920 }else if (vdelta > 1.1)
921 nb_frames = lrintf(vdelta);
922 //fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, get_sync_ipts(ost), nb_frames);
923 if (nb_frames == 0){
924 ++nb_frames_drop;
925 if (verbose>2)
926 fprintf(stderr, "*** drop!\n");
927 }else if (nb_frames > 1) {
928 nb_frames_dup += nb_frames - 1;
929 if (verbose>2)
930 fprintf(stderr, "*** %d dup!\n", nb_frames-1);
931 }
932 }else
933 ost->sync_opts= lrintf(get_sync_ipts(ost) / av_q2d(enc->time_base));
934
935 nb_frames= FFMIN(nb_frames, max_frames[CODEC_TYPE_VIDEO] - ost->frame_number);
936 if (nb_frames <= 0)
937 return;
938
939 if (ost->video_crop) {
940 if (av_picture_crop((AVPicture *)&picture_crop_temp, (AVPicture *)in_picture, dec->pix_fmt, ost->topBand, ost->leftBand) < 0) {
941 fprintf(stderr, "error cropping picture\n");
942 if (exit_on_error)
943 av_exit(1);
944 return;
945 }
946 formatted_picture = &picture_crop_temp;
947 } else {
948 formatted_picture = in_picture;
949 }
950
951 final_picture = formatted_picture;
952 padding_src = formatted_picture;
953 resampling_dst = &ost->pict_tmp;
954 if (ost->video_pad) {
955 final_picture = &ost->pict_tmp;
956 if (ost->video_resample) {
957 if (av_picture_crop((AVPicture *)&picture_pad_temp, (AVPicture *)final_picture, enc->pix_fmt, ost->padtop, ost->padleft) < 0) {
958 fprintf(stderr, "error padding picture\n");
959 if (exit_on_error)
960 av_exit(1);
961 return;
962 }
963 resampling_dst = &picture_pad_temp;
964 }
965 }
966
967 if( (ost->resample_height != (ist->st->codec->height - (ost->topBand + ost->bottomBand)))
968 || (ost->resample_width != (ist->st->codec->width - (ost->leftBand + ost->rightBand)))
969 || (ost->resample_pix_fmt!= ist->st->codec->pix_fmt) ) {
970
971 fprintf(stderr,"Input Stream #%d.%d frame size changed to %dx%d, %s\n", ist->file_index, ist->index, ist->st->codec->width, ist->st->codec->height,avcodec_get_pix_fmt_name(ist->st->codec->pix_fmt));
972 if(!ost->video_resample)
973 av_exit(1);
974 }
975
976 if (ost->video_resample) {
977 padding_src = NULL;
978 final_picture = &ost->pict_tmp;
979 if( (ost->resample_height != (ist->st->codec->height - (ost->topBand + ost->bottomBand)))
980 || (ost->resample_width != (ist->st->codec->width - (ost->leftBand + ost->rightBand)))
981 || (ost->resample_pix_fmt!= ist->st->codec->pix_fmt) ) {
982
983 /* keep bands proportional to the frame size */
984 topBand = ((int64_t)ist->st->codec->height * ost->original_topBand / ost->original_height) & ~1;
985 bottomBand = ((int64_t)ist->st->codec->height * ost->original_bottomBand / ost->original_height) & ~1;
986 leftBand = ((int64_t)ist->st->codec->width * ost->original_leftBand / ost->original_width) & ~1;
987 rightBand = ((int64_t)ist->st->codec->width * ost->original_rightBand / ost->original_width) & ~1;
988
989 /* sanity check to ensure no bad band sizes sneak in */
990 assert(topBand <= INT_MAX && topBand >= 0);
991 assert(bottomBand <= INT_MAX && bottomBand >= 0);
992 assert(leftBand <= INT_MAX && leftBand >= 0);
993 assert(rightBand <= INT_MAX && rightBand >= 0);
994
995 ost->topBand = topBand;
996 ost->bottomBand = bottomBand;
997 ost->leftBand = leftBand;
998 ost->rightBand = rightBand;
999
1000 ost->resample_height = ist->st->codec->height - (ost->topBand + ost->bottomBand);
1001 ost->resample_width = ist->st->codec->width - (ost->leftBand + ost->rightBand);
1002 ost->resample_pix_fmt= ist->st->codec->pix_fmt;
1003
1004 /* initialize a new scaler context */
1005 sws_freeContext(ost->img_resample_ctx);
1006 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1007 ost->img_resample_ctx = sws_getContext(
1008 ist->st->codec->width - (ost->leftBand + ost->rightBand),
1009 ist->st->codec->height - (ost->topBand + ost->bottomBand),
1010 ist->st->codec->pix_fmt,
1011 ost->st->codec->width - (ost->padleft + ost->padright),
1012 ost->st->codec->height - (ost->padtop + ost->padbottom),
1013 ost->st->codec->pix_fmt,
1014 sws_flags, NULL, NULL, NULL);
1015 if (ost->img_resample_ctx == NULL) {
1016 fprintf(stderr, "Cannot get resampling context\n");
1017 av_exit(1);
1018 }
1019 }
1020 sws_scale(ost->img_resample_ctx, formatted_picture->data, formatted_picture->linesize,
1021 0, ost->resample_height, resampling_dst->data, resampling_dst->linesize);
1022 }
1023
1024 if (ost->video_pad) {
1025 av_picture_pad((AVPicture*)final_picture, (AVPicture *)padding_src,
1026 enc->height, enc->width, enc->pix_fmt,
1027 ost->padtop, ost->padbottom, ost->padleft, ost->padright, padcolor);
1028 }
1029
1030 /* duplicate the frame if needed */
1031 for(i=0;i<nb_frames;i++) {
1032 AVPacket pkt;
1033 av_init_packet(&pkt);
1034 pkt.stream_index= ost->index;
1035
1036 if (s->oformat->flags & AVFMT_RAWPICTURE) {
1037 /* raw pictures are written as AVPicture structure to
1038 avoid any copies. We temporarily support the older
1039 method. */
1040 AVFrame* old_frame = enc->coded_frame;
1041 enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
1042 pkt.data= (uint8_t *)final_picture;
1043 pkt.size= sizeof(AVPicture);
1044 pkt.pts= av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
1045 pkt.flags |= PKT_FLAG_KEY;
1046
1047 write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
1048 enc->coded_frame = old_frame;
1049 } else {
1050 AVFrame big_picture;
1051
1052 big_picture= *final_picture;
1053 /* better than nothing: use input picture interlaced
1054 settings */
1055 big_picture.interlaced_frame = in_picture->interlaced_frame;
1056 if(avcodec_opts[CODEC_TYPE_VIDEO]->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)){
1057 if(top_field_first == -1)
1058 big_picture.top_field_first = in_picture->top_field_first;
1059 else
1060 big_picture.top_field_first = top_field_first;
1061 }
1062
1063 /* handles sameq here. This is not correct because it may
1064 not be a global option */
1065 if (same_quality) {
1066 big_picture.quality = ist->st->quality;
1067 }else
1068 big_picture.quality = ost->st->quality;
1069 if(!me_threshold)
1070 big_picture.pict_type = 0;
1071 // big_picture.pts = AV_NOPTS_VALUE;
1072 big_picture.pts= ost->sync_opts;
1073 // big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
1074 //av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
1075 ret = avcodec_encode_video(enc,
1076 bit_buffer, bit_buffer_size,
1077 &big_picture);
1078 if (ret < 0) {
1079 fprintf(stderr, "Video encoding failed\n");
1080 av_exit(1);
1081 }
1082
1083 if(ret>0){
1084 pkt.data= bit_buffer;
1085 pkt.size= ret;
1086 if(enc->coded_frame->pts != AV_NOPTS_VALUE)
1087 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1088 /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n",
1089 pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
1090 pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
1091
1092 if(enc->coded_frame->key_frame)
1093 pkt.flags |= PKT_FLAG_KEY;
1094 write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
1095 *frame_size = ret;
1096 video_size += ret;
1097 //fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
1098 // enc->frame_number-1, ret, enc->pict_type);
1099 /* if two pass, output log */
1100 if (ost->logfile && enc->stats_out) {
1101 fprintf(ost->logfile, "%s", enc->stats_out);
1102 }
1103 }
1104 }
1105 ost->sync_opts++;
1106 ost->frame_number++;
1107 }
1108 }
1109
1110 static double psnr(double d){
1111 return -10.0*log(d)/log(10.0);
1112 }
1113
1114 static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
1115 int frame_size)
1116 {
1117 AVCodecContext *enc;
1118 int frame_number;
1119 double ti1, bitrate, avg_bitrate;
1120
1121 /* this is executed just the first time do_video_stats is called */
1122 if (!vstats_file) {
1123 vstats_file = fopen(vstats_filename, "w");
1124 if (!vstats_file) {
1125 perror("fopen");
1126 av_exit(1);
1127 }
1128 }
1129
1130 enc = ost->st->codec;
1131 if (enc->codec_type == CODEC_TYPE_VIDEO) {
1132 frame_number = ost->frame_number;
1133 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
1134 if (enc->flags&CODEC_FLAG_PSNR)
1135 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
1136
1137 fprintf(vstats_file,"f_size= %6d ", frame_size);
1138 /* compute pts value */
1139 ti1 = ost->sync_opts * av_q2d(enc->time_base);
1140 if (ti1 < 0.01)
1141 ti1 = 0.01;
1142
1143 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1144 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1145 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1146 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1147 fprintf(vstats_file,"type= %c\n", av_get_pict_type_char(enc->coded_frame->pict_type));
1148 }
1149 }
1150
1151 static void print_report(AVFormatContext **output_files,
1152 AVOutputStream **ost_table, int nb_ostreams,
1153 int is_last_report)
1154 {
1155 char buf[1024];
1156 AVOutputStream *ost;
1157 AVFormatContext *oc;
1158 int64_t total_size;
1159 AVCodecContext *enc;
1160 int frame_number, vid, i;
1161 double bitrate, ti1, pts;
1162 static int64_t last_time = -1;
1163 static int qp_histogram[52];
1164
1165 if (!is_last_report) {
1166 int64_t cur_time;
1167 /* display the report every 0.5 seconds */
1168 cur_time = av_gettime();
1169 if (last_time == -1) {
1170 last_time = cur_time;
1171 return;
1172 }
1173 if ((cur_time - last_time) < 500000)
1174 return;
1175 last_time = cur_time;
1176 }
1177
1178
1179 oc = output_files[0];
1180
1181 total_size = url_fsize(oc->pb);
1182 if(total_size<0) // FIXME improve url_fsize() so it works with non seekable output too
1183 total_size= url_ftell(oc->pb);
1184
1185 buf[0] = '\0';
1186 ti1 = 1e10;
1187 vid = 0;
1188 for(i=0;i<nb_ostreams;i++) {
1189 ost = ost_table[i];
1190 enc = ost->st->codec;
1191 if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1192 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ",
1193 !ost->st->stream_copy ?
1194 enc->coded_frame->quality/(float)FF_QP2LAMBDA : -1);
1195 }
1196 if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1197 float t = (av_gettime()-timer_start) / 1000000.0;
1198
1199 frame_number = ost->frame_number;
1200 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1201 frame_number, (t>1)?(int)(frame_number/t+0.5) : 0,
1202 !ost->st->stream_copy ?
1203 enc->coded_frame->quality/(float)FF_QP2LAMBDA : -1);
1204 if(is_last_report)
1205 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1206 if(qp_hist){
1207 int j;
1208 int qp= lrintf(enc->coded_frame->quality/(float)FF_QP2LAMBDA);
1209 if(qp>=0 && qp<FF_ARRAY_ELEMS(qp_histogram))
1210 qp_histogram[qp]++;
1211 for(j=0; j<32; j++)
1212 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j]+1)/log(2)));
1213 }
1214 if (enc->flags&CODEC_FLAG_PSNR){
1215 int j;
1216 double error, error_sum=0;
1217 double scale, scale_sum=0;
1218 char type[3]= {'Y','U','V'};
1219 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1220 for(j=0; j<3; j++){
1221 if(is_last_report){
1222 error= enc->error[j];
1223 scale= enc->width*enc->height*255.0*255.0*frame_number;
1224 }else{
1225 error= enc->coded_frame->error[j];
1226 scale= enc->width*enc->height*255.0*255.0;
1227 }
1228 if(j) scale/=4;
1229 error_sum += error;
1230 scale_sum += scale;
1231 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
1232 }
1233 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
1234 }
1235 vid = 1;
1236 }
1237 /* compute min output value */
1238 pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1239 if ((pts < ti1) && (pts > 0))
1240 ti1 = pts;
1241 }
1242 if (ti1 < 0.01)
1243 ti1 = 0.01;
1244
1245 if (verbose || is_last_report) {
1246 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1247
1248 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1249 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1250 (double)total_size / 1024, ti1, bitrate);
1251
1252 if (nb_frames_dup || nb_frames_drop)
1253 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1254 nb_frames_dup, nb_frames_drop);
1255
1256 if (verbose >= 0)
1257 fprintf(stderr, "%s \r", buf);
1258
1259 fflush(stderr);
1260 }
1261
1262 if (is_last_report && verbose >= 0){
1263 int64_t raw= audio_size + video_size + extra_size;
1264 fprintf(stderr, "\n");
1265 fprintf(stderr, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1266 video_size/1024.0,
1267 audio_size/1024.0,
1268 extra_size/1024.0,
1269 100.0*(total_size - raw)/raw
1270 );
1271 }
1272 }
1273
1274 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1275 static int output_packet(AVInputStream *ist, int ist_index,
1276 AVOutputStream **ost_table, int nb_ostreams,
1277 const AVPacket *pkt)
1278 {
1279 AVFormatContext *os;
1280 AVOutputStream *ost;
1281 int ret, i;
1282 int got_picture;
1283 AVFrame picture;
1284 void *buffer_to_free;
1285 static unsigned int samples_size= 0;
1286 AVSubtitle subtitle, *subtitle_to_free;
1287 int got_subtitle;
1288 AVPacket avpkt;
1289 int bps = av_get_bits_per_sample_format(ist->st->codec->sample_fmt)>>3;
1290
1291 if(ist->next_pts == AV_NOPTS_VALUE)
1292 ist->next_pts= ist->pts;
1293
1294 if (pkt == NULL) {
1295 /* EOF handling */
1296 av_init_packet(&avpkt);
1297 avpkt.data = NULL;
1298 avpkt.size = 0;
1299 goto handle_eof;
1300 } else {
1301 avpkt = *pkt;
1302 }
1303
1304 if(pkt->dts != AV_NOPTS_VALUE)
1305 ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1306
1307 //while we have more to decode or while the decoder did output something on EOF
1308 while (avpkt.size > 0 || (!pkt && ist->next_pts != ist->pts)) {
1309 uint8_t *data_buf, *decoded_data_buf;
1310 int data_size, decoded_data_size;
1311 handle_eof:
1312 ist->pts= ist->next_pts;
1313
1314 if(avpkt.size && avpkt.size != pkt->size &&
1315 !(ist->st->codec->codec->capabilities & CODEC_CAP_SUBFRAMES) && verbose>0)
1316 fprintf(stderr, "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1317
1318 /* decode the packet if needed */
1319 decoded_data_buf = NULL; /* fail safe */
1320 decoded_data_size= 0;
1321 data_buf = avpkt.data;
1322 data_size = avpkt.size;
1323 subtitle_to_free = NULL;
1324 if (ist->decoding_needed) {
1325 switch(ist->st->codec->codec_type) {
1326 case CODEC_TYPE_AUDIO:{
1327 if(pkt && samples_size < FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
1328 samples_size = FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE);
1329 av_free(samples);
1330 samples= av_malloc(samples_size);
1331 }
1332 decoded_data_size= samples_size;
1333 /* XXX: could avoid copy if PCM 16 bits with same
1334 endianness as CPU */
1335 ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
1336 &avpkt);
1337 if (ret < 0)
1338 goto fail_decode;
1339 avpkt.data += ret;
1340 avpkt.size -= ret;
1341 data_size = ret;
1342 /* Some bug in the mpeg audio decoder gives decoded_data_size < 0;
1343 it seems these are overflows */
1344 if (decoded_data_size <= 0) {
1345 /* no audio frame */
1346 continue;
1347 }
1348 decoded_data_buf = (uint8_t *)samples;
1349 ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
1350 (ist->st->codec->sample_rate * ist->st->codec->channels);
1351 break;}
1352 case CODEC_TYPE_VIDEO:
1353 decoded_data_size = (ist->st->codec->width * ist->st->codec->height * 3) / 2;
1354 /* XXX: allocate picture correctly */
1355 avcodec_get_frame_defaults(&picture);
1356
1357 ret = avcodec_decode_video2(ist->st->codec,
1358 &picture, &got_picture, &avpkt);
1359 ist->st->quality= picture.quality;
1360 if (ret < 0)
1361 goto fail_decode;
1362 if (!got_picture) {
1363 /* no picture yet */
1364 goto discard_packet;
1365 }
1366 if (ist->st->codec->time_base.num != 0) {
1367 int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
1368 ist->next_pts += ((int64_t)AV_TIME_BASE *
1369 ist->st->codec->time_base.num * ticks) /
1370 ist->st->codec->time_base.den;
1371 }
1372 avpkt.size = 0;
1373 break;
1374 case CODEC_TYPE_SUBTITLE:
1375 ret = avcodec_decode_subtitle2(ist->st->codec,
1376 &subtitle, &got_subtitle, &avpkt);
1377 if (ret < 0)
1378 goto fail_decode;
1379 if (!got_subtitle) {
1380 goto discard_packet;
1381 }
1382 subtitle_to_free = &subtitle;
1383 avpkt.size = 0;
1384 break;
1385 default:
1386 goto fail_decode;
1387 }
1388 } else {
1389 switch(ist->st->codec->codec_type) {
1390 case CODEC_TYPE_AUDIO:
1391 ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
1392 ist->st->codec->sample_rate;
1393 break;
1394 case CODEC_TYPE_VIDEO:
1395 if (ist->st->codec->time_base.num != 0) {
1396 int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
1397 ist->next_pts += ((int64_t)AV_TIME_BASE *
1398 ist->st->codec->time_base.num * ticks) /
1399 ist->st->codec->time_base.den;
1400 }
1401 break;
1402 }
1403 ret = avpkt.size;
1404 avpkt.size = 0;
1405 }
1406
1407 buffer_to_free = NULL;
1408 if (ist->st->codec->codec_type == CODEC_TYPE_VIDEO) {
1409 pre_process_video_frame(ist, (AVPicture *)&picture,
1410 &buffer_to_free);
1411 }
1412
1413 // preprocess audio (volume)
1414 if (ist->st->codec->codec_type == CODEC_TYPE_AUDIO) {
1415 if (audio_volume != 256) {
1416 short *volp;
1417 volp = samples;
1418 for(i=0;i<(decoded_data_size / sizeof(short));i++) {
1419 int v = ((*volp) * audio_volume + 128) >> 8;
1420 if (v < -32768) v = -32768;
1421 if (v > 32767) v = 32767;
1422 *volp++ = v;
1423 }
1424 }
1425 }
1426
1427 /* frame rate emulation */
1428 if (rate_emu) {
1429 int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
1430 int64_t now = av_gettime() - ist->start;
1431 if (pts > now)
1432 usleep(pts - now);
1433 }
1434
1435 /* if output time reached then transcode raw format,
1436 encode packets and output them */
1437 if (start_time == 0 || ist->pts >= start_time)
1438 for(i=0;i<nb_ostreams;i++) {
1439 int frame_size;
1440
1441 ost = ost_table[i];
1442 if (ost->source_index == ist_index) {
1443 os = output_files[ost->file_index];
1444
1445 /* set the input output pts pairs */
1446 //ost->sync_ipts = (double)(ist->pts + input_files_ts_offset[ist->file_index] - start_time)/ AV_TIME_BASE;
1447
1448 if (ost->encoding_needed) {
1449 assert(ist->decoding_needed);
1450 switch(ost->st->codec->codec_type) {
1451 case CODEC_TYPE_AUDIO:
1452 do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
1453 break;
1454 case CODEC_TYPE_VIDEO:
1455 do_video_out(os, ost, ist, &picture, &frame_size);
1456 if (vstats_filename && frame_size)
1457 do_video_stats(os, ost, frame_size);
1458 break;
1459 case CODEC_TYPE_SUBTITLE:
1460 do_subtitle_out(os, ost, ist, &subtitle,
1461 pkt->pts);
1462 break;
1463 default:
1464 abort();
1465 }
1466 } else {
1467 AVFrame avframe; //FIXME/XXX remove this
1468 AVPacket opkt;
1469 int64_t ost_tb_start_time= av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1470
1471 av_init_packet(&opkt);
1472
1473 if ((!ost->frame_number && !(pkt->flags & PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
1474 continue;
1475
1476 /* no reencoding needed : output the packet directly */
1477 /* force the input stream PTS */
1478
1479 avcodec_get_frame_defaults(&avframe);
1480 ost->st->codec->coded_frame= &avframe;
1481 avframe.key_frame = pkt->flags & PKT_FLAG_KEY;
1482
1483 if(ost->st->codec->codec_type == CODEC_TYPE_AUDIO)
1484 audio_size += data_size;
1485 else if (ost->st->codec->codec_type == CODEC_TYPE_VIDEO) {
1486 video_size += data_size;
1487 ost->sync_opts++;
1488 }
1489
1490 opkt.stream_index= ost->index;
1491 if(pkt->pts != AV_NOPTS_VALUE)
1492 opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1493 else
1494 opkt.pts= AV_NOPTS_VALUE;
1495
1496 if (pkt->dts == AV_NOPTS_VALUE)
1497 opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
1498 else
1499 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1500 opkt.dts -= ost_tb_start_time;
1501
1502 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1503 opkt.flags= pkt->flags;
1504
1505 //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1506 if(ost->st->codec->codec_id != CODEC_ID_H264) {
1507 if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & PKT_FLAG_KEY))
1508 opkt.destruct= av_destruct_packet;
1509 } else {
1510 opkt.data = data_buf;
1511 opkt.size = data_size;
1512 }
1513
1514 write_frame(os, &opkt, ost->st->codec, bitstream_filters[ost->file_index][opkt.stream_index]);
1515 ost->st->codec->frame_number++;
1516 ost->frame_number++;
1517 av_free_packet(&opkt);
1518 }
1519 }
1520 }
1521 av_free(buffer_to_free);
1522 /* XXX: allocate the subtitles in the codec ? */
1523 if (subtitle_to_free) {
1524 if (subtitle_to_free->rects != NULL) {
1525 for (i = 0; i < subtitle_to_free->num_rects; i++) {
1526 av_freep(&subtitle_to_free->rects[i]->pict.data[0]);
1527 av_freep(&subtitle_to_free->rects[i]->pict.data[1]);
1528 av_freep(&subtitle_to_free->rects[i]);
1529 }
1530 av_freep(&subtitle_to_free->rects);
1531 }
1532 subtitle_to_free->num_rects = 0;
1533 subtitle_to_free = NULL;
1534 }
1535 }
1536 discard_packet:
1537 if (pkt == NULL) {
1538 /* EOF handling */
1539
1540 for(i=0;i<nb_ostreams;i++) {
1541 ost = ost_table[i];
1542 if (ost->source_index == ist_index) {
1543 AVCodecContext *enc= ost->st->codec;
1544 os = output_files[ost->file_index];
1545
1546 if(ost->st->codec->codec_type == CODEC_TYPE_AUDIO && enc->frame_size <=1)
1547 continue;
1548 if(ost->st->codec->codec_type == CODEC_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
1549 continue;
1550
1551 if (ost->encoding_needed) {
1552 for(;;) {
1553 AVPacket pkt;
1554 int fifo_bytes;
1555 av_init_packet(&pkt);
1556 pkt.stream_index= ost->index;
1557
1558 switch(ost->st->codec->codec_type) {
1559 case CODEC_TYPE_AUDIO:
1560 fifo_bytes = av_fifo_size(ost->fifo);
1561 ret = 0;
1562 /* encode any samples remaining in fifo */
1563 if (fifo_bytes > 0) {
1564 int osize = av_get_bits_per_sample_format(enc->sample_fmt) >> 3;
1565 int fs_tmp = enc->frame_size;
1566
1567 av_fifo_generic_read(ost->fifo, samples, fifo_bytes, NULL);
1568 if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
1569 enc->frame_size = fifo_bytes / (osize * enc->channels);
1570 } else { /* pad */
1571 int frame_bytes = enc->frame_size*osize*enc->channels;
1572 if (samples_size < frame_bytes)
1573 av_exit(1);
1574 memset((uint8_t*)samples+fifo_bytes, 0, frame_bytes - fifo_bytes);
1575 }
1576
1577 ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, samples);
1578 pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
1579 ost->st->time_base.num, enc->sample_rate);
1580 enc->frame_size = fs_tmp;
1581 }
1582 if(ret <= 0) {
1583 ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
1584 }
1585 if (ret < 0) {
1586 fprintf(stderr, "Audio encoding failed\n");
1587 av_exit(1);
1588 }
1589 audio_size += ret;
1590 pkt.flags |= PKT_FLAG_KEY;
1591 break;
1592 case CODEC_TYPE_VIDEO:
1593 ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
1594 if (ret < 0) {
1595 fprintf(stderr, "Video encoding failed\n");
1596 av_exit(1);
1597 }
1598 video_size += ret;
1599 if(enc->coded_frame && enc->coded_frame->key_frame)
1600 pkt.flags |= PKT_FLAG_KEY;
1601 if (ost->logfile && enc->stats_out) {
1602 fprintf(ost->logfile, "%s", enc->stats_out);
1603 }
1604 break;
1605 default:
1606 ret=-1;
1607 }
1608
1609 if(ret<=0)
1610 break;
1611 pkt.data= bit_buffer;
1612 pkt.size= ret;
1613 if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1614 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1615 write_frame(os, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
1616 }
1617 }
1618 }
1619 }
1620 }
1621
1622 return 0;
1623 fail_decode:
1624 return -1;
1625 }
1626
1627 static void print_sdp(AVFormatContext **avc, int n)
1628 {
1629 char sdp[2048];
1630
1631 avf_sdp_create(avc, n, sdp, sizeof(sdp));
1632 printf("SDP:\n%s\n", sdp);
1633 fflush(stdout);
1634 }
1635
1636 static int stream_index_from_inputs(AVFormatContext **input_files,
1637 int nb_input_files,
1638 AVInputFile *file_table,
1639 AVInputStream **ist_table,
1640 enum CodecType type,
1641 int programid)
1642 {
1643 int p, q, z;
1644 for(z=0; z<nb_input_files; z++) {
1645 AVFormatContext *ic = input_files[z];
1646 for(p=0; p<ic->nb_programs; p++) {
1647 AVProgram *program = ic->programs[p];
1648 if(program->id != programid)
1649 continue;
1650 for(q=0; q<program->nb_stream_indexes; q++) {
1651 int sidx = program->stream_index[q];
1652 int ris = file_table[z].ist_index + sidx;
1653 if(ist_table[ris]->discard && ic->streams[sidx]->codec->codec_type == type)
1654 return ris;
1655 }
1656 }
1657 }
1658
1659 return -1;
1660 }
1661
1662 /*
1663 * The following code is the main loop of the file converter
1664 */
1665 static int av_encode(AVFormatContext **output_files,
1666 int nb_output_files,
1667 AVFormatContext **input_files,
1668 int nb_input_files,
1669 AVStreamMap *stream_maps, int nb_stream_maps)
1670 {
1671 int ret = 0, i, j, k, n, nb_istreams = 0, nb_ostreams = 0;
1672 AVFormatContext *is, *os;
1673 AVCodecContext *codec, *icodec;
1674 AVOutputStream *ost, **ost_table = NULL;
1675 AVInputStream *ist, **ist_table = NULL;
1676 AVInputFile *file_table;
1677 char error[1024];
1678 int key;
1679 int want_sdp = 1;
1680 uint8_t no_packet[MAX_FILES]={0};
1681 int no_packet_count=0;
1682
1683 file_table= av_mallocz(nb_input_files * sizeof(AVInputFile));
1684 if (!file_table)
1685 goto fail;
1686
1687 /* input stream init */
1688 j = 0;
1689 for(i=0;i<nb_input_files;i++) {
1690 is = input_files[i];
1691 file_table[i].ist_index = j;
1692 file_table[i].nb_streams = is->nb_streams;
1693 j += is->nb_streams;
1694 }
1695 nb_istreams = j;
1696
1697 ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *));
1698 if (!ist_table)
1699 goto fail;
1700
1701 for(i=0;i<nb_istreams;i++) {
1702 ist = av_mallocz(sizeof(AVInputStream));
1703 if (!ist)
1704 goto fail;
1705 ist_table[i] = ist;
1706 }
1707 j = 0;
1708 for(i=0;i<nb_input_files;i++) {
1709 is = input_files[i];
1710 for(k=0;k<is->nb_streams;k++) {
1711 ist = ist_table[j++];
1712 ist->st = is->streams[k];
1713 ist->file_index = i;
1714 ist->index = k;
1715 ist->discard = 1; /* the stream is discarded by default
1716 (changed later) */
1717
1718 if (rate_emu) {
1719 ist->start = av_gettime();
1720 }
1721 }
1722 }
1723
1724 /* output stream init */
1725 nb_ostreams = 0;
1726 for(i=0;i<nb_output_files;i++) {
1727 os = output_files[i];
1728 if (!os->nb_streams) {
1729 dump_format(output_files[i], i, output_files[i]->filename, 1);
1730 fprintf(stderr, "Output file #%d does not contain any stream\n", i);
1731 av_exit(1);
1732 }
1733 nb_ostreams += os->nb_streams;
1734 }
1735 if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) {
1736 fprintf(stderr, "Number of stream maps must match number of output streams\n");
1737 av_exit(1);
1738 }
1739
1740 /* Sanity check the mapping args -- do the input files & streams exist? */
1741 for(i=0;i<nb_stream_maps;i++) {
1742 int fi = stream_maps[i].file_index;
1743 int si = stream_maps[i].stream_index;
1744
1745 if (fi < 0 || fi > nb_input_files - 1 ||
1746 si < 0 || si > file_table[fi].nb_streams - 1) {
1747 fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si);
1748 av_exit(1);
1749 }
1750 fi = stream_maps[i].sync_file_index;
1751 si = stream_maps[i].sync_stream_index;
1752 if (fi < 0 || fi > nb_input_files - 1 ||
1753 si < 0 || si > file_table[fi].nb_streams - 1) {
1754 fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si);
1755 av_exit(1);
1756 }
1757 }
1758
1759 ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams);
1760 if (!ost_table)
1761 goto fail;
1762 for(i=0;i<nb_ostreams;i++) {
1763 ost = av_mallocz(sizeof(AVOutputStream));
1764 if (!ost)
1765 goto fail;
1766 ost_table[i] = ost;
1767 }
1768
1769 n = 0;
1770 for(k=0;k<nb_output_files;k++) {
1771 os = output_files[k];
1772 for(i=0;i<os->nb_streams;i++,n++) {
1773 int found;
1774 ost = ost_table[n];
1775 ost->file_index = k;
1776 ost->index = i;
1777 ost->st = os->streams[i];
1778 if (nb_stream_maps > 0) {
1779 ost->source_index = file_table[stream_maps[n].file_index].ist_index +
1780 stream_maps[n].stream_index;
1781
1782 /* Sanity check that the stream types match */
1783 if (ist_table[ost->source_index]->st->codec->codec_type != ost->st->codec->codec_type) {
1784 int i= ost->file_index;
1785 dump_format(output_files[i], i, output_files[i]->filename, 1);
1786 fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n",
1787 stream_maps[n].file_index, stream_maps[n].stream_index,
1788 ost->file_index, ost->index);
1789 av_exit(1);
1790 }
1791
1792 } else {
1793 if(opt_programid) {
1794 found = 0;
1795 j = stream_index_from_inputs(input_files, nb_input_files, file_table, ist_table, ost->st->codec->codec_type, opt_programid);
1796 if(j != -1) {
1797 ost->source_index = j;
1798 found = 1;
1799 }
1800 } else {
1801 /* get the corresponding input stream index: we select the first one with the right type */
1802 found = 0;
1803 for(j=0;j<nb_istreams;j++) {
1804 ist = ist_table[j];
1805 if (ist->discard &&
1806 ist->st->codec->codec_type == ost->st->codec->codec_type) {
1807 ost->source_index = j;
1808 found = 1;
1809 break;
1810 }
1811 }
1812 }
1813
1814 if (!found) {
1815 if(! opt_programid) {
1816 /* try again and reuse existing stream */
1817 for(j=0;j<nb_istreams;j++) {
1818 ist = ist_table[j];
1819 if (ist->st->codec->codec_type == ost->st->codec->codec_type) {
1820 ost->source_index = j;
1821 found = 1;
1822 }
1823 }
1824 }
1825 if (!found) {
1826 int i= ost->file_index;
1827 dump_format(output_files[i], i, output_files[i]->filename, 1);
1828 fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n",
1829 ost->file_index, ost->index);
1830 av_exit(1);
1831 }
1832 }
1833 }
1834 ist = ist_table[ost->source_index];
1835 ist->discard = 0;
1836 ost->sync_ist = (nb_stream_maps > 0) ?
1837 ist_table[file_table[stream_maps[n].sync_file_index].ist_index +
1838 stream_maps[n].sync_stream_index] : ist;
1839 }
1840 }
1841
1842 /* for each output stream, we compute the right encoding parameters */
1843 for(i=0;i<nb_ostreams;i++) {
1844 AVMetadataTag *lang;
1845 ost = ost_table[i];
1846 os = output_files[ost->file_index];
1847 ist = ist_table[ost->source_index];
1848
1849 codec = ost->st->codec;
1850 icodec = ist->st->codec;
1851
1852 if ((lang=av_metadata_get(ist->st->metadata, "language", NULL, 0))
1853 && !av_metadata_get(ost->st->metadata, "language", NULL, 0))
1854 av_metadata_set(&ost->st->metadata, "language", lang->value);
1855
1856 ost->st->disposition = ist->st->disposition;
1857 codec->bits_per_raw_sample= icodec->bits_per_raw_sample;
1858 codec->chroma_sample_location = icodec->chroma_sample_location;
1859
1860 if (ost->st->stream_copy) {
1861 /* if stream_copy is selected, no need to decode or encode */
1862 codec->codec_id = icodec->codec_id;
1863 codec->codec_type = icodec->codec_type;
1864
1865 if(!codec->codec_tag){
1866 if( !os->oformat->codec_tag
1867 || av_codec_get_id (os->oformat->codec_tag, icodec->codec_tag) == codec->codec_id
1868 || av_codec_get_tag(os->oformat->codec_tag, icodec->codec_id) <= 0)
1869 codec->codec_tag = icodec->codec_tag;
1870 }
1871
1872 codec->bit_rate = icodec->bit_rate;
1873 codec->extradata= icodec->extradata;
1874 codec->extradata_size= icodec->extradata_size;
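/* pick the time base for stream copy: if the container time base is
   suspiciously fine (less than 1 ms per tick) and the codec time base
   scaled by ticks_per_frame is coarser, prefer the codec time base */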
1875 if(av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/1000){
1876 codec->time_base = icodec->time_base;
1877 codec->time_base.num *= icodec->ticks_per_frame;
1878 }else
1879 codec->time_base = ist->st->time_base;
1880 switch(codec->codec_type) {
1881 case CODEC_TYPE_AUDIO:
1882 if(audio_volume != 256) {
1883 fprintf(stderr,"-acodec copy and -vol are incompatible (frames are not decoded)\n");
1884 av_exit(1);
1885 }
1886 codec->channel_layout = icodec->channel_layout;
1887 codec->sample_rate = icodec->sample_rate;
1888 codec->channels = icodec->channels;
1889 codec->frame_size = icodec->frame_size;
1890 codec->block_align= icodec->block_align;
1891 if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3)
1892 codec->block_align= 0;
1893 if(codec->codec_id == CODEC_ID_AC3)
1894 codec->block_align= 0;
1895 break;
1896 case CODEC_TYPE_VIDEO:
1897 codec->pix_fmt = icodec->pix_fmt;
1898 codec->width = icodec->width;
1899 codec->height = icodec->height;
1900 codec->has_b_frames = icodec->has_b_frames;
1901 break;
1902 case CODEC_TYPE_SUBTITLE:
1903 codec->width = icodec->width;
1904 codec->height = icodec->height;
1905 break;
1906 default:
1907 abort();
1908 }
1909 } else {
1910 switch(codec->codec_type) {
1911 case CODEC_TYPE_AUDIO:
1912 ost->fifo= av_fifo_alloc(1024);
1913 if(!ost->fifo)
1914 goto fail;
1915 ost->reformat_pair = MAKE_SFMT_PAIR(SAMPLE_FMT_NONE,SAMPLE_FMT_NONE);
1916 ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1;
1917 icodec->request_channels = codec->channels;
1918 ist->decoding_needed = 1;
1919 ost->encoding_needed = 1;
1920 break;
1921 case CODEC_TYPE_VIDEO:
1922 if (ost->st->codec->pix_fmt == PIX_FMT_NONE) {
1923 fprintf(stderr, "Video pixel format is unknown, stream cannot be decoded\n");
1924 av_exit(1);
1925 }
1926 ost->video_crop = ((frame_leftBand + frame_rightBand + frame_topBand + frame_bottomBand) != 0);
1927 ost->video_pad = ((frame_padleft + frame_padright + frame_padtop + frame_padbottom) != 0);
1928 ost->video_resample = ((codec->width != icodec->width -
1929 (frame_leftBand + frame_rightBand) +
1930 (frame_padleft + frame_padright)) ||
1931 (codec->height != icodec->height -
1932 (frame_topBand + frame_bottomBand) +
1933 (frame_padtop + frame_padbottom)) ||
1934 (codec->pix_fmt != icodec->pix_fmt));
1935 if (ost->video_crop) {
1936 ost->topBand = ost->original_topBand = frame_topBand;
1937 ost->bottomBand = ost->original_bottomBand = frame_bottomBand;
1938 ost->leftBand = ost->original_leftBand = frame_leftBand;
1939 ost->rightBand = ost->original_rightBand = frame_rightBand;
1940 }
1941 if (ost->video_pad) {
1942 ost->padtop = frame_padtop;
1943 ost->padleft = frame_padleft;
1944 ost->padbottom = frame_padbottom;
1945 ost->padright = frame_padright;
1946 if (!ost->video_resample) {
1947 avcodec_get_frame_defaults(&ost->pict_tmp);
1948 if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
1949 codec->width, codec->height))
1950 goto fail;
1951 }
1952 }
1953 if (ost->video_resample) {
1954 avcodec_get_frame_defaults(&ost->pict_tmp);
1955 if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
1956 codec->width, codec->height)) {
1957 fprintf(stderr, "Cannot allocate temp picture, check pix fmt\n");
1958 av_exit(1);
1959 }
1960 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1961 ost->img_resample_ctx = sws_getContext(
1962 icodec->width - (frame_leftBand + frame_rightBand),
1963 icodec->height - (frame_topBand + frame_bottomBand),
1964 icodec->pix_fmt,
1965 codec->width - (frame_padleft + frame_padright),
1966 codec->height - (frame_padtop + frame_padbottom),
1967 codec->pix_fmt,
1968 sws_flags, NULL, NULL, NULL);
1969 if (ost->img_resample_ctx == NULL) {
1970 fprintf(stderr, "Cannot get resampling context\n");
1971 av_exit(1);
1972 }
1973
1974 ost->original_height = icodec->height;
1975 ost->original_width = icodec->width;
1976
1977 codec->bits_per_raw_sample= 0;
1978 }
1979 ost->resample_height = icodec->height - (frame_topBand + frame_bottomBand);
1980 ost->resample_width = icodec->width - (frame_leftBand + frame_rightBand);
1981 ost->resample_pix_fmt= icodec->pix_fmt;
1982 ost->encoding_needed = 1;
1983 ist->decoding_needed = 1;
1984 break;
1985 case CODEC_TYPE_SUBTITLE:
1986 ost->encoding_needed = 1;
1987 ist->decoding_needed = 1;
1988 break;
1989 default:
1990 abort();
1991 break;
1992 }
1993 /* two pass mode */
1994 if (ost->encoding_needed &&
1995 (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1996 char logfilename[1024];
1997 FILE *f;
1998 int size;
1999 char *logbuffer;
2000
2001 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2002 pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
2003 i);
2004 if (codec->flags & CODEC_FLAG_PASS1) {
2005 f = fopen(logfilename, "w");
2006 if (!f) {
2007 fprintf(stderr, "Cannot write log file '%s' for pass-1 encoding: %s\n", logfilename, strerror(errno));
2008 av_exit(1);
2009 }
2010 ost->logfile = f;
2011 } else {
2012 /* read the log file */
2013 f = fopen(logfilename, "r");
2014 if (!f) {
2015 fprintf(stderr, "Cannot read log file '%s' for pass-2 encoding: %s\n", logfilename, strerror(errno));
2016 av_exit(1);
2017 }
2018 fseek(f, 0, SEEK_END);
2019 size = ftell(f);
2020 fseek(f, 0, SEEK_SET);
2021 logbuffer = av_malloc(size + 1);
2022 if (!logbuffer) {
2023 fprintf(stderr, "Could not allocate log buffer\n");
2024 av_exit(1);
2025 }
2026 size = fread(logbuffer, 1, size, f);
2027 fclose(f);
2028 logbuffer[size] = '\0';
2029 codec->stats_in = logbuffer;
2030 }
2031 }
2032 }
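/* grow the shared encode output buffer so it can hold the largest video
   frame we may produce (a rough worst case of 6 bytes per pixel plus a
   small headroom) */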
2033 if(codec->codec_type == CODEC_TYPE_VIDEO){
2034 int size= codec->width * codec->height;
2035 bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 200);
2036 }
2037 }
2038
2039 if (!bit_buffer)
2040 bit_buffer = av_malloc(bit_buffer_size);
2041 if (!bit_buffer) {
2042 fprintf(stderr, "Cannot allocate %d bytes output buffer\n",
2043 bit_buffer_size);
2044 ret = AVERROR(ENOMEM);
2045 goto fail;
2046 }
2047
2048 /* open each encoder */
2049 for(i=0;i<nb_ostreams;i++) {
2050 ost = ost_table[i];
2051 if (ost->encoding_needed) {
2052 AVCodec *codec = output_codecs[i];
2053 if (!codec)
2054 codec = avcodec_find_encoder(ost->st->codec->codec_id);
2055 if (!codec) {
2056 snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d.%d",
2057 ost->st->codec->codec_id, ost->file_index, ost->index);
2058 ret = AVERROR(EINVAL);
2059 goto dump_format;
2060 }
2061 if (avcodec_open(ost->st->codec, codec) < 0) {
2062 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2063 ost->file_index, ost->index);
2064 ret = AVERROR(EINVAL);
2065 goto dump_format;
2066 }
2067 extra_size += ost->st->codec->extradata_size;
2068 }
2069 }
2070
2071 /* open each decoder */
2072 for(i=0;i<nb_istreams;i++) {
2073 ist = ist_table[i];
2074 if (ist->decoding_needed) {
2075 AVCodec *codec = input_codecs[i];
2076 if (!codec)
2077 codec = avcodec_find_decoder(ist->st->codec->codec_id);
2078 if (!codec) {
2079 snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d",
2080 ist->st->codec->codec_id, ist->file_index, ist->index);
2081 ret = AVERROR(EINVAL);
2082 goto dump_format;
2083 }
2084 if (avcodec_open(ist->st->codec, codec) < 0) {
2085 snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d",
2086 ist->file_index, ist->index);
2087 ret = AVERROR(EINVAL);
2088 goto dump_format;
2089 }
2090 //if (ist->st->codec->codec_type == CODEC_TYPE_VIDEO)
2091 // ist->st->codec->flags |= CODEC_FLAG_REPEAT_FIELD;
2092 }
2093 }
2094
2095 /* init pts */
2096 for(i=0;i<nb_istreams;i++) {
2097 ist = ist_table[i];
2098 ist->pts = 0;
2099 ist->next_pts = AV_NOPTS_VALUE;
2100 ist->is_start = 1;
2101 }
2102
2103 /* set meta data information from input file if required */
2104 for (i=0;i<nb_meta_data_maps;i++) {
2105 AVFormatContext *out_file;
2106 AVFormatContext *in_file;
2107 AVMetadataTag *mtag;
2108
2109 int out_file_index = meta_data_maps[i].out_file;
2110 int in_file_index = meta_data_maps[i].in_file;
2111 if (out_file_index < 0 || out_file_index >= nb_output_files) {
2112 snprintf(error, sizeof(error), "Invalid output file index %d map_meta_data(%d,%d)",
2113 out_file_index, out_file_index, in_file_index);
2114 ret = AVERROR(EINVAL);
2115 goto dump_format;
2116 }
2117 if (in_file_index < 0 || in_file_index >= nb_input_files) {
2118 snprintf(error, sizeof(error), "Invalid input file index %d map_meta_data(%d,%d)",
2119 in_file_index, out_file_index, in_file_index);
2120 ret = AVERROR(EINVAL);
2121 goto dump_format;
2122 }
2123
2124 out_file = output_files[out_file_index];
2125 in_file = input_files[in_file_index];
2126
2127
2128 mtag=NULL;
2129 while((mtag=av_metadata_get(in_file->metadata, "", mtag, AV_METADATA_IGNORE_SUFFIX)))
2130 av_metadata_set(&out_file->metadata, mtag->key, mtag->value);
2131 av_metadata_conv(out_file, out_file->oformat->metadata_conv,
2132 in_file->iformat->metadata_conv);
2133 }
2134
2135 /* open files and write file headers */
2136 for(i=0;i<nb_output_files;i++) {
2137 os = output_files[i];
2138 if (av_write_header(os) < 0) {
2139 snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
2140 ret = AVERROR(EINVAL);
2141 goto dump_format;
2142 }
2143 if (strcmp(output_files[i]->oformat->name, "rtp")) {
2144 want_sdp = 0;
2145 }
2146 }
2147
2148 dump_format:
2149 /* dump the file output parameters - this cannot be done earlier, since
2150 in the stream copy case the parameters are only set just above */
2151 for(i=0;i<nb_output_files;i++) {
2152 dump_format(output_files[i], i, output_files[i]->filename, 1);
2153 }
2154
2155 /* dump the stream mapping */
2156 if (verbose >= 0) {
2157 fprintf(stderr, "Stream mapping:\n");
2158 for(i=0;i<nb_ostreams;i++) {
2159 ost = ost_table[i];
2160 fprintf(stderr, " Stream #%d.%d -> #%d.%d",
2161 ist_table[ost->source_index]->file_index,
2162 ist_table[ost->source_index]->index,
2163 ost->file_index,
2164 ost->index);
2165 if (ost->sync_ist != ist_table[ost->source_index])
2166 fprintf(stderr, " [sync #%d.%d]",
2167 ost->sync_ist->file_index,
2168 ost->sync_ist->index);
2169 fprintf(stderr, "\n");
2170 }
2171 }
2172
2173 if (ret) {
2174 fprintf(stderr, "%s\n", error);
2175 goto fail;
2176 }
2177
2178 if (want_sdp) {
2179 print_sdp(output_files, nb_output_files);
2180 }
2181
2182 if (!using_stdin && verbose >= 0) {
2183 fprintf(stderr, "Press [q] to stop encoding\n");
2184 url_set_interrupt_cb(decode_interrupt_cb);
2185 }
2186 term_init();
2187
2188 timer_start = av_gettime();
2189
2190 for(; received_sigterm == 0;) {
2191 int file_index, ist_index;
2192 AVPacket pkt;
2193 double ipts_min;
2194 double opts_min;
2195
2196 redo:
2197 ipts_min= 1e100;
2198 opts_min= 1e100;
2199 /* if 'q' pressed, exits */
2200 if (!using_stdin) {
2201 if (q_pressed)
2202 break;
2203 /* read_key() returns 0 on EOF */
2204 key = read_key();
2205 if (key == 'q')
2206 break;
2207 }
2208
2209 /* select the stream that we must read now by looking at the
2210 smallest output pts */
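/* (when input_sync is set, the file with the smallest input pts is
   chosen instead) */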
2211 file_index = -1;
2212 for(i=0;i<nb_ostreams;i++) {
2213 double ipts, opts;
2214 ost = ost_table[i];
2215 os = output_files[ost->file_index];
2216 ist = ist_table[ost->source_index];
2217 if(no_packet[ist->file_index])
2218 continue;
2219 if(ost->st->codec->codec_type == CODEC_TYPE_VIDEO)
2220 opts = ost->sync_opts * av_q2d(ost->st->codec->time_base);
2221 else
2222 opts = ost->st->pts.val * av_q2d(ost->st->time_base);
2223 ipts = (double)ist->pts;
2224 if (!file_table[ist->file_index].eof_reached){
2225 if(ipts < ipts_min) {
2226 ipts_min = ipts;
2227 if(input_sync ) file_index = ist->file_index;
2228 }
2229 if(opts < opts_min) {
2230 opts_min = opts;
2231 if(!input_sync) file_index = ist->file_index;
2232 }
2233 }
2234 if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){
2235 file_index= -1;
2236 break;
2237 }
2238 }
2239 /* if none, conversion is finished */
2240 if (file_index < 0) {
2241 if(no_packet_count){
2242 no_packet_count=0;
2243 memset(no_packet, 0, sizeof(no_packet));
2244 usleep(10000);
2245 continue;
2246 }
2247 break;
2248 }
2249
2250 /* finish if recording time exhausted */
2251 if (opts_min >= (recording_time / 1000000.0))
2252 break;
2253
2254 /* finish if limit size exhausted */
2255 if (limit_filesize != 0 && limit_filesize < url_ftell(output_files[0]->pb))
2256 break;
2257
2258 /* read a frame from it and output it in the fifo */
2259 is = input_files[file_index];
2260 ret= av_read_frame(is, &pkt);
2261 if(ret == AVERROR(EAGAIN)){
2262 no_packet[file_index]=1;
2263 no_packet_count++;
2264 continue;
2265 }
2266 if (ret < 0) {
2267 file_table[file_index].eof_reached = 1;
2268 if (opt_shortest)
2269 break;
2270 else
2271 continue;
2272 }
2273
2274 no_packet_count=0;
2275 memset(no_packet, 0, sizeof(no_packet));
2276
2277 if (do_pkt_dump) {
2278 av_pkt_dump_log(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump);
2279 }
2280 /* the following test is needed in case new streams appear
2281 dynamically in the input: we ignore them */
2282 if (pkt.stream_index >= file_table[file_index].nb_streams)
2283 goto discard_packet;
2284 ist_index = file_table[file_index].ist_index + pkt.stream_index;
2285 ist = ist_table[ist_index];
2286 if (ist->discard)
2287 goto discard_packet;
2288
2289 if (pkt.dts != AV_NOPTS_VALUE)
2290 pkt.dts += av_rescale_q(input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ist->st->time_base);
2291 if (pkt.pts != AV_NOPTS_VALUE)
2292 pkt.pts += av_rescale_q(input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ist->st->time_base);
2293
2294 if(input_files_ts_scale[file_index][pkt.stream_index]){
2295 if(pkt.pts != AV_NOPTS_VALUE)
2296 pkt.pts *= input_files_ts_scale[file_index][pkt.stream_index];
2297 if(pkt.dts != AV_NOPTS_VALUE)
2298 pkt.dts *= input_files_ts_scale[file_index][pkt.stream_index];
2299 }
2300
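/* for formats with timestamp discontinuities (AVFMT_TS_DISCONT, e.g. MPEG-TS),
   detect large jumps between the packet dts and the expected next pts and fold
   the difference into the per-file timestamp offset, unless -copyts is used */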
2301 // fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files_ts_offset[ist->file_index], ist->st->codec->codec_type);
2302 if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE
2303 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
2304 int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2305 int64_t delta= pkt_dts - ist->next_pts;
2306 if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1<ist->pts)&& !copy_ts){
2307 input_files_ts_offset[ist->file_index]-= delta;
2308 if (verbose > 2)
2309 fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, input_files_ts_offset[ist->file_index]);
2310 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2311 if(pkt.pts != AV_NOPTS_VALUE)
2312 pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2313 }
2314 }
2315
2316 //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->index, pkt.size);
2317 if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
2318
2319 if (verbose >= 0)
2320 fprintf(stderr, "Error while decoding stream #%d.%d\n",
2321 ist->file_index, ist->index);
2322 if (exit_on_error)
2323 av_exit(1);
2324 av_free_packet(&pkt);
2325 goto redo;
2326 }
2327
2328 discard_packet:
2329 av_free_packet(&pkt);
2330
2331 /* dump report by using the first output video and audio streams */
2332 print_report(output_files, ost_table, nb_ostreams, 0);
2333 }
2334
2335 /* at the end of stream, we must flush the decoder buffers */
2336 for(i=0;i<nb_istreams;i++) {
2337 ist = ist_table[i];
2338 if (ist->decoding_needed) {
2339 output_packet(ist, i, ost_table, nb_ostreams, NULL);
2340 }
2341 }
2342
2343 term_exit();
2344
2345 /* write the trailer if needed and close file */
2346 for(i=0;i<nb_output_files;i++) {
2347 os = output_files[i];
2348 av_write_trailer(os);
2349 }
2350
2351 /* dump report by using the first video and audio streams */
2352 print_report(output_files, ost_table, nb_ostreams, 1);
2353
2354 /* close each encoder */
2355 for(i=0;i<nb_ostreams;i++) {
2356 ost = ost_table[i];
2357 if (ost->encoding_needed) {
2358 av_freep(&ost->st->codec->stats_in);
2359 avcodec_close(ost->st->codec);
2360 }
2361 }
2362
2363 /* close each decoder */
2364 for(i=0;i<nb_istreams;i++) {
2365 ist = ist_table[i];
2366 if (ist->decoding_needed) {
2367 avcodec_close(ist->st->codec);
2368 }
2369 }
2370
2371 /* finished ! */
2372 ret = 0;
2373
2374 fail:
2375 av_freep(&bit_buffer);
2376 av_free(file_table);
2377
2378 if (ist_table) {
2379 for(i=0;i<nb_istreams;i++) {
2380 ist = ist_table[i];
2381 av_free(ist);
2382 }
2383 av_free(ist_table);
2384 }
2385 if (ost_table) {
2386 for(i=0;i<nb_ostreams;i++) {
2387 ost = ost_table[i];
2388 if (ost) {
2389 if (ost->logfile) {
2390 fclose(ost->logfile);
2391 ost->logfile = NULL;
2392 }
2393 av_fifo_free(ost->fifo); /* works even if the fifo was never
2394 allocated: ost was zeroed, so fifo is NULL */
2395 av_free(ost->pict_tmp.data[0]);
2396 if (ost->video_resample)
2397 sws_freeContext(ost->img_resample_ctx);
2398 if (ost->resample)
2399 audio_resample_close(ost->resample);
2400 if (ost->reformat_ctx)
2401 av_audio_convert_free(ost->reformat_ctx);
2402 av_free(ost);
2403 }
2404 }
2405 av_free(ost_table);
2406 }
2407 return ret;
2408 }
2409
2410 static void opt_format(const char *arg)
2411 {
2412 /* compatibility stuff for pgmyuv */
2413 if (!strcmp(arg, "pgmyuv")) {
2414 pgmyuv_compatibility_hack=1;
2415 // opt_image_format(arg);
2416 arg = "image2";
2417 fprintf(stderr, "pgmyuv format is deprecated, use image2\n");
2418 }
2419
2420 last_asked_format = arg;
2421 }
2422
2423 static void opt_video_rc_override_string(const char *arg)
2424 {
2425 video_rc_override_string = arg;
2426 }
2427
2428 static int opt_me_threshold(const char *opt, const char *arg)
2429 {
2430 me_threshold = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2431 return 0;
2432 }
2433
2434 static int opt_verbose(const char *opt, const char *arg)
2435 {
2436 verbose = parse_number_or_die(opt, arg, OPT_INT64, -10, 10);
2437 return 0;
2438 }
2439
2440 static int opt_frame_rate(const char *opt, const char *arg)
2441 {
2442 if (av_parse_video_frame_rate(&frame_rate, arg) < 0) {
2443 fprintf(stderr, "Incorrect value for %s: %s\n", opt, arg);
2444 av_exit(1);
2445 }
2446 return 0;
2447 }
2448
2449 static int opt_bitrate(const char *opt, const char *arg)
2450 {
2451 int codec_type = opt[0]=='a' ? CODEC_TYPE_AUDIO : CODEC_TYPE_VIDEO;
2452
2453 opt_default(opt, arg);
2454
2455 if (av_get_int(avcodec_opts[codec_type], "b", NULL) < 1000)
2456 fprintf(stderr, "WARNING: The bitrate parameter is set too low. It takes bits/s as argument, not kbits/s\n");
2457
2458 return 0;
2459 }
2460
2461 static void opt_frame_crop_top(const char *arg)
2462 {
2463 frame_topBand = atoi(arg);
2464 if (frame_topBand < 0) {
2465 fprintf(stderr, "Incorrect top crop size\n");
2466 av_exit(1);
2467 }
2468 if ((frame_topBand) >= frame_height){
2469 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2470 av_exit(1);
2471 }
2472 frame_height -= frame_topBand;
2473 }
2474
2475 static void opt_frame_crop_bottom(const char *arg)
2476 {
2477 frame_bottomBand = atoi(arg);
2478 if (frame_bottomBand < 0) {
2479 fprintf(stderr, "Incorrect bottom crop size\n");
2480 av_exit(1);
2481 }
2482 if ((frame_bottomBand) >= frame_height){
2483 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2484 av_exit(1);
2485 }
2486 frame_height -= frame_bottomBand;
2487 }
2488
2489 static void opt_frame_crop_left(const char *arg)
2490 {
2491 frame_leftBand = atoi(arg);
2492 if (frame_leftBand < 0) {
2493 fprintf(stderr, "Incorrect left crop size\n");
2494 av_exit(1);
2495 }
2496 if ((frame_leftBand) >= frame_width){
2497 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2498 av_exit(1);
2499 }
2500 frame_width -= frame_leftBand;
2501 }
2502
2503 static void opt_frame_crop_right(const char *arg)
2504 {
2505 frame_rightBand = atoi(arg);
2506 if (frame_rightBand < 0) {
2507 fprintf(stderr, "Incorrect right crop size\n");
2508 av_exit(1);
2509 }
2510 if ((frame_rightBand) >= frame_width){
2511 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2512 av_exit(1);
2513 }
2514 frame_width -= frame_rightBand;
2515 }
2516
2517 static void opt_frame_size(const char *arg)
2518 {
2519 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2520 fprintf(stderr, "Incorrect frame size\n");
2521 av_exit(1);
2522 }
2523 }
2524
2525 static void opt_pad_color(const char *arg) {
2526 /* Input is expected to be six hex digits similar to
2527 how colors are expressed in html tags (but without the #) */
2528 int rgb = strtol(arg, NULL, 16);
2529 int r,g,b;
2530
2531 r = (rgb >> 16);
2532 g = ((rgb >> 8) & 255);
2533 b = (rgb & 255);
2534
2535 padcolor[0] = RGB_TO_Y(r,g,b);
2536 padcolor[1] = RGB_TO_U(r,g,b,0);
2537 padcolor[2] = RGB_TO_V(r,g,b,0);
2538 }
2539
2540 static void opt_frame_pad_top(const char *arg)
2541 {
2542 frame_padtop = atoi(arg);
2543 if (frame_padtop < 0) {
2544 fprintf(stderr, "Incorrect top pad size\n");
2545 av_exit(1);
2546 }
2547 }
2548
2549 static void opt_frame_pad_bottom(const char *arg)
2550 {
2551 frame_padbottom = atoi(arg);
2552 if (frame_padbottom < 0) {
2553 fprintf(stderr, "Incorrect bottom pad size\n");
2554 av_exit(1);
2555 }
2556 }
2557
2558
2559 static void opt_frame_pad_left(const char *arg)
2560 {
2561 frame_padleft = atoi(arg);
2562 if (frame_padleft < 0) {
2563 fprintf(stderr, "Incorrect left pad size\n");
2564 av_exit(1);
2565 }
2566 }
2567
2568
2569 static void opt_frame_pad_right(const char *arg)
2570 {
2571 frame_padright = atoi(arg);
2572 if (frame_padright < 0) {
2573 fprintf(stderr, "Incorrect right pad size\n");
2574 av_exit(1);
2575 }
2576 }
2577
2578 static void opt_frame_pix_fmt(const char *arg)
2579 {
2580 if (strcmp(arg, "list")) {
2581 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2582 if (frame_pix_fmt == PIX_FMT_NONE) {
2583 fprintf(stderr, "Unknown pixel format requested: %s\n", arg);
2584 av_exit(1);
2585 }
2586 } else {
2587 list_fmts(avcodec_pix_fmt_string, PIX_FMT_NB);
2588 av_exit(0);
2589 }
2590 }
2591
2592 static void opt_frame_aspect_ratio(const char *arg)
2593 {
2594 int x = 0, y = 0;
2595 double ar = 0;
2596 const char *p;
2597 char *end;
2598
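/* the aspect ratio may be given either as "x:y" or as a single floating
   point value */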
2599 p = strchr(arg, ':');
2600 if (p) {
2601 x = strtol(arg, &end, 10);
2602 if (end == p)
2603 y = strtol(end+1, &end, 10);
2604 if (x > 0 && y > 0)
2605 ar = (double)x / (double)y;
2606 } else
2607 ar = strtod(arg, NULL);
2608
2609 if (!ar) {
2610 fprintf(stderr, "Incorrect aspect ratio specification.\n");
2611 av_exit(1);
2612 }
2613 frame_aspect_ratio = ar;
2614 }
2615
2616 static int opt_metadata(const char *opt, const char *arg)
2617 {
2618 char *mid= strchr(arg, '=');
2619
2620 if(!mid){
2621 fprintf(stderr, "Missing =\n");
2622 av_exit(1);
2623 }
2624 *mid++= 0;
2625
2626 metadata_count++;
2627 metadata= av_realloc(metadata, sizeof(*metadata)*metadata_count);
2628 metadata[metadata_count-1].key = av_strdup(arg);
2629 metadata[metadata_count-1].value= av_strdup(mid);
2630
2631 return 0;
2632 }
2633
2634 static void opt_qscale(const char *arg)
2635 {
2636 video_qscale = atof(arg);
2637 if (video_qscale <= 0 ||
2638 video_qscale > 255) {
2639 fprintf(stderr, "qscale must be > 0.0 and <= 255\n");
2640 av_exit(1);
2641 }
2642 }
2643
2644 static void opt_top_field_first(const char *arg)
2645 {
2646 top_field_first= atoi(arg);
2647 }
2648
2649 static int opt_thread_count(const char *opt, const char *arg)
2650 {
2651 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2652 #if !HAVE_THREADS
2653 if (verbose >= 0)
2654 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2655 #endif
2656 return 0;
2657 }
2658
2659 static void opt_audio_sample_fmt(const char *arg)
2660 {
2661 if (strcmp(arg, "list"))
2662 audio_sample_fmt = avcodec_get_sample_fmt(arg);
2663 else {
2664 list_fmts(avcodec_sample_fmt_string, SAMPLE_FMT_NB);
2665 av_exit(0);
2666 }
2667 }
2668
2669 static int opt_audio_rate(const char *opt, const char *arg)
2670 {
2671 audio_sample_rate = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2672 return 0;
2673 }
2674
2675 static int opt_audio_channels(const char *opt, const char *arg)
2676 {
2677 audio_channels = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2678 return 0;
2679 }
2680
2681 static void opt_video_channel(const char *arg)
2682 {
2683 video_channel = strtol(arg, NULL, 0);
2684 }
2685
2686 static void opt_video_standard(const char *arg)
2687 {
2688 video_standard = av_strdup(arg);
2689 }
2690
2691 static void opt_codec(int *pstream_copy, char **pcodec_name,
2692 int codec_type, const char *arg)
2693 {
2694 av_freep(pcodec_name);
2695 if (!strcmp(arg, "copy")) {
2696 *pstream_copy = 1;
2697 } else {
2698 *pcodec_name = av_strdup(arg);
2699 }
2700 }
2701
2702 static void opt_audio_codec(const char *arg)
2703 {
2704 opt_codec(&audio_stream_copy, &audio_codec_name, CODEC_TYPE_AUDIO, arg);
2705 }
2706
2707 static void opt_audio_tag(const char *arg)
2708 {
2709 char *tail;
2710 audio_codec_tag= strtol(arg, &tail, 0);
2711
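/* if the argument is not a plain number, treat it as a four character
   code (fourcc) */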
2712 if(!tail || *tail)
2713 audio_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
2714 }
2715
2716 static void opt_video_tag(const char *arg)
2717 {
2718 char *tail;
2719 video_codec_tag= strtol(arg, &tail, 0);
2720
2721 if(!tail || *tail)
2722 video_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
2723 }
2724
2725 static void opt_video_codec(const char *arg)
2726 {
2727 opt_codec(&video_stream_copy, &video_codec_name, CODEC_TYPE_VIDEO, arg);
2728 }
2729
2730 static void opt_subtitle_codec(const char *arg)
2731 {
2732 opt_codec(&subtitle_stream_copy, &subtitle_codec_name, CODEC_TYPE_SUBTITLE, arg);
2733 }
2734
2735 static void opt_subtitle_tag(const char *arg)
2736 {
2737 char *tail;
2738 subtitle_codec_tag= strtol(arg, &tail, 0);
2739
2740 if(!tail || *tail)
2741 subtitle_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
2742 }
2743
2744 static void opt_map(const char *arg)
2745 {
2746 AVStreamMap *m;
2747 char *p;
2748
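/* arg has the form file.stream[:sync_file.sync_stream]; if no sync stream
   is given, the mapped stream is its own sync reference */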
2749 m = &stream_maps[nb_stream_maps++];
2750
2751 m->file_index = strtol(arg, &p, 0);
2752 if (*p)
2753 p++;
2754
2755 m->stream_index = strtol(p, &p, 0);
2756 if (*p) {
2757 p++;
2758 m->sync_file_index = strtol(p, &p, 0);
2759 if (*p)
2760 p++;
2761 m->sync_stream_index = strtol(p, &p, 0);
2762 } else {
2763 m->sync_file_index = m->file_index;
2764 m->sync_stream_index = m->stream_index;
2765 }
2766 }
2767
2768 static void opt_map_meta_data(const char *arg)
2769 {
2770 AVMetaDataMap *m;
2771 char *p;
2772
2773 m = &meta_data_maps[nb_meta_data_maps++];
2774
2775 m->out_file = strtol(arg, &p, 0);
2776 if (*p)
2777 p++;
2778
2779 m->in_file = strtol(p, &p, 0);
2780 }
2781
2782 static void opt_input_ts_scale(const char *arg)
2783 {
2784 unsigned int stream;
2785 double scale;
2786 char *p;
2787
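/* arg is a stream index followed by a separator and a scale factor;
   the value is recorded for the next input file to be opened */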
2788 stream = strtol(arg, &p, 0);
2789 if (*p)
2790 p++;
2791 scale= strtod(p, &p);
2792
2793 if(stream >= MAX_STREAMS)
2794 av_exit(1);
2795
2796 input_files_ts_scale[nb_input_files][stream]= scale;
2797 }
2798
2799 static int opt_recording_time(const char *opt, const char *arg)
2800 {
2801 recording_time = parse_time_or_die(opt, arg, 1);
2802 return 0;
2803 }
2804
2805 static int opt_start_time(const char *opt, const char *arg)
2806 {
2807 start_time = parse_time_or_die(opt, arg, 1);
2808 return 0;
2809 }
2810
2811 static int opt_rec_timestamp(const char *opt, const char *arg)
2812 {
2813 rec_timestamp = parse_time_or_die(opt, arg, 0) / 1000000;
2814 return 0;
2815 }
2816
2817 static int opt_input_ts_offset(const char *opt, const char *arg)
2818 {
2819 input_ts_offset = parse_time_or_die(opt, arg, 1);
2820 return 0;
2821 }
2822
2823 static enum CodecID find_codec_or_die(const char *name, int type, int encoder)
2824 {
2825 const char *codec_string = encoder ? "encoder" : "decoder";
2826 AVCodec *codec;
2827
2828 if(!name)
2829 return CODEC_ID_NONE;
2830 codec = encoder ?
2831 avcodec_find_encoder_by_name(name) :
2832 avcodec_find_decoder_by_name(name);
2833 if(!codec) {
2834 fprintf(stderr, "Unknown %s '%s'\n", codec_string, name);
2835 av_exit(1);
2836 }
2837 if(codec->type != type) {
2838 fprintf(stderr, "Invalid %s type '%s'\n", codec_string, name);
2839 av_exit(1);
2840 }
2841 return codec->id;
2842 }
2843
2844 static void opt_input_file(const char *filename)
2845 {
2846 AVFormatContext *ic;
2847 AVFormatParameters params, *ap = &params;
2848 AVInputFormat *file_iformat = NULL;
2849 int err, i, ret, rfps, rfps_base;
2850 int64_t timestamp;
2851
2852 if (last_asked_format) {
2853 file_iformat = av_find_input_format(last_asked_format);
2854 last_asked_format = NULL;
2855 }
2856
2857 if (!strcmp(filename, "-"))
2858 filename = "pipe:";
2859
2860 using_stdin |= !strncmp(filename, "pipe:", 5) ||
2861 !strcmp(filename, "/dev/stdin");
2862
2863 /* get default parameters from command line */
2864 ic = avformat_alloc_context();
2865 if (!ic) {
2866 print_error(filename, AVERROR(ENOMEM));
2867 av_exit(1);
2868 }
2869
2870 memset(ap, 0, sizeof(*ap));
2871 ap->prealloced_context = 1;
2872 ap->sample_rate = audio_sample_rate;
2873 ap->channels = audio_channels;
2874 ap->time_base.den = frame_rate.num;
2875 ap->time_base.num = frame_rate.den;
2876 ap->width = frame_width + frame_padleft + frame_padright;
2877 ap->height = frame_height + frame_padtop + frame_padbottom;
2878 ap->pix_fmt = frame_pix_fmt;
2879 // ap->sample_fmt = audio_sample_fmt; //FIXME:not implemented in libavformat
2880 ap->channel = video_channel;
2881 ap->standard = video_standard;
2882 ap->video_codec_id = find_codec_or_die(video_codec_name, CODEC_TYPE_VIDEO, 0);
2883 ap->audio_codec_id = find_codec_or_die(audio_codec_name, CODEC_TYPE_AUDIO, 0);
2884 if(pgmyuv_compatibility_hack)
2885 ap->video_codec_id= CODEC_ID_PGMYUV;
2886
2887 set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
2888
2889 ic->video_codec_id = find_codec_or_die(video_codec_name , CODEC_TYPE_VIDEO , 0);
2890 ic->audio_codec_id = find_codec_or_die(audio_codec_name , CODEC_TYPE_AUDIO , 0);
2891 ic->subtitle_codec_id= find_codec_or_die(subtitle_codec_name, CODEC_TYPE_SUBTITLE, 0);
2892 ic->flags |= AVFMT_FLAG_NONBLOCK;
2893
2894 /* open the input file with generic libav function */
2895 err = av_open_input_file(&ic, filename, file_iformat, 0, ap);
2896 if (err < 0) {
2897 print_error(filename, err);
2898 av_exit(1);
2899 }
2900 if(opt_programid) {
2901 int i;
2902 for(i=0; i<ic->nb_programs; i++)
2903 if(ic->programs[i]->id != opt_programid)
2904 ic->programs[i]->discard = AVDISCARD_ALL;
2905 }
2906
2907 ic->loop_input = loop_input;
2908
2909 /* If there is not enough info to get the stream parameters, we decode the
2910 first frames to get them (used in the MPEG case, for example). */
2911 ret = av_find_stream_info(ic);
2912 if (ret < 0 && verbose >= 0) {
2913 fprintf(stderr, "%s: could not find codec parameters\n", filename);
2914 av_exit(1);
2915 }
2916
2917 timestamp = start_time;
2918 /* add the stream start time */
2919 if (ic->start_time != AV_NOPTS_VALUE)
2920 timestamp += ic->start_time;
2921
2922 /* if seeking requested, we execute it */
2923 if (start_time != 0) {
2924 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
2925 if (ret < 0) {
2926 fprintf(stderr, "%s: could not seek to position %0.3f\n",
2927 filename, (double)timestamp / AV_TIME_BASE);
2928 }
2929 /* reset seek info */
2930 start_time = 0;
2931 }
2932
2933 /* update the current parameters so that they match those of the input stream */
2934 for(i=0;i<ic->nb_streams;i++) {
2935 AVStream *st = ic->streams[i];
2936 AVCodecContext *enc = st->codec;
2937 avcodec_thread_init(enc, thread_count);
2938 switch(enc->codec_type) {
2939 case CODEC_TYPE_AUDIO:
2940 set_context_opts(enc, avcodec_opts[CODEC_TYPE_AUDIO], AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM);
2941 //fprintf(stderr, "\nInput Audio channels: %d", enc->channels);
2942 channel_layout = enc->channel_layout;
2943 audio_channels = enc->channels;
2944 audio_sample_rate = enc->sample_rate;
2945 audio_sample_fmt = enc->sample_fmt;
2946 input_codecs[nb_icodecs++] = avcodec_find_decoder_by_name(audio_codec_name);
2947 if(audio_disable)
2948 st->discard= AVDISCARD_ALL;
2949 break;
2950 case CODEC_TYPE_VIDEO:
2951 set_context_opts(enc, avcodec_opts[CODEC_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM);
2952 frame_height = enc->height;
2953 frame_width = enc->width;
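/* derive the display aspect ratio from the sample aspect ratio (preferring
   the container value over the codec one) and the frame dimensions */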
2954 if(ic->streams[i]->sample_aspect_ratio.num)
2955 frame_aspect_ratio=av_q2d(ic->streams[i]->sample_aspect_ratio);
2956 else
2957 frame_aspect_ratio=av_q2d(enc->sample_aspect_ratio);
2958 frame_aspect_ratio *= (float) enc->width / enc->height;
2959 frame_pix_fmt = enc->pix_fmt;
2960 rfps = ic->streams[i]->r_frame_rate.num;
2961 rfps_base = ic->streams[i]->r_frame_rate.den;
2962 if(enc->lowres) {
2963 enc->flags |= CODEC_FLAG_EMU_EDGE;
2964 frame_height >>= enc->lowres;
2965 frame_width >>= enc->lowres;
2966 }
2967 if(me_threshold)
2968 enc->debug |= FF_DEBUG_MV;
2969
2970 if (enc->time_base.den != rfps || enc->time_base.num != rfps_base) {
2971
2972 if (verbose >= 0)
2973 fprintf(stderr,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
2974 i, (float)enc->time_base.den / enc->time_base.num, enc->time_base.den, enc->time_base.num,
2976 (float)rfps / rfps_base, rfps, rfps_base);
2977 }
2978 /* update the current frame rate to match the stream frame rate */
2979 frame_rate.num = rfps;
2980 frame_rate.den = rfps_base;
2981
2982 input_codecs[nb_icodecs++] = avcodec_find_decoder_by_name(video_codec_name);
2983 if(video_disable)
2984 st->discard= AVDISCARD_ALL;
2985 else if(video_discard)
2986 st->discard= video_discard;
2987 break;
2988 case CODEC_TYPE_DATA:
2989 break;
2990 case CODEC_TYPE_SUBTITLE:
2991 input_codecs[nb_icodecs++] = avcodec_find_decoder_by_name(subtitle_codec_name);
2992 if(subtitle_disable)
2993 st->discard = AVDISCARD_ALL;
2994 break;
2995 case CODEC_TYPE_ATTACHMENT:
2996 case CODEC_TYPE_UNKNOWN:
2997 nb_icodecs++;
2998 break;
2999 default:
3000 abort();
3001 }
3002 }
3003
3004 input_files[nb_input_files] = ic;
3005 input_files_ts_offset[nb_input_files] = input_ts_offset - (copy_ts ? 0 : timestamp);
3006 /* dump the file content */
3007 if (verbose >= 0)
3008 dump_format(ic, nb_input_files, filename, 0);
3009
3010 nb_input_files++;
3011
3012 video_channel = 0;
3013
3014 av_freep(&video_codec_name);
3015 av_freep(&audio_codec_name);
3016 av_freep(&subtitle_codec_name);
3017 }
3018
3019 static void check_audio_video_sub_inputs(int *has_video_ptr, int *has_audio_ptr,
3020 int *has_subtitle_ptr)
3021 {
3022 int has_video, has_audio, has_subtitle, i, j;
3023 AVFormatContext *ic;
3024
3025 has_video = 0;
3026 has_audio = 0;
3027 has_subtitle = 0;
3028 for(j=0;j<nb_input_files;j++) {
3029 ic = input_files[j];
3030 for(i=0;i<ic->nb_streams;i++) {
3031 AVCodecContext *enc = ic->streams[i]->codec;
3032 switch(enc->codec_type) {
3033 case CODEC_TYPE_AUDIO:
3034 has_audio = 1;
3035 break;
3036 case CODEC_TYPE_VIDEO:
3037 has_video = 1;
3038 break;
3039 case CODEC_TYPE_SUBTITLE:
3040 has_subtitle = 1;
3041 break;
3042 case CODEC_TYPE_DATA:
3043 case CODEC_TYPE_ATTACHMENT:
3044 case CODEC_TYPE_UNKNOWN:
3045 break;
3046 default:
3047 abort();
3048 }
3049 }
3050 }
3051 *has_video_ptr = has_video;
3052 *has_audio_ptr = has_audio;
3053 *has_subtitle_ptr = has_subtitle;
3054 }
3055
3056 static void new_video_stream(AVFormatContext *oc)
3057 {
3058 AVStream *st;
3059 AVCodecContext *video_enc;
3060 enum CodecID codec_id;
3061
3062 st = av_new_stream(oc, oc->nb_streams);
3063 if (!st) {
3064 fprintf(stderr, "Could not alloc stream\n");
3065 av_exit(1);
3066 }
3067 avcodec_get_context_defaults2(st->codec, CODEC_TYPE_VIDEO);
3068 bitstream_filters[nb_output_files][oc->nb_streams - 1]= video_bitstream_filters;
3069 video_bitstream_filters= NULL;
3070
3071 avcodec_thread_init(st->codec, thread_count);
3072
3073 video_enc = st->codec;
3074
3075 if(video_codec_tag)
3076 video_enc->codec_tag= video_codec_tag;
3077
3078 if( (video_global_header&1)
3079 || (video_global_header==0 && (oc->oformat->flags & AVFMT_GLOBALHEADER))){
3080 video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
3081 avcodec_opts[CODEC_TYPE_VIDEO]->flags|= CODEC_FLAG_GLOBAL_HEADER;
3082 }
3083 if(video_global_header&2){
3084 video_enc->flags2 |= CODEC_FLAG2_LOCAL_HEADER;
3085 avcodec_opts[CODEC_TYPE_VIDEO]->flags2|= CODEC_FLAG2_LOCAL_HEADER;
3086 }
3087
3088 if (video_stream_copy) {
3089 st->stream_copy = 1;
3090 video_enc->codec_type = CODEC_TYPE_VIDEO;
3091 video_enc->sample_aspect_ratio =
3092 st->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255);
3093 } else {
3094 const char *p;
3095 int i;
3096 AVCodec *codec;
3097 AVRational fps= frame_rate.num ? frame_rate : (AVRational){25,1};
3098
3099 if (video_codec_name) {
3100 codec_id = find_codec_or_die(video_codec_name, CODEC_TYPE_VIDEO, 1);
3101 codec = avcodec_find_encoder_by_name(video_codec_name);
3102 output_codecs[nb_ocodecs] = codec;
3103 } else {
3104 codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, CODEC_TYPE_VIDEO);
3105 codec = avcodec_find_encoder(codec_id);
3106 }
3107
3108 video_enc->codec_id = codec_id;
3109
3110 set_context_opts(video_enc, avcodec_opts[CODEC_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM);
3111
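/* if the encoder only supports certain frame rates and force_fps is not
   set, snap the requested rate to the nearest supported one */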
3112 if (codec && codec->supported_framerates && !force_fps)
3113 fps = codec->supported_framerates[av_find_nearest_q_idx(fps, codec->supported_framerates)];
3114 video_enc->time_base.den = fps.num;
3115 video_enc->time_base.num = fps.den;
3116
3117 video_enc->width = frame_width + frame_padright + frame_padleft;
3118 video_enc->height = frame_height + frame_padtop + frame_padbottom;
3119 video_enc->sample_aspect_ratio = av_d2q(frame_aspect_ratio*video_enc->height/video_enc->width, 255);
3120 video_enc->pix_fmt = frame_pix_fmt;
3121 st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
3122
3123 if(codec && codec->pix_fmts){
3124 const enum PixelFormat *p= codec->pix_fmts;
3125 for(; *p!=-1; p++){
3126 if(*p == video_enc->pix_fmt)
3127 break;
3128 }
3129 if(*p == -1)
3130 video_enc->pix_fmt = codec->pix_fmts[0];
3131 }
3132
3133 if (intra_only)
3134 video_enc->gop_size = 0;
3135 if (video_qscale || same_quality) {
3136 video_enc->flags |= CODEC_FLAG_QSCALE;
3137 video_enc->global_quality=
3138 st->quality = FF_QP2LAMBDA * video_qscale;
3139 }
3140
3141 if(intra_matrix)
3142 video_enc->intra_matrix = intra_matrix;
3143 if(inter_matrix)
3144 video_enc->inter_matrix = inter_matrix;
3145
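/* parse the rc_override string: '/'-separated "start,end,q" triplets;
   a positive q is a constant qscale, a negative value sets a quality
   factor of -q/100 */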
3146 p= video_rc_override_string;
3147 for(i=0; p; i++){
3148 int start, end, q;
3149 int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
3150 if(e!=3){
3151 fprintf(stderr, "error parsing rc_override\n");
3152 av_exit(1);
3153 }
3154 video_enc->rc_override=
3155 av_realloc(video_enc->rc_override,
3156 sizeof(RcOverride)*(i+1));
3157 video_enc->rc_override[i].start_frame= start;
3158 video_enc->rc_override[i].end_frame = end;
3159 if(q>0){
3160 video_enc->rc_override[i].qscale= q;
3161 video_enc->rc_override[i].quality_factor= 1.0;
3162 }
3163 else{
3164 video_enc->rc_override[i].qscale= 0;
3165 video_enc->rc_override[i].quality_factor= -q/100.0;
3166 }
3167 p= strchr(p, '/');
3168 if(p) p++;
3169 }
3170 video_enc->rc_override_count=i;
3171 if (!video_enc->rc_initial_buffer_occupancy)
3172 video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size*3/4;
3173 video_enc->me_threshold= me_threshold;
3174 video_enc->intra_dc_precision= intra_dc_precision - 8;
3175
3176 if (do_psnr)
3177 video_enc->flags|= CODEC_FLAG_PSNR;
3178
3179 /* two pass mode */
3180 if (do_pass) {
3181 if (do_pass == 1) {
3182 video_enc->flags |= CODEC_FLAG_PASS1;
3183 } else {
3184 video_enc->flags |= CODEC_FLAG_PASS2;
3185 }
3186 }
3187 }
3188 nb_ocodecs++;
3189 if (video_language) {
3190 av_metadata_set(&st->metadata, "language", video_language);
3191 av_freep(&video_language);
3192 }