[libav.git] / ffmpeg.c
1 /*
2 * FFmpeg main
3 * Copyright (c) 2000-2003 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #define HAVE_AV_CONFIG_H
20 #include <limits.h>
21 #include "avformat.h"
22 #include "framehook.h"
23 #include "dsputil.h"
24
25 #ifndef CONFIG_WIN32
26 #include <unistd.h>
27 #include <fcntl.h>
28 #include <sys/ioctl.h>
29 #include <sys/time.h>
30 #include <termios.h>
31 #include <sys/resource.h>
32 #include <signal.h>
33 #endif
34 #ifdef CONFIG_OS2
35 #include <sys/types.h>
36 #include <sys/select.h>
37 #include <stdlib.h>
38 #endif
39 #undef time //needed because HAVE_AV_CONFIG_H is defined on top
40 #include <time.h>
41
42 #include "cmdutils.h"
43
44 #undef NDEBUG
45 #include <assert.h>
46
47 #if !defined(INFINITY) && defined(HUGE_VAL)
48 #define INFINITY HUGE_VAL
49 #endif
50
51 /* select an input stream for an output stream */
52 typedef struct AVStreamMap {
53 int file_index;
54 int stream_index;
55 } AVStreamMap;
56
57 /** select an input file for an output file */
58 typedef struct AVMetaDataMap {
59 int out_file;
60 int in_file;
61 } AVMetaDataMap;
62
63 extern const OptionDef options[];
64
65 static void show_help(void);
66 static void show_license(void);
67
68 #define MAX_FILES 20
69
70 static AVFormatContext *input_files[MAX_FILES];
71 static int64_t input_files_ts_offset[MAX_FILES];
72 static int nb_input_files = 0;
73
74 static AVFormatContext *output_files[MAX_FILES];
75 static int nb_output_files = 0;
76
77 static AVStreamMap stream_maps[MAX_FILES];
78 static int nb_stream_maps;
79
80 static AVMetaDataMap meta_data_maps[MAX_FILES];
81 static int nb_meta_data_maps;
82
83 static AVInputFormat *file_iformat;
84 static AVOutputFormat *file_oformat;
85 static AVImageFormat *image_format;
86 static int frame_width = 160;
87 static int frame_height = 128;
88 static float frame_aspect_ratio = 0;
89 static enum PixelFormat frame_pix_fmt = PIX_FMT_YUV420P;
90 static int frame_padtop = 0;
91 static int frame_padbottom = 0;
92 static int frame_padleft = 0;
93 static int frame_padright = 0;
94 static int padcolor[3] = {16,128,128}; /* default to black */
95 static int frame_topBand = 0;
96 static int frame_bottomBand = 0;
97 static int frame_leftBand = 0;
98 static int frame_rightBand = 0;
99 static int max_frames[3] = {INT_MAX, INT_MAX, INT_MAX};
100 static int frame_rate = 25;
101 static int frame_rate_base = 1;
102 static int video_bit_rate = 200*1000;
103 static int video_bit_rate_tolerance = 4000*1000;
104 static float video_qscale = 0;
105 static int video_qmin = 2;
106 static int video_qmax = 31;
107 static int video_lmin = 2*FF_QP2LAMBDA;
108 static int video_lmax = 31*FF_QP2LAMBDA;
109 static int video_mb_qmin = 2;
110 static int video_mb_qmax = 31;
111 static int video_qdiff = 3;
112 static float video_qblur = 0.5;
113 static float video_qcomp = 0.5;
114 static uint16_t *intra_matrix = NULL;
115 static uint16_t *inter_matrix = NULL;
116 #if 0 //experimental, (can be removed)
117 static float video_rc_qsquish=1.0;
118 static float video_rc_qmod_amp=0;
119 static int video_rc_qmod_freq=0;
120 #endif
121 static char *video_rc_override_string=NULL;
122 static char *video_rc_eq="tex^qComp";
123 static int video_rc_buffer_size=0;
124 static float video_rc_buffer_aggressivity=1.0;
125 static int video_rc_max_rate=0;
126 static int video_rc_min_rate=0;
127 static float video_rc_initial_cplx=0;
128 static float video_b_qfactor = 1.25;
129 static float video_b_qoffset = 1.25;
130 static float video_i_qfactor = -0.8;
131 static float video_i_qoffset = 0.0;
132 static int video_intra_quant_bias= FF_DEFAULT_QUANT_BIAS;
133 static int video_inter_quant_bias= FF_DEFAULT_QUANT_BIAS;
134 static int me_method = ME_EPZS;
135 static int video_disable = 0;
136 static int video_codec_id = CODEC_ID_NONE;
137 static int video_codec_tag = 0;
138 static int same_quality = 0;
139 static int b_frames = 0;
140 static int mb_decision = FF_MB_DECISION_SIMPLE;
141 static int ildct_cmp = FF_CMP_VSAD;
142 static int mb_cmp = FF_CMP_SAD;
143 static int sub_cmp = FF_CMP_SAD;
144 static int cmp = FF_CMP_SAD;
145 static int pre_cmp = FF_CMP_SAD;
146 static int pre_me = 0;
147 static float lumi_mask = 0;
148 static float dark_mask = 0;
149 static float scplx_mask = 0;
150 static float tcplx_mask = 0;
151 static float p_mask = 0;
152 static int use_4mv = 0;
153 static int use_obmc = 0;
154 static int use_loop = 0;
155 static int use_aic = 0;
156 static int use_aiv = 0;
157 static int use_umv = 0;
158 static int use_ss = 0;
159 static int use_alt_scan = 0;
160 static int use_trell = 0;
161 static int use_scan_offset = 0;
162 static int use_qpel = 0;
163 static int use_qprd = 0;
164 static int use_cbprd = 0;
165 static int qns = 0;
166 static int closed_gop = 0;
167 static int strict_gop = 0;
168 static int do_deinterlace = 0;
169 static int do_interlace_dct = 0;
170 static int do_interlace_me = 0;
171 static int workaround_bugs = FF_BUG_AUTODETECT;
172 static int error_resilience = 2;
173 static int error_concealment = 3;
174 static int dct_algo = 0;
175 static int idct_algo = 0;
176 static int use_part = 0;
177 static int packet_size = 0;
178 static int error_rate = 0;
179 static int strict = 0;
180 static int top_field_first = -1;
181 static int noise_reduction = 0;
182 static int sc_threshold = 0;
183 static int debug = 0;
184 static int debug_mv = 0;
185 static int me_threshold = 0;
186 static int mb_threshold = 0;
187 static int intra_dc_precision = 8;
188 static int coder = 0;
189 static int context = 0;
190 static int predictor = 0;
191 static int video_profile = FF_PROFILE_UNKNOWN;
192 static int video_level = FF_LEVEL_UNKNOWN;
193 static int nsse_weight = 8;
194 static int subpel_quality= 8;
195 static int lowres= 0;
196 static int frame_skip_threshold= 0;
197 static int frame_skip_factor= 0;
198 static int frame_skip_exp= 0;
199 static int frame_skip_cmp= FF_CMP_DCTMAX;
200 extern int loop_input; /* currently a hack */
201
202 static int gop_size = 12;
203 static int intra_only = 0;
204 static int audio_sample_rate = 44100;
205 static int audio_bit_rate = 64000;
206 static int audio_disable = 0;
207 static int audio_channels = 1;
208 static int audio_codec_id = CODEC_ID_NONE;
209 static int audio_codec_tag = 0;
210
211 static int mux_rate= 0;
212 static int mux_packet_size= 0;
213 static float mux_preload= 0.5;
214 static float mux_max_delay= 0.7;
215
216 static int64_t recording_time = 0;
217 static int64_t start_time = 0;
218 static int64_t rec_timestamp = 0;
219 static int64_t input_ts_offset = 0;
220 static int file_overwrite = 0;
221 static char *str_title = NULL;
222 static char *str_author = NULL;
223 static char *str_copyright = NULL;
224 static char *str_comment = NULL;
225 static int do_benchmark = 0;
226 static int do_hex_dump = 0;
227 static int do_pkt_dump = 0;
228 static int do_psnr = 0;
229 static int do_vstats = 0;
230 static int do_pass = 0;
231 static int bitexact = 0;
232 static char *pass_logfilename = NULL;
233 static int audio_stream_copy = 0;
234 static int video_stream_copy = 0;
235 static int video_sync_method= 1;
236 static int audio_sync_method= 0;
237 static int copy_ts= 0;
238
239 static int rate_emu = 0;
240
241 static char *video_grab_format = "video4linux";
242 static char *video_device = NULL;
243 static char *grab_device = NULL;
244 static int video_channel = 0;
245 static char *video_standard = "ntsc";
246
247 static char *audio_grab_format = "audio_device";
248 static char *audio_device = NULL;
249
250 static int using_stdin = 0;
251 static int using_vhook = 0;
252 static int verbose = 1;
253 static int thread_count= 1;
254 static int q_pressed = 0;
255 static int me_range = 0;
256 static int64_t video_size = 0;
257 static int64_t audio_size = 0;
258 static int64_t extra_size = 0;
259 static int nb_frames_dup = 0;
260 static int nb_frames_drop = 0;
261 static int input_sync;
262
263 static int pgmyuv_compatibility_hack=0;
264
265
266 #define DEFAULT_PASS_LOGFILENAME "ffmpeg2pass"
267
268 typedef struct AVOutputStream {
269 int file_index; /* file index */
270 int index; /* stream index in the output file */
271 int source_index; /* AVInputStream index */
272 AVStream *st; /* stream in the output file */
273 int encoding_needed; /* true if encoding needed for this stream */
274 int frame_number;
275 /* input pts and corresponding output pts
276 for A/V sync */
277 double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
278 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
279 /* video only */
280 int video_resample; /* video_resample and video_crop are mutually exclusive */
281 AVFrame pict_tmp; /* temporary image for resampling */
282 ImgReSampleContext *img_resample_ctx; /* for image resampling */
283
284 int video_crop; /* video_resample and video_crop are mutually exclusive */
285 int topBand; /* cropping area sizes */
286 int leftBand;
287
288 int video_pad; /* video_resample and video_pad are mutually exclusive */
289 int padtop; /* padding area sizes */
290 int padbottom;
291 int padleft;
292 int padright;
293
294 /* audio only */
295 int audio_resample;
296 ReSampleContext *resample; /* for audio resampling */
297 FifoBuffer fifo; /* for compression: one audio fifo per codec */
298 FILE *logfile;
299 } AVOutputStream;
300
301 typedef struct AVInputStream {
302 int file_index;
303 int index;
304 AVStream *st;
305 int discard; /* true if stream data should be discarded */
306 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
307 int64_t sample_index; /* current sample */
308
309 int64_t start; /* time when read started */
310 unsigned long frame; /* current frame */
311 int64_t next_pts; /* synthetic pts for cases where pkt.pts
312 is not defined */
313 int64_t pts; /* current pts */
314 int is_start; /* is 1 at the start and after a discontinuity */
315 } AVInputStream;
316
317 typedef struct AVInputFile {
318 int eof_reached; /* true if eof reached */
319 int ist_index; /* index of first stream in ist_table */
320 int buffer_size; /* current total buffer size */
321 int buffer_size_max; /* buffer size at which we consider we can stop
322 buffering */
323 int nb_streams; /* nb streams we are aware of */
324 } AVInputFile;
325
326 #ifndef CONFIG_WIN32
327
328 /* init terminal so that we can grab keys */
329 static struct termios oldtty;
330
331 static void term_exit(void)
332 {
333 tcsetattr (0, TCSANOW, &oldtty);
334 }
335
336 static volatile sig_atomic_t received_sigterm = 0;
337
338 static void
339 sigterm_handler(int sig)
340 {
341 received_sigterm = sig;
342 term_exit();
343 }
344
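/* Put the controlling terminal into raw, no-echo mode so that single
   keypresses (such as 'q') can be read without waiting for a newline.
   The previous settings are saved in oldtty and restored by term_exit(),
   which runs at normal exit and from the SIGINT/SIGQUIT/SIGTERM handler. */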
345 static void term_init(void)
346 {
347 struct termios tty;
348
349 tcgetattr (0, &tty);
350 oldtty = tty;
351
352 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
353 |INLCR|IGNCR|ICRNL|IXON);
354 tty.c_oflag |= OPOST;
355 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
356 tty.c_cflag &= ~(CSIZE|PARENB);
357 tty.c_cflag |= CS8;
358 tty.c_cc[VMIN] = 1;
359 tty.c_cc[VTIME] = 0;
360
361 tcsetattr (0, TCSANOW, &tty);
362
363 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
364 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
365 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
366 /*
367 register a function to be called at normal program termination
368 */
369 atexit(term_exit);
370 #ifdef CONFIG_BEOS_NETSERVER
371 fcntl(0, F_SETFL, fcntl(0, F_GETFL) | O_NONBLOCK);
372 #endif
373 }
374
375 /* read a key without blocking */
376 static int read_key(void)
377 {
378 int n = 1;
379 unsigned char ch;
380 #ifndef CONFIG_BEOS_NETSERVER
381 struct timeval tv;
382 fd_set rfds;
383
384 FD_ZERO(&rfds);
385 FD_SET(0, &rfds);
386 tv.tv_sec = 0;
387 tv.tv_usec = 0;
388 n = select(1, &rfds, NULL, NULL, &tv);
389 #endif
390 if (n > 0) {
391 n = read(0, &ch, 1);
392 if (n == 1)
393 return ch;
394
395 return n;
396 }
397 return -1;
398 }
399
400 static int decode_interrupt_cb(void)
401 {
402 return q_pressed || (q_pressed = read_key() == 'q');
403 }
404
405 #else
406
407 static volatile int received_sigterm = 0;
408
409 /* no interactive support */
410 static void term_exit(void)
411 {
412 }
413
414 static void term_init(void)
415 {
416 }
417
418 static int read_key(void)
419 {
420 return 0;
421 }
422
423 #endif
424
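/* Read the stream definitions from an ffserver feed (FFM) file and copy
   them into the output context, so that the encoded streams match what
   ffserver expects for that feed. */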
425 static int read_ffserver_streams(AVFormatContext *s, const char *filename)
426 {
427 int i, err;
428 AVFormatContext *ic;
429
430 err = av_open_input_file(&ic, filename, NULL, FFM_PACKET_SIZE, NULL);
431 if (err < 0)
432 return err;
433 /* copy stream format */
434 s->nb_streams = ic->nb_streams;
435 for(i=0;i<ic->nb_streams;i++) {
436 AVStream *st;
437
438 st = av_mallocz(sizeof(AVStream));
439 memcpy(st, ic->streams[i], sizeof(AVStream));
440 s->streams[i] = st;
441 }
442
443 av_close_input_file(ic);
444 return 0;
445 }
446
447 #define MAX_AUDIO_PACKET_SIZE (128 * 1024)
448
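/* Encode one buffer of decoded audio for an output stream: resample when
   the channel count or sample rate differs (or when audio_sync_method > 1
   forces it), optionally correct A/V drift, then either queue the samples
   in the per-stream fifo and encode fixed-size frames, or, for codecs
   without a fixed frame size (PCM), encode the buffer directly. */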
449 static void do_audio_out(AVFormatContext *s,
450 AVOutputStream *ost,
451 AVInputStream *ist,
452 unsigned char *buf, int size)
453 {
454 uint8_t *buftmp;
455 static uint8_t *audio_buf = NULL;
456 static uint8_t *audio_out = NULL;
457 const int audio_out_size= 4*MAX_AUDIO_PACKET_SIZE;
458
459 int size_out, frame_bytes, ret;
460 AVCodecContext *enc= &ost->st->codec;
461
462 /* SC: dynamic allocation of buffers */
463 if (!audio_buf)
464 audio_buf = av_malloc(2*MAX_AUDIO_PACKET_SIZE);
465 if (!audio_out)
466 audio_out = av_malloc(audio_out_size);
467 if (!audio_buf || !audio_out)
468 return; /* Should signal an error ! */
469
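/* A/V sync: delta is the difference, in output samples, between where this
   buffer should start (sync_ipts * sample_rate) and where the encoder
   actually is (sync_opts plus what is still queued in the fifo). At stream
   start a large delta is fixed by dropping input bytes or prepending
   silence; later drift is handed to av_resample_compensate(). */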
470 if(audio_sync_method){
471 double delta = ost->sync_ipts * enc->sample_rate - ost->sync_opts
472 - fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2);
473 double idelta= delta*ist->st->codec.sample_rate / enc->sample_rate;
474 int byte_delta= ((int)idelta)*2*ist->st->codec.channels;
475
476 //FIXME resample delay
477 if(fabs(delta) > 50){
478 if(ist->is_start){
479 if(byte_delta < 0){
480 byte_delta= FFMAX(byte_delta, -size);
481 size += byte_delta;
482 buf -= byte_delta;
483 if(verbose > 2)
484 fprintf(stderr, "discarding %d audio samples\n", (int)-delta);
485 if(!size)
486 return;
487 ist->is_start=0;
488 }else{
489 static uint8_t *input_tmp= NULL;
490 input_tmp= av_realloc(input_tmp, byte_delta + size);
491
492 if(byte_delta + size <= MAX_AUDIO_PACKET_SIZE)
493 ist->is_start=0;
494 else
495 byte_delta= MAX_AUDIO_PACKET_SIZE - size;
496
497 memset(input_tmp, 0, byte_delta);
498 memcpy(input_tmp + byte_delta, buf, size);
499 buf= input_tmp;
500 size += byte_delta;
501 if(verbose > 2)
502 fprintf(stderr, "adding %d audio samples of silence\n", (int)delta);
503 }
504 }else if(audio_sync_method>1){
505 int comp= clip(delta, -audio_sync_method, audio_sync_method);
506 assert(ost->audio_resample);
507 if(verbose > 2)
508 fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
509 // fprintf(stderr, "drift:%f len:%d opts:%lld ipts:%lld fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(ost->sync_ipts * enc->sample_rate), fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2));
510 av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
511 }
512 }
513 }else
514 ost->sync_opts= lrintf(ost->sync_ipts * enc->sample_rate)
515 - fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2); //FIXME wrong
516
517 if (ost->audio_resample) {
518 buftmp = audio_buf;
519 size_out = audio_resample(ost->resample,
520 (short *)buftmp, (short *)buf,
521 size / (ist->st->codec.channels * 2));
522 size_out = size_out * enc->channels * 2;
523 } else {
524 buftmp = buf;
525 size_out = size;
526 }
527
528 /* now encode as many frames as possible */
529 if (enc->frame_size > 1) {
530 /* output resampled raw samples */
531 fifo_write(&ost->fifo, buftmp, size_out,
532 &ost->fifo.wptr);
533
534 frame_bytes = enc->frame_size * 2 * enc->channels;
535
536 while (fifo_read(&ost->fifo, audio_buf, frame_bytes,
537 &ost->fifo.rptr) == 0) {
538 AVPacket pkt;
539 av_init_packet(&pkt);
540
541 ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
542 (short *)audio_buf);
543 audio_size += ret;
544 pkt.stream_index= ost->index;
545 pkt.data= audio_out;
546 pkt.size= ret;
547 if(enc->coded_frame)
548 pkt.pts= enc->coded_frame->pts;
549 pkt.flags |= PKT_FLAG_KEY;
550 av_interleaved_write_frame(s, &pkt);
551
552 ost->sync_opts += enc->frame_size;
553 }
554 } else {
555 AVPacket pkt;
556 av_init_packet(&pkt);
557
558 ost->sync_opts += size_out / (2 * enc->channels);
559
560 /* output a pcm frame */
561 /* XXX: change encoding codec API to avoid this ? */
562 switch(enc->codec->id) {
563 case CODEC_ID_PCM_S16LE:
564 case CODEC_ID_PCM_S16BE:
565 case CODEC_ID_PCM_U16LE:
566 case CODEC_ID_PCM_U16BE:
567 break;
568 default:
569 size_out = size_out >> 1;
570 break;
571 }
572 ret = avcodec_encode_audio(enc, audio_out, size_out,
573 (short *)buftmp);
574 audio_size += ret;
575 pkt.stream_index= ost->index;
576 pkt.data= audio_out;
577 pkt.size= ret;
578 if(enc->coded_frame)
579 pkt.pts= enc->coded_frame->pts;
580 pkt.flags |= PKT_FLAG_KEY;
581 av_interleaved_write_frame(s, &pkt);
582 }
583 }
584
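/* Optionally deinterlace the decoded picture (or copy it when a vhook is
   active) into a newly allocated buffer, run the frame hooks on it, and
   return the buffer through *bufp so the caller can free it later. */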
585 static void pre_process_video_frame(AVInputStream *ist, AVPicture *picture, void **bufp)
586 {
587 AVCodecContext *dec;
588 AVPicture *picture2;
589 AVPicture picture_tmp;
590 uint8_t *buf = 0;
591
592 dec = &ist->st->codec;
593
594 /* deinterlace : must be done before any resize */
595 if (do_deinterlace || using_vhook) {
596 int size;
597
598 /* create temporary picture */
599 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
600 buf = av_malloc(size);
601 if (!buf)
602 return;
603
604 picture2 = &picture_tmp;
605 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
606
607 if (do_deinterlace){
608 if(avpicture_deinterlace(picture2, picture,
609 dec->pix_fmt, dec->width, dec->height) < 0) {
610 /* if error, do not deinterlace */
611 av_free(buf);
612 buf = NULL;
613 picture2 = picture;
614 }
615 } else {
616 if (img_convert(picture2, dec->pix_fmt, picture,
617 dec->pix_fmt, dec->width, dec->height) < 0) {
618 /* if error, do not copy */
619 av_free(buf);
620 buf = NULL;
621 picture2 = picture;
622 }
623 }
624 } else {
625 picture2 = picture;
626 }
627
628 frame_hook_process(picture2, dec->pix_fmt, dec->width, dec->height);
629
630 if (picture != picture2)
631 *picture = *picture2;
632 *bufp = buf;
633 }
634
635 /* we begin to correct av delay at this threshold */
636 #define AV_DELAY_MAX 0.100
637
638
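/* Paint the top/bottom/left/right padding borders of a YUV420P picture
   with the given Y/U/V colour; the chroma planes are subsampled by two,
   hence the per-plane shift. */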
639 /* Expects img to be yuv420 */
640 static void fill_pad_region(AVPicture* img, int height, int width,
641 int padtop, int padbottom, int padleft, int padright, int *color) {
642
643 int i, y, shift;
644 uint8_t *optr;
645
646 for (i = 0; i < 3; i++) {
647 shift = (i == 0) ? 0 : 1;
648
649 if (padtop || padleft) {
650 memset(img->data[i], color[i], (((img->linesize[i] * padtop) +
651 padleft) >> shift));
652 }
653
654 if (padleft || padright) {
655 optr = img->data[i] + (img->linesize[i] * (padtop >> shift)) +
656 (img->linesize[i] - (padright >> shift));
657
658 for (y = 0; y < ((height - (padtop + padbottom)) >> shift); y++) {
659 memset(optr, color[i], (padleft + padright) >> shift);
660 optr += img->linesize[i];
661 }
662 }
663
664 if (padbottom) {
665 optr = img->data[i] + (img->linesize[i] * ((height - padbottom) >> shift));
666 memset(optr, color[i], ((img->linesize[i] * padbottom) >> shift));
667 }
668 }
669 }
670
671 static int bit_buffer_size= 1024*256;
672 static uint8_t *bit_buffer= NULL;
673
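/* Output the current decoded picture on one video stream: decide how many
   copies to emit (duplicating or dropping frames to keep video sync when
   video_sync_method is enabled), convert the pixel format if needed, apply
   resampling, cropping or padding as configured, then either hand the raw
   picture to the muxer (AVFMT_RAWPICTURE) or encode it and write the
   resulting packet. */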
674 static void do_video_out(AVFormatContext *s,
675 AVOutputStream *ost,
676 AVInputStream *ist,
677 AVFrame *in_picture,
678 int *frame_size)
679 {
680 int nb_frames, i, ret;
681 AVFrame *final_picture, *formatted_picture;
682 AVFrame picture_format_temp, picture_crop_temp;
683 uint8_t *buf = NULL, *buf1 = NULL;
684 AVCodecContext *enc, *dec;
685 enum PixelFormat target_pixfmt;
686
687 avcodec_get_frame_defaults(&picture_format_temp);
688 avcodec_get_frame_defaults(&picture_crop_temp);
689
690 enc = &ost->st->codec;
691 dec = &ist->st->codec;
692
693 /* by default, we output a single frame */
694 nb_frames = 1;
695
696 *frame_size = 0;
697
698 if(video_sync_method){
699 double vdelta;
700 vdelta = ost->sync_ipts * enc->frame_rate / enc->frame_rate_base - ost->sync_opts;
701 //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
702 if (vdelta < -1.1)
703 nb_frames = 0;
704 else if (vdelta > 1.1)
705 nb_frames = lrintf(vdelta);
706 //fprintf(stderr, "vdelta:%f, ost->sync_opts:%lld, ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, ost->sync_ipts, nb_frames);
707 if (nb_frames == 0){
708 ++nb_frames_drop;
709 if (verbose>2)
710 fprintf(stderr, "*** drop!\n");
711 }else if (nb_frames > 1) {
712 nb_frames_dup += nb_frames;
713 if (verbose>2)
714 fprintf(stderr, "*** %d dup!\n", nb_frames-1);
715 }
716 }else
717 ost->sync_opts= lrintf(ost->sync_ipts * enc->frame_rate / enc->frame_rate_base);
718
719 nb_frames= FFMIN(nb_frames, max_frames[CODEC_TYPE_VIDEO] - ost->frame_number);
720 if (nb_frames <= 0)
721 return;
722
723 /* convert pixel format if needed */
724 target_pixfmt = ost->video_resample || ost->video_pad
725 ? PIX_FMT_YUV420P : enc->pix_fmt;
726 if (dec->pix_fmt != target_pixfmt) {
727 int size;
728
729 /* create temporary picture */
730 size = avpicture_get_size(target_pixfmt, dec->width, dec->height);
731 buf = av_malloc(size);
732 if (!buf)
733 return;
734 formatted_picture = &picture_format_temp;
735 avpicture_fill((AVPicture*)formatted_picture, buf, target_pixfmt, dec->width, dec->height);
736
737 if (img_convert((AVPicture*)formatted_picture, target_pixfmt,
738 (AVPicture *)in_picture, dec->pix_fmt,
739 dec->width, dec->height) < 0) {
740
741 if (verbose >= 0)
742 fprintf(stderr, "pixel format conversion not handled\n");
743
744 goto the_end;
745 }
746 } else {
747 formatted_picture = in_picture;
748 }
749
750 /* XXX: resampling could be done before raw format conversion in
751 some cases to go faster */
752 /* XXX: only works for YUV420P */
753 if (ost->video_resample) {
754 final_picture = &ost->pict_tmp;
755 img_resample(ost->img_resample_ctx, (AVPicture*)final_picture, (AVPicture*)formatted_picture);
756
757 if (ost->padtop || ost->padbottom || ost->padleft || ost->padright) {
758 fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
759 ost->padtop, ost->padbottom, ost->padleft, ost->padright,
760 padcolor);
761 }
762
763 if (enc->pix_fmt != PIX_FMT_YUV420P) {
764 int size;
765
766 av_free(buf);
767 /* create temporary picture */
768 size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
769 buf = av_malloc(size);
770 if (!buf)
771 return;
772 final_picture = &picture_format_temp;
773 avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
774
775 if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
776 (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
777 enc->width, enc->height) < 0) {
778
779 if (verbose >= 0)
780 fprintf(stderr, "pixel format conversion not handled\n");
781
782 goto the_end;
783 }
784 }
785 } else if (ost->video_crop) {
786 picture_crop_temp.data[0] = formatted_picture->data[0] +
787 (ost->topBand * formatted_picture->linesize[0]) + ost->leftBand;
788
789 picture_crop_temp.data[1] = formatted_picture->data[1] +
790 ((ost->topBand >> 1) * formatted_picture->linesize[1]) +
791 (ost->leftBand >> 1);
792
793 picture_crop_temp.data[2] = formatted_picture->data[2] +
794 ((ost->topBand >> 1) * formatted_picture->linesize[2]) +
795 (ost->leftBand >> 1);
796
797 picture_crop_temp.linesize[0] = formatted_picture->linesize[0];
798 picture_crop_temp.linesize[1] = formatted_picture->linesize[1];
799 picture_crop_temp.linesize[2] = formatted_picture->linesize[2];
800 final_picture = &picture_crop_temp;
801 } else if (ost->video_pad) {
802 final_picture = &ost->pict_tmp;
803
804 for (i = 0; i < 3; i++) {
805 uint8_t *optr, *iptr;
806 int shift = (i == 0) ? 0 : 1;
807 int y, yheight;
808
809 /* set offset to start writing image into */
810 optr = final_picture->data[i] + (((final_picture->linesize[i] *
811 ost->padtop) + ost->padleft) >> shift);
812 iptr = formatted_picture->data[i];
813
814 yheight = (enc->height - ost->padtop - ost->padbottom) >> shift;
815 for (y = 0; y < yheight; y++) {
816 /* copy unpadded image row into padded image row */
817 memcpy(optr, iptr, formatted_picture->linesize[i]);
818 optr += final_picture->linesize[i];
819 iptr += formatted_picture->linesize[i];
820 }
821 }
822
823 fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
824 ost->padtop, ost->padbottom, ost->padleft, ost->padright,
825 padcolor);
826
827 if (enc->pix_fmt != PIX_FMT_YUV420P) {
828 int size;
829
830 av_free(buf);
831 /* create temporary picture */
832 size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
833 buf = av_malloc(size);
834 if (!buf)
835 return;
836 final_picture = &picture_format_temp;
837 avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
838
839 if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
840 (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
841 enc->width, enc->height) < 0) {
842
843 if (verbose >= 0)
844 fprintf(stderr, "pixel format conversion not handled\n");
845
846 goto the_end;
847 }
848 }
849 } else {
850 final_picture = formatted_picture;
851 }
852 /* duplicates frame if needed */
853 for(i=0;i<nb_frames;i++) {
854 AVPacket pkt;
855 av_init_packet(&pkt);
856 pkt.stream_index= ost->index;
857
858 if (s->oformat->flags & AVFMT_RAWPICTURE) {
859 /* raw pictures are written as AVPicture structure to
860 avoid any copies. We temporarily support the older
861 method. */
862 AVFrame* old_frame = enc->coded_frame;
863 enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
864 pkt.data= (uint8_t *)final_picture;
865 pkt.size= sizeof(AVPicture);
866 if(dec->coded_frame)
867 pkt.pts= dec->coded_frame->pts;
868 if(dec->coded_frame && dec->coded_frame->key_frame)
869 pkt.flags |= PKT_FLAG_KEY;
870
871 av_interleaved_write_frame(s, &pkt);
872 enc->coded_frame = old_frame;
873 } else {
874 AVFrame big_picture;
875
876 big_picture= *final_picture;
877 /* better than nothing: use input picture interlaced
878 settings */
879 big_picture.interlaced_frame = in_picture->interlaced_frame;
880 if(do_interlace_me || do_interlace_dct){
881 if(top_field_first == -1)
882 big_picture.top_field_first = in_picture->top_field_first;
883 else
884 big_picture.top_field_first = top_field_first;
885 }
886
887 /* sameq is handled here. This is not correct because it may
888 not be a global option */
889 if (same_quality) {
890 big_picture.quality = ist->st->quality;
891 }else
892 big_picture.quality = ost->st->quality;
893 if(!me_threshold)
894 big_picture.pict_type = 0;
895 // big_picture.pts = AV_NOPTS_VALUE;
896 big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->frame_rate_base, enc->frame_rate);
897 //av_log(NULL, AV_LOG_DEBUG, "%lld -> encoder\n", ost->sync_opts);
898 ret = avcodec_encode_video(enc,
899 bit_buffer, bit_buffer_size,
900 &big_picture);
901 //enc->frame_number = enc->real_pict_num;
902 if(ret){
903 pkt.data= bit_buffer;
904 pkt.size= ret;
905 if(enc->coded_frame)
906 pkt.pts= enc->coded_frame->pts;
907 /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %lld/%lld\n",
908 pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->frame_rate, AV_TIME_BASE*(int64_t)enc->frame_rate_base) : -1,
909 pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->frame_rate, AV_TIME_BASE*(int64_t)enc->frame_rate_base) : -1);*/
910
911 if(enc->coded_frame && enc->coded_frame->key_frame)
912 pkt.flags |= PKT_FLAG_KEY;
913 av_interleaved_write_frame(s, &pkt);
914 *frame_size = ret;
915 //fprintf(stderr,"\nFrame: %3d %3d size: %5d type: %d",
916 // enc->frame_number-1, enc->real_pict_num, ret,
917 // enc->pict_type);
918 /* if two pass, output log */
919 if (ost->logfile && enc->stats_out) {
920 fprintf(ost->logfile, "%s", enc->stats_out);
921 }
922 }
923 }
924 ost->sync_opts++;
925 ost->frame_number++;
926 }
927 the_end:
928 av_free(buf);
929 av_free(buf1);
930 }
931
932 static double psnr(double d){
933 if(d==0) return INFINITY;
934 return -10.0*log(d)/log(10.0);
935 }
936
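/* Append one line of per-frame statistics (frame number, quantizer,
   optional PSNR, frame size, elapsed time, instantaneous and average
   bitrate, picture type) to a vstats_HHMMSS.log file; enabled by -vstats. */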
937 static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
938 int frame_size)
939 {
940 static FILE *fvstats=NULL;
941 char filename[40];
942 time_t today2;
943 struct tm *today;
944 AVCodecContext *enc;
945 int frame_number;
946 int64_t ti;
947 double ti1, bitrate, avg_bitrate;
948
949 if (!fvstats) {
950 today2 = time(NULL);
951 today = localtime(&today2);
952 sprintf(filename, "vstats_%02d%02d%02d.log", today->tm_hour,
953 today->tm_min,
954 today->tm_sec);
955 fvstats = fopen(filename,"w");
956 if (!fvstats) {
957 perror("fopen");
958 exit(1);
959 }
960 }
961
962 ti = MAXINT64;
963 enc = &ost->st->codec;
964 if (enc->codec_type == CODEC_TYPE_VIDEO) {
965 frame_number = ost->frame_number;
966 fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
967 if (enc->flags&CODEC_FLAG_PSNR)
968 fprintf(fvstats, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
969
970 fprintf(fvstats,"f_size= %6d ", frame_size);
971 /* compute pts value */
972 ti1 = (double)ost->sync_opts *enc->frame_rate_base / enc->frame_rate;
973 if (ti1 < 0.01)
974 ti1 = 0.01;
975
976 bitrate = (double)(frame_size * 8) * enc->frame_rate / enc->frame_rate_base / 1000.0;
977 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
978 fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
979 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
980 fprintf(fvstats,"type= %c\n", av_get_pict_type_char(enc->coded_frame->pict_type));
981 }
982 }
983
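/* Print the progress line (frame count, quantizer, optional PSNR, output
   size, time and bitrate) to stderr at most once every 0.5 seconds; when
   is_last_report is set, also print the final size summary and the muxing
   overhead. */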
984 static void print_report(AVFormatContext **output_files,
985 AVOutputStream **ost_table, int nb_ostreams,
986 int is_last_report)
987 {
988 char buf[1024];
989 AVOutputStream *ost;
990 AVFormatContext *oc, *os;
991 int64_t total_size;
992 AVCodecContext *enc;
993 int frame_number, vid, i;
994 double bitrate, ti1, pts;
995 static int64_t last_time = -1;
996
997 if (!is_last_report) {
998 int64_t cur_time;
999 /* display the report every 0.5 seconds */
1000 cur_time = av_gettime();
1001 if (last_time == -1) {
1002 last_time = cur_time;
1003 return;
1004 }
1005 if ((cur_time - last_time) < 500000)
1006 return;
1007 last_time = cur_time;
1008 }
1009
1010
1011 oc = output_files[0];
1012
1013 total_size = url_ftell(&oc->pb);
1014
1015 buf[0] = '\0';
1016 ti1 = 1e10;
1017 vid = 0;
1018 for(i=0;i<nb_ostreams;i++) {
1019 ost = ost_table[i];
1020 os = output_files[ost->file_index];
1021 enc = &ost->st->codec;
1022 if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1023 sprintf(buf + strlen(buf), "q=%2.1f ",
1024 enc->coded_frame->quality/(float)FF_QP2LAMBDA);
1025 }
1026 if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1027 frame_number = ost->frame_number;
1028 sprintf(buf + strlen(buf), "frame=%5d q=%2.1f ",
1029 frame_number, enc->coded_frame ? enc->coded_frame->quality/(float)FF_QP2LAMBDA : 0);
1030 if(is_last_report)
1031 sprintf(buf + strlen(buf), "L");
1032 if (enc->flags&CODEC_FLAG_PSNR){
1033 int j;
1034 double error, error_sum=0;
1035 double scale, scale_sum=0;
1036 char type[3]= {'Y','U','V'};
1037 sprintf(buf + strlen(buf), "PSNR=");
1038 for(j=0; j<3; j++){
1039 if(is_last_report){
1040 error= enc->error[j];
1041 scale= enc->width*enc->height*255.0*255.0*frame_number;
1042 }else{
1043 error= enc->coded_frame->error[j];
1044 scale= enc->width*enc->height*255.0*255.0;
1045 }
1046 if(j) scale/=4;
1047 error_sum += error;
1048 scale_sum += scale;
1049 sprintf(buf + strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
1050 }
1051 sprintf(buf + strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
1052 }
1053 vid = 1;
1054 }
1055 /* compute min output value */
1056 pts = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
1057 if ((pts < ti1) && (pts > 0))
1058 ti1 = pts;
1059 }
1060 if (ti1 < 0.01)
1061 ti1 = 0.01;
1062
1063 if (verbose || is_last_report) {
1064 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1065
1066 sprintf(buf + strlen(buf),
1067 "size=%8.0fkB time=%0.1f bitrate=%6.1fkbits/s",
1068 (double)total_size / 1024, ti1, bitrate);
1069
1070 if (verbose > 1)
1071 sprintf(buf + strlen(buf), " dup=%d drop=%d",
1072 nb_frames_dup, nb_frames_drop);
1073
1074 if (verbose >= 0)
1075 fprintf(stderr, "%s \r", buf);
1076
1077 fflush(stderr);
1078 }
1079
1080 if (is_last_report && verbose >= 0){
1081 int64_t raw= audio_size + video_size + extra_size;
1082 fprintf(stderr, "\n");
1083 fprintf(stderr, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1084 video_size/1024.0,
1085 audio_size/1024.0,
1086 extra_size/1024.0,
1087 100.0*(total_size - raw)/raw
1088 );
1089 }
1090 }
1091
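/* Process one demuxed packet from input stream 'ist': decode it when any
   mapped output needs encoding, maintain a synthetic next_pts for packets
   without timestamps, then for every output stream fed by this input either
   run the audio/video encoder or copy the packet through unchanged. Called
   with pkt == NULL at end of file to flush decoder and encoder buffers. */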
1092 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1093 static int output_packet(AVInputStream *ist, int ist_index,
1094 AVOutputStream **ost_table, int nb_ostreams,
1095 const AVPacket *pkt)
1096 {
1097 AVFormatContext *os;
1098 AVOutputStream *ost;
1099 uint8_t *ptr;
1100 int len, ret, i;
1101 uint8_t *data_buf;
1102 int data_size, got_picture;
1103 AVFrame picture;
1104 void *buffer_to_free;
1105 static int samples_size= 0;
1106 static short *samples= NULL;
1107
1108 if(!pkt){
1109 ist->pts= ist->next_pts; // needed for last packet if vsync=0
1110 } else if (pkt->dts != AV_NOPTS_VALUE) { //FIXME seems redundant, as libavformat does this too
1111 ist->next_pts = ist->pts = pkt->dts;
1112 } else {
1113 assert(ist->pts == ist->next_pts);
1114 }
1115
1116 if (pkt == NULL) {
1117 /* EOF handling */
1118 ptr = NULL;
1119 len = 0;
1120 goto handle_eof;
1121 }
1122
1123 len = pkt->size;
1124 ptr = pkt->data;
1125 while (len > 0) {
1126 handle_eof:
1127 /* decode the packet if needed */
1128 data_buf = NULL; /* fail safe */
1129 data_size = 0;
1130 if (ist->decoding_needed) {
1131 switch(ist->st->codec.codec_type) {
1132 case CODEC_TYPE_AUDIO:{
1133 if(pkt)
1134 samples= av_fast_realloc(samples, &samples_size, FFMAX(pkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE));
1135 /* XXX: could avoid copy if PCM 16 bits with same
1136 endianness as CPU */
1137 ret = avcodec_decode_audio(&ist->st->codec, samples, &data_size,
1138 ptr, len);
1139 if (ret < 0)
1140 goto fail_decode;
1141 ptr += ret;
1142 len -= ret;
1143 /* Some bug in the mpeg audio decoder gives */
1144 /* data_size < 0; it seems to be an overflow */
1145 if (data_size <= 0) {
1146 /* no audio frame */
1147 continue;
1148 }
1149 data_buf = (uint8_t *)samples;
1150 ist->next_pts += ((int64_t)AV_TIME_BASE/2 * data_size) /
1151 (ist->st->codec.sample_rate * ist->st->codec.channels);
1152 break;}
1153 case CODEC_TYPE_VIDEO:
1154 data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2;
1155 /* XXX: allocate picture correctly */
1156 avcodec_get_frame_defaults(&picture);
1157
1158 ret = avcodec_decode_video(&ist->st->codec,
1159 &picture, &got_picture, ptr, len);
1160 ist->st->quality= picture.quality;
1161 if (ret < 0)
1162 goto fail_decode;
1163 if (!got_picture) {
1164 /* no picture yet */
1165 goto discard_packet;
1166 }
1167 if (ist->st->codec.frame_rate_base != 0) {
1168 ist->next_pts += ((int64_t)AV_TIME_BASE *
1169 ist->st->codec.frame_rate_base) /
1170 ist->st->codec.frame_rate;
1171 }
1172 len = 0;
1173 break;
1174 default:
1175 goto fail_decode;
1176 }
1177 } else {
1178 switch(ist->st->codec.codec_type) {
1179 case CODEC_TYPE_AUDIO:
1180 ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec.frame_size) /
1181 (ist->st->codec.sample_rate * ist->st->codec.channels);
1182 break;
1183 case CODEC_TYPE_VIDEO:
1184 if (ist->st->codec.frame_rate_base != 0) {
1185 ist->next_pts += ((int64_t)AV_TIME_BASE *
1186 ist->st->codec.frame_rate_base) /
1187 ist->st->codec.frame_rate;
1188 }
1189 break;
1190 }
1191 data_buf = ptr;
1192 data_size = len;
1193 ret = len;
1194 len = 0;
1195 }
1196
1197 buffer_to_free = NULL;
1198 if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO) {
1199 pre_process_video_frame(ist, (AVPicture *)&picture,
1200 &buffer_to_free);
1201 }
1202
1203 /* frame rate emulation */
1204 if (ist->st->codec.rate_emu) {
1205 int64_t pts = av_rescale((int64_t) ist->frame * ist->st->codec.frame_rate_base, 1000000, ist->st->codec.frame_rate);
1206 int64_t now = av_gettime() - ist->start;
1207 if (pts > now)
1208 usleep(pts - now);
1209
1210 ist->frame++;
1211 }
1212
1213 #if 0
1214 /* mpeg PTS reordering: if it is a P or I frame, the PTS
1215 is the one of the next displayed one */
1216 /* XXX: add mpeg4 too ? */
1217 if (ist->st->codec.codec_id == CODEC_ID_MPEG1VIDEO) {
1218 if (ist->st->codec.pict_type != B_TYPE) {
1219 int64_t tmp;
1220 tmp = ist->last_ip_pts;
1221 ist->last_ip_pts = ist->frac_pts.val;
1222 ist->frac_pts.val = tmp;
1223 }
1224 }
1225 #endif
1226 /* if output time reached then transcode raw format,
1227 encode packets and output them */
1228 if (start_time == 0 || ist->pts >= start_time)
1229 for(i=0;i<nb_ostreams;i++) {
1230 int frame_size;
1231
1232 ost = ost_table[i];
1233 if (ost->source_index == ist_index) {
1234 os = output_files[ost->file_index];
1235
1236 #if 0
1237 printf("%d: got pts=%0.3f %0.3f\n", i,
1238 (double)pkt->pts / AV_TIME_BASE,
1239 ((double)ist->pts / AV_TIME_BASE) -
1240 ((double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den));
1241 #endif
1242 /* set the input output pts pairs */
1243 ost->sync_ipts = (double)(ist->pts + input_files_ts_offset[ist->file_index])/ AV_TIME_BASE;
1244
1245 if (ost->encoding_needed) {
1246 switch(ost->st->codec.codec_type) {
1247 case CODEC_TYPE_AUDIO:
1248 do_audio_out(os, ost, ist, data_buf, data_size);
1249 break;
1250 case CODEC_TYPE_VIDEO:
1251 do_video_out(os, ost, ist, &picture, &frame_size);
1252 video_size += frame_size;
1253 if (do_vstats && frame_size)
1254 do_video_stats(os, ost, frame_size);
1255 break;
1256 default:
1257 av_abort();
1258 }
1259 } else {
1260 AVFrame avframe; //FIXME/XXX remove this
1261 AVPacket opkt;
1262 av_init_packet(&opkt);
1263
1264 /* no reencoding needed : output the packet directly */
1265 /* force the input stream PTS */
1266
1267 avcodec_get_frame_defaults(&avframe);
1268 ost->st->codec.coded_frame= &avframe;
1269 avframe.key_frame = pkt->flags & PKT_FLAG_KEY;
1270
1271 if(ost->st->codec.codec_type == CODEC_TYPE_AUDIO)
1272 audio_size += data_size;
1273 else if (ost->st->codec.codec_type == CODEC_TYPE_VIDEO) {
1274 video_size += data_size;
1275 ost->sync_opts++;
1276 }
1277
1278 opkt.stream_index= ost->index;
1279 opkt.data= data_buf;
1280 opkt.size= data_size;
1281 if(pkt->pts != AV_NOPTS_VALUE)
1282 opkt.pts= pkt->pts + input_files_ts_offset[ist->file_index];
1283 else
1284 opkt.pts= AV_NOPTS_VALUE;
1285 opkt.dts= pkt->dts + input_files_ts_offset[ist->file_index];
1286 opkt.flags= pkt->flags;
1287
1288 av_interleaved_write_frame(os, &opkt);
1289 ost->st->codec.frame_number++;
1290 ost->frame_number++;
1291 }
1292 }
1293 }
1294 av_free(buffer_to_free);
1295 }
1296 discard_packet:
1297 if (pkt == NULL) {
1298 /* EOF handling */
1299
1300 for(i=0;i<nb_ostreams;i++) {
1301 ost = ost_table[i];
1302 if (ost->source_index == ist_index) {
1303 AVCodecContext *enc= &ost->st->codec;
1304 os = output_files[ost->file_index];
1305
1306 if(ost->st->codec.codec_type == CODEC_TYPE_AUDIO && enc->frame_size <=1)
1307 continue;
1308 if(ost->st->codec.codec_type == CODEC_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
1309 continue;
1310
1311 if (ost->encoding_needed) {
1312 for(;;) {
1313 AVPacket pkt;
1314 av_init_packet(&pkt);
1315 pkt.stream_index= ost->index;
1316
1317 switch(ost->st->codec.codec_type) {
1318 case CODEC_TYPE_AUDIO:
1319 ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
1320 audio_size += ret;
1321 pkt.flags |= PKT_FLAG_KEY;
1322 break;
1323 case CODEC_TYPE_VIDEO:
1324 ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
1325 video_size += ret;
1326 if(enc->coded_frame && enc->coded_frame->key_frame)
1327 pkt.flags |= PKT_FLAG_KEY;
1328 if (ost->logfile && enc->stats_out) {
1329 fprintf(ost->logfile, "%s", enc->stats_out);
1330 }
1331 break;
1332 default:
1333 ret=-1;
1334 }
1335
1336 if(ret<=0)
1337 break;
1338 pkt.data= bit_buffer;
1339 pkt.size= ret;
1340 if(enc->coded_frame)
1341 pkt.pts= enc->coded_frame->pts;
1342 av_interleaved_write_frame(os, &pkt);
1343 }
1344 }
1345 }
1346 }
1347 }
1348
1349 return 0;
1350 fail_decode:
1351 return -1;
1352 }
1353
1354
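/* av_encode() drives the whole conversion: it builds the input and output
   stream tables, resolves the stream maps (-map), decides per output stream
   whether to copy or transcode (setting up audio resampling and video
   crop/pad/resize as needed), opens encoders and decoders, writes the
   output headers, then repeatedly reads a packet from the input file with
   the smallest output pts and passes it to output_packet() until EOF, a
   'q' keypress or a signal ends the loop. */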
1355 /*
1356 * The following code is the main loop of the file converter
1357 */
1358 static int av_encode(AVFormatContext **output_files,
1359 int nb_output_files,
1360 AVFormatContext **input_files,
1361 int nb_input_files,
1362 AVStreamMap *stream_maps, int nb_stream_maps)
1363 {
1364 int ret, i, j, k, n, nb_istreams = 0, nb_ostreams = 0;
1365 AVFormatContext *is, *os;
1366 AVCodecContext *codec, *icodec;
1367 AVOutputStream *ost, **ost_table = NULL;
1368 AVInputStream *ist, **ist_table = NULL;
1369 AVInputFile *file_table;
1370 AVFormatContext *stream_no_data;
1371 int key;
1372
1373 file_table= (AVInputFile*) av_mallocz(nb_input_files * sizeof(AVInputFile));
1374 if (!file_table)
1375 goto fail;
1376
1377 /* input stream init */
1378 j = 0;
1379 for(i=0;i<nb_input_files;i++) {
1380 is = input_files[i];
1381 file_table[i].ist_index = j;
1382 file_table[i].nb_streams = is->nb_streams;
1383 j += is->nb_streams;
1384 }
1385 nb_istreams = j;
1386
1387 ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *));
1388 if (!ist_table)
1389 goto fail;
1390
1391 for(i=0;i<nb_istreams;i++) {
1392 ist = av_mallocz(sizeof(AVInputStream));
1393 if (!ist)
1394 goto fail;
1395 ist_table[i] = ist;
1396 }
1397 j = 0;
1398 for(i=0;i<nb_input_files;i++) {
1399 is = input_files[i];
1400 for(k=0;k<is->nb_streams;k++) {
1401 ist = ist_table[j++];
1402 ist->st = is->streams[k];
1403 ist->file_index = i;
1404 ist->index = k;
1405 ist->discard = 1; /* the stream is discarded by default
1406 (changed later) */
1407
1408 if (ist->st->codec.rate_emu) {
1409 ist->start = av_gettime();
1410 ist->frame = 0;
1411 }
1412 }
1413 }
1414
1415 /* output stream init */
1416 nb_ostreams = 0;
1417 for(i=0;i<nb_output_files;i++) {
1418 os = output_files[i];
1419 nb_ostreams += os->nb_streams;
1420 }
1421 if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) {
1422 fprintf(stderr, "Number of stream maps must match number of output streams\n");
1423 exit(1);
1424 }
1425
1426 /* Sanity check the mapping args -- do the input files & streams exist? */
1427 for(i=0;i<nb_stream_maps;i++) {
1428 int fi = stream_maps[i].file_index;
1429 int si = stream_maps[i].stream_index;
1430
1431 if (fi < 0 || fi > nb_input_files - 1 ||
1432 si < 0 || si > file_table[fi].nb_streams - 1) {
1433 fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si);
1434 exit(1);
1435 }
1436 }
1437
1438 ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams);
1439 if (!ost_table)
1440 goto fail;
1441 for(i=0;i<nb_ostreams;i++) {
1442 ost = av_mallocz(sizeof(AVOutputStream));
1443 if (!ost)
1444 goto fail;
1445 ost_table[i] = ost;
1446 }
1447
1448 n = 0;
1449 for(k=0;k<nb_output_files;k++) {
1450 os = output_files[k];
1451 for(i=0;i<os->nb_streams;i++) {
1452 int found;
1453 ost = ost_table[n++];
1454 ost->file_index = k;
1455 ost->index = i;
1456 ost->st = os->streams[i];
1457 if (nb_stream_maps > 0) {
1458 ost->source_index = file_table[stream_maps[n-1].file_index].ist_index +
1459 stream_maps[n-1].stream_index;
1460
1461 /* Sanity check that the stream types match */
1462 if (ist_table[ost->source_index]->st->codec.codec_type != ost->st->codec.codec_type) {
1463 fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n",
1464 stream_maps[n-1].file_index, stream_maps[n-1].stream_index,
1465 ost->file_index, ost->index);
1466 exit(1);
1467 }
1468
1469 } else {
1470 /* get corresponding input stream index : we select the first one with the right type */
1471 found = 0;
1472 for(j=0;j<nb_istreams;j++) {
1473 ist = ist_table[j];
1474 if (ist->discard &&
1475 ist->st->codec.codec_type == ost->st->codec.codec_type) {
1476 ost->source_index = j;
1477 found = 1;
1478 break;
1479 }
1480 }
1481
1482 if (!found) {
1483 /* try again and reuse existing stream */
1484 for(j=0;j<nb_istreams;j++) {
1485 ist = ist_table[j];
1486 if (ist->st->codec.codec_type == ost->st->codec.codec_type) {
1487 ost->source_index = j;
1488 found = 1;
1489 }
1490 }
1491 if (!found) {
1492 fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n",
1493 ost->file_index, ost->index);
1494 exit(1);
1495 }
1496 }
1497 }
1498 ist = ist_table[ost->source_index];
1499 ist->discard = 0;
1500 }
1501 }
1502
1503 /* for each output stream, we compute the right encoding parameters */
1504 for(i=0;i<nb_ostreams;i++) {
1505 ost = ost_table[i];
1506 ist = ist_table[ost->source_index];
1507
1508 codec = &ost->st->codec;
1509 icodec = &ist->st->codec;
1510
1511 if (ost->st->stream_copy) {
1512 /* if stream_copy is selected, no need to decode or encode */
1513 codec->codec_id = icodec->codec_id;
1514 codec->codec_type = icodec->codec_type;
1515 if(!codec->codec_tag) codec->codec_tag = icodec->codec_tag;
1516 codec->bit_rate = icodec->bit_rate;
1517 switch(codec->codec_type) {
1518 case CODEC_TYPE_AUDIO:
1519 codec->sample_rate = icodec->sample_rate;
1520 codec->channels = icodec->channels;
1521 codec->frame_size = icodec->frame_size;
1522 break;
1523 case CODEC_TYPE_VIDEO:
1524 codec->frame_rate = icodec->frame_rate;
1525 codec->frame_rate_base = icodec->frame_rate_base;
1526 codec->width = icodec->width;
1527 codec->height = icodec->height;
1528 break;
1529 default:
1530 av_abort();
1531 }
1532 } else {
1533 switch(codec->codec_type) {
1534 case CODEC_TYPE_AUDIO:
1535 if (fifo_init(&ost->fifo, 2 * MAX_AUDIO_PACKET_SIZE))
1536 goto fail;
1537
1538 if (codec->channels == icodec->channels &&
1539 codec->sample_rate == icodec->sample_rate) {
1540 ost->audio_resample = 0;
1541 } else {
1542 if (codec->channels != icodec->channels &&
1543 (icodec->codec_id == CODEC_ID_AC3 ||
1544 icodec->codec_id == CODEC_ID_DTS)) {
1545 /* Special case for 5.1 AC3 and DTS input */
1546 /* and mono or stereo output */
1547 /* Request specific number of channels */
1548 icodec->channels = codec->channels;
1549 if (codec->sample_rate == icodec->sample_rate)
1550 ost->audio_resample = 0;
1551 else {
1552 ost->audio_resample = 1;
1553 }
1554 } else {
1555 ost->audio_resample = 1;
1556 }
1557 }
1558 if(audio_sync_method>1)
1559 ost->audio_resample = 1;
1560
1561 if(ost->audio_resample){
1562 ost->resample = audio_resample_init(codec->channels, icodec->channels,
1563 codec->sample_rate, icodec->sample_rate);
1564 if(!ost->resample){
1565 printf("Can't resample. Aborting.\n");
1566 av_abort();
1567 }
1568 }
1569 ist->decoding_needed = 1;
1570 ost->encoding_needed = 1;
1571 break;
1572 case CODEC_TYPE_VIDEO:
1573 if (codec->width == icodec->width &&
1574 codec->height == icodec->height &&
1575 frame_topBand == 0 &&
1576 frame_bottomBand == 0 &&
1577 frame_leftBand == 0 &&
1578 frame_rightBand == 0 &&
1579 frame_padtop == 0 &&
1580 frame_padbottom == 0 &&
1581 frame_padleft == 0 &&
1582 frame_padright == 0)
1583 {
1584 ost->video_resample = 0;
1585 ost->video_crop = 0;
1586 ost->video_pad = 0;
1587 } else if ((codec->width == icodec->width -
1588 (frame_leftBand + frame_rightBand)) &&
1589 (codec->height == icodec->height -
1590 (frame_topBand + frame_bottomBand)))
1591 {
1592 ost->video_resample = 0;
1593 ost->video_crop = 1;
1594 ost->topBand = frame_topBand;
1595 ost->leftBand = frame_leftBand;
1596 } else if ((codec->width == icodec->width +
1597 (frame_padleft + frame_padright)) &&
1598 (codec->height == icodec->height +
1599 (frame_padtop + frame_padbottom))) {
1600 ost->video_resample = 0;
1601 ost->video_crop = 0;
1602 ost->video_pad = 1;
1603 ost->padtop = frame_padtop;
1604 ost->padleft = frame_padleft;
1605 ost->padbottom = frame_padbottom;
1606 ost->padright = frame_padright;
1607 avcodec_get_frame_defaults(&ost->pict_tmp);
1608 if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
1609 codec->width, codec->height ) )
1610 goto fail;
1611 } else {
1612 ost->video_resample = 1;
1613 ost->video_crop = 0; // cropping is handled as part of resample
1614 avcodec_get_frame_defaults(&ost->pict_tmp);
1615 if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
1616 codec->width, codec->height ) )
1617 goto fail;
1618
1619 ost->img_resample_ctx = img_resample_full_init(
1620 ost->st->codec.width, ost->st->codec.height,
1621 ist->st->codec.width, ist->st->codec.height,
1622 frame_topBand, frame_bottomBand,
1623 frame_leftBand, frame_rightBand,
1624 frame_padtop, frame_padbottom,
1625 frame_padleft, frame_padright);
1626
1627 ost->padtop = frame_padtop;
1628 ost->padleft = frame_padleft;
1629 ost->padbottom = frame_padbottom;
1630 ost->padright = frame_padright;
1631
1632 }
1633 ost->encoding_needed = 1;
1634 ist->decoding_needed = 1;
1635 break;
1636 default:
1637 av_abort();
1638 }
1639 /* two pass mode */
1640 if (ost->encoding_needed &&
1641 (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1642 char logfilename[1024];
1643 FILE *f;
1644 int size;
1645 char *logbuffer;
1646
1647 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1648 pass_logfilename ?
1649 pass_logfilename : DEFAULT_PASS_LOGFILENAME, i);
1650 if (codec->flags & CODEC_FLAG_PASS1) {
1651 f = fopen(logfilename, "w");
1652 if (!f) {
1653 perror(logfilename);
1654 exit(1);
1655 }
1656 ost->logfile = f;
1657 } else {
1658 /* read the log file */
1659 f = fopen(logfilename, "r");
1660 if (!f) {
1661 perror(logfilename);
1662 exit(1);
1663 }
1664 fseek(f, 0, SEEK_END);
1665 size = ftell(f);
1666 fseek(f, 0, SEEK_SET);
1667 logbuffer = av_malloc(size + 1);
1668 if (!logbuffer) {
1669 fprintf(stderr, "Could not allocate log buffer\n");
1670 exit(1);
1671 }
1672 size = fread(logbuffer, 1, size, f);
1673 fclose(f);
1674 logbuffer[size] = '\0';
1675 codec->stats_in = logbuffer;
1676 }
1677 }
1678 }
1679 if(codec->codec_type == CODEC_TYPE_VIDEO){
1680 int size= codec->width * codec->height;
1681 bit_buffer_size= FFMAX(bit_buffer_size, 4*size);
1682 }
1683 }
1684
1685 if (!bit_buffer)
1686 bit_buffer = av_malloc(bit_buffer_size);
1687 if (!bit_buffer)
1688 goto fail;
1689
1690 /* dump the file output parameters - cannot be done before in case
1691 of stream copy */
1692 for(i=0;i<nb_output_files;i++) {
1693 dump_format(output_files[i], i, output_files[i]->filename, 1);
1694 }
1695
1696 /* dump the stream mapping */
1697 if (verbose >= 0) {
1698 fprintf(stderr, "Stream mapping:\n");
1699 for(i=0;i<nb_ostreams;i++) {
1700 ost = ost_table[i];
1701 fprintf(stderr, " Stream #%d.%d -> #%d.%d\n",
1702 ist_table[ost->source_index]->file_index,
1703 ist_table[ost->source_index]->index,
1704 ost->file_index,
1705 ost->index);
1706 }
1707 }
1708
1709 /* open each encoder */
1710 for(i=0;i<nb_ostreams;i++) {
1711 ost = ost_table[i];
1712 if (ost->encoding_needed) {
1713 AVCodec *codec;
1714 codec = avcodec_find_encoder(ost->st->codec.codec_id);
1715 if (!codec) {
1716 fprintf(stderr, "Unsupported codec for output stream #%d.%d\n",
1717 ost->file_index, ost->index);
1718 exit(1);
1719 }
1720 if (avcodec_open(&ost->st->codec, codec) < 0) {
1721 fprintf(stderr, "Error while opening codec for stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height\n",
1722 ost->file_index, ost->index);
1723 exit(1);
1724 }
1725 extra_size += ost->st->codec.extradata_size;
1726 }
1727 }
1728
1729 /* open each decoder */
1730 for(i=0;i<nb_istreams;i++) {
1731 ist = ist_table[i];
1732 if (ist->decoding_needed) {
1733 AVCodec *codec;
1734 codec = avcodec_find_decoder(ist->st->codec.codec_id);
1735 if (!codec) {
1736 fprintf(stderr, "Unsupported codec (id=%d) for input stream #%d.%d\n",
1737 ist->st->codec.codec_id, ist->file_index, ist->index);
1738 exit(1);
1739 }
1740 if (avcodec_open(&ist->st->codec, codec) < 0) {
1741 fprintf(stderr, "Error while opening codec for input stream #%d.%d\n",
1742 ist->file_index, ist->index);
1743 exit(1);
1744 }
1745 //if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO)
1746 // ist->st->codec.flags |= CODEC_FLAG_REPEAT_FIELD;
1747 }
1748 }
1749
1750 /* init pts */
1751 for(i=0;i<nb_istreams;i++) {
1752 ist = ist_table[i];
1753 is = input_files[ist->file_index];
1754 ist->pts = 0;
1755 ist->next_pts = ist->st->start_time;
1756 if(ist->next_pts == AV_NOPTS_VALUE)
1757 ist->next_pts=0;
1758 ist->is_start = 1;
1759 }
1760
1761 /* compute the maximum buffer size (should use a more complete heuristic) */
1762 for(i=0;i<nb_input_files;i++) {
1763 file_table[i].buffer_size_max = 2048;
1764 }
1765
1766 /* set meta data information from input file if required */
1767 for (i=0;i<nb_meta_data_maps;i++) {
1768 AVFormatContext *out_file;
1769 AVFormatContext *in_file;
1770
1771 int out_file_index = meta_data_maps[i].out_file;
1772 int in_file_index = meta_data_maps[i].in_file;
1773 if ( out_file_index < 0 || out_file_index >= nb_output_files ) {
1774 fprintf(stderr, "Invalid output file index %d map_meta_data(%d,%d)\n", out_file_index, out_file_index, in_file_index);
1775 ret = -EINVAL;
1776 goto fail;
1777 }
1778 if ( in_file_index < 0 || in_file_index >= nb_input_files ) {
1779 fprintf(stderr, "Invalid input file index %d map_meta_data(%d,%d)\n", in_file_index, out_file_index, in_file_index);
1780 ret = -EINVAL;
1781 goto fail;
1782 }
1783
1784 out_file = output_files[out_file_index];
1785 in_file = input_files[in_file_index];
1786
1787 strcpy(out_file->title, in_file->title);
1788 strcpy(out_file->author, in_file->author);
1789 strcpy(out_file->copyright, in_file->copyright);
1790 strcpy(out_file->comment, in_file->comment);
1791 strcpy(out_file->album, in_file->album);
1792 out_file->year = in_file->year;
1793 out_file->track = in_file->track;
1794 strcpy(out_file->genre, in_file->genre);
1795 }
1796
1797 /* open files and write file headers */
1798 for(i=0;i<nb_output_files;i++) {
1799 os = output_files[i];
1800 if (av_write_header(os) < 0) {
1801 fprintf(stderr, "Could not write header for output file #%d (incorrect codec parameters ?)\n", i);
1802 ret = -EINVAL;
1803 goto fail;
1804 }
1805 }
1806
1807 #ifndef CONFIG_WIN32
1808 if ( !using_stdin && verbose >= 0) {
1809 fprintf(stderr, "Press [q] to stop encoding\n");
1810 url_set_interrupt_cb(decode_interrupt_cb);
1811 }
1812 #endif
1813 term_init();
1814
1815 stream_no_data = 0;
1816 key = -1;
1817
1818 for(; received_sigterm == 0;) {
1819 int file_index, ist_index;
1820 AVPacket pkt;
1821 double ipts_min;
1822 double opts_min;
1823
1824 redo:
1825 ipts_min= 1e100;
1826 opts_min= 1e100;
1827 /* if 'q' pressed, exits */
1828 if (!using_stdin) {
1829 if (q_pressed)
1830 break;
1831 /* read_key() returns 0 on EOF */
1832 key = read_key();
1833 if (key == 'q')
1834 break;
1835 }
1836
1837 /* select the stream that we must read now by looking at the
1838 smallest output pts */
1839 file_index = -1;
1840 for(i=0;i<nb_ostreams;i++) {
1841 double ipts, opts;
1842 ost = ost_table[i];
1843 os = output_files[ost->file_index];
1844 ist = ist_table[ost->source_index];
1845 if(ost->st->codec.codec_type == CODEC_TYPE_VIDEO)
1846 opts = (double)ost->sync_opts * ost->st->codec.frame_rate_base / ost->st->codec.frame_rate;
1847 else
1848 opts = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
1849 ipts = (double)ist->pts;
1850 if (!file_table[ist->file_index].eof_reached){
1851 if(ipts < ipts_min) {
1852 ipts_min = ipts;
1853 if(input_sync ) file_index = ist->file_index;
1854 }
1855 if(opts < opts_min) {
1856 opts_min = opts;
1857 if(!input_sync) file_index = ist->file_index;
1858 }
1859 }
1860 if(ost->frame_number >= max_frames[ost->st->codec.codec_type]){
1861 file_index= -1;
1862 break;
1863 }
1864 }
1865 /* if none, we are finished */
1866 if (file_index < 0) {
1867 break;
1868 }
1869
1870 /* finish if recording time exhausted */
1871 if (recording_time > 0 && opts_min >= (recording_time / 1000000.0))
1872 break;
1873
1874 /* read a frame from it and output it in the fifo */
1875 is = input_files[file_index];
1876 if (av_read_frame(is, &pkt) < 0) {
1877 file_table[file_index].eof_reached = 1;
1878 continue;
1879 }
1880
1881 if (!pkt.size) {
1882 stream_no_data = is;
1883 } else {
1884 stream_no_data = 0;
1885 }
1886 if (do_pkt_dump) {
1887 av_pkt_dump(stdout, &pkt, do_hex_dump);
1888 }
1889 /* the following test is needed in case new streams appear
1890 dynamically in the stream: we ignore them */
1891 if (pkt.stream_index >= file_table[file_index].nb_streams)
1892 goto discard_packet;
1893 ist_index = file_table[file_index].ist_index + pkt.stream_index;
1894 ist = ist_table[ist_index];
1895 if (ist->discard)
1896 goto discard_packet;
1897
1898 // fprintf(stderr, "next:%lld dts:%lld off:%lld %d\n", ist->next_pts, pkt.dts, input_files_ts_offset[ist->file_index], ist->st->codec.codec_type);
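/* Timestamp discontinuity handling: if the packet dts deviates from the
   predicted next_pts by more than 10 seconds and timestamp copying (copy_ts)
   is off, adjust this file's timestamp offset and shift the prediction of
   every stream in the file accordingly. */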
1899 if (pkt.dts != AV_NOPTS_VALUE) {
1900 int64_t delta= pkt.dts - ist->next_pts;
1901 if(ABS(delta) > 10LL*AV_TIME_BASE && !copy_ts){
1902 input_files_ts_offset[ist->file_index]-= delta;
1903 if (verbose > 2)
1904 fprintf(stderr, "timestamp discontinuity %lld, new offset= %lld\n", delta, input_files_ts_offset[ist->file_index]);
1905 for(i=0; i<file_table[file_index].nb_streams; i++){
1906 int index= file_table[file_index].ist_index + i;
1907 ist_table[index]->next_pts += delta;
1908 ist_table[index]->is_start=1;
1909 }
1910 }
1911 }
1912
1913 //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->index, pkt.size);
1914 if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
1915
1916 if (verbose >= 0)
1917 fprintf(stderr, "Error while decoding stream #%d.%d\n",
1918 ist->file_index, ist->index);
1919
1920 av_free_packet(&pkt);
1921 goto redo;
1922 }
1923
1924 discard_packet:
1925 av_free_packet(&pkt);
1926
1927 /* dump report by using the first output video and audio streams */
1928 print_report(output_files, ost_table, nb_ostreams, 0);
1929 }
1930
1931 /* at the end of stream, we must flush the decoder buffers */
1932 for(i=0;i<nb_istreams;i++) {
1933 ist = ist_table[i];
1934 if (ist->decoding_needed) {
1935 output_packet(ist, i, ost_table, nb_ostreams, NULL);
1936 }
1937 }
1938
1939 term_exit();
1940
1941 /* write the trailer if needed and close file */
1942 for(i=0;i<nb_output_files;i++) {
1943 os = output_files[i];
1944 av_write_trailer(os);
1945 }
1946
1947 /* dump report by using the first video and audio streams */
1948 print_report(output_files, ost_table, nb_ostreams, 1);
1949
1950 /* close each encoder */
1951 for(i=0;i<nb_ostreams;i++) {
1952 ost = ost_table[i];
1953 if (ost->encoding_needed) {
1954 av_freep(&ost->st->codec.stats_in);
1955 avcodec_close(&ost->st->codec);
1956 }
1957 }
1958
1959 /* close each decoder */
1960 for(i=0;i<nb_istreams;i++) {
1961 ist = ist_table[i];
1962 if (ist->decoding_needed) {
1963 avcodec_close(&ist->st->codec);
1964 }
1965 }
1966
1967 /* finished ! */
1968
1969 ret = 0;
1970 fail1:
1971 av_free(file_table);
1972
1973 if (ist_table) {
1974 for(i=0;i<nb_istreams;i++) {
1975 ist = ist_table[i];
1976 av_free(ist);
1977 }
1978 av_free(ist_table);
1979 }
1980 if (ost_table) {
1981 for(i=0;i<nb_ostreams;i++) {
1982 ost = ost_table[i];
1983 if (ost) {
1984 if (ost->logfile) {
1985 fclose(ost->logfile);
1986 ost->logfile = NULL;
1987 }
1988 fifo_free(&ost->fifo); /* works even if fifo is not
1989 initialized but set to zero */
1990 av_free(ost->pict_tmp.data[0]);
1991 if (ost->video_resample)
1992 img_resample_close(ost->img_resample_ctx);
1993 if (ost->audio_resample)
1994 audio_resample_close(ost->resample);
1995 av_free(ost);
1996 }
1997 }
1998 av_free(ost_table);
1999 }
2000 return ret;
2001 fail:
2002 ret = -ENOMEM;
2003 goto fail1;
2004 }
2005
2006 #if 0
2007 int file_read(const char *filename)
2008 {
2009 URLContext *h;
2010 unsigned char buffer[1024];
2011 int len, i;
2012
2013 if (url_open(&h, filename, O_RDONLY) < 0) {
2014 printf("could not open '%s'\n", filename);
2015 return -1;
2016 }
2017 for(;;) {
2018 len = url_read(h, buffer, sizeof(buffer));
2019 if (len <= 0)
2020 break;
2021 for(i=0;i<len;i++) putchar(buffer[i]);
2022 }
2023 url_close(h);
2024 return 0;
2025 }
2026 #endif
2027
2028 static void opt_image_format(const char *arg)
2029 {
2030 AVImageFormat *f;
2031
2032 for(f = first_image_format; f != NULL; f = f->next) {
2033 if (!strcmp(arg, f->name))
2034 break;
2035 }
2036 if (!f) {
2037 fprintf(stderr, "Unknown image format: '%s'\n", arg);
2038 exit(1);
2039 }
2040 image_format = f;
2041 }
2042
2043 static void opt_format(const char *arg)
2044 {
2045 /* compatibility stuff for pgmyuv */
2046 if (!strcmp(arg, "pgmyuv")) {
2047 pgmyuv_compatibility_hack=1;
2048 // opt_image_format(arg);
2049 arg = "image2";
2050 }
2051
2052 file_iformat = av_find_input_format(arg);
2053 file_oformat = guess_format(arg, NULL, NULL);
2054 if (!file_iformat && !file_oformat) {
2055 fprintf(stderr, "Unknown input or output format: %s\n", arg);
2056 exit(1);
2057 }
2058 }
2059
2060 static void opt_video_bitrate(const char *arg)
2061 {
2062 video_bit_rate = atoi(arg) * 1000;
2063 }
2064
2065 static void opt_video_bitrate_tolerance(const char *arg)
2066 {
2067 video_bit_rate_tolerance = atoi(arg) * 1000;
2068 }
2069
2070 static void opt_video_bitrate_max(const char *arg)
2071 {
2072 video_rc_max_rate = atoi(arg) * 1000;
2073 }
2074
2075 static void opt_video_bitrate_min(const char *arg)
2076 {
2077 video_rc_min_rate = atoi(arg) * 1000;
2078 }
2079
2080 static void opt_video_buffer_size(const char *arg)
2081 {
2082 video_rc_buffer_size = atoi(arg) * 8*1024;
2083 }
2084
2085 static void opt_video_rc_eq(char *arg)
2086 {
2087 video_rc_eq = arg;
2088 }
2089
2090 static void opt_video_rc_override_string(char *arg)
2091 {
2092 video_rc_override_string = arg;
2093 }
2094
2095
2096 static void opt_workaround_bugs(const char *arg)
2097 {
2098 workaround_bugs = atoi(arg);
2099 }
2100
2101 static void opt_dct_algo(const char *arg)
2102 {
2103 dct_algo = atoi(arg);
2104 }
2105
2106 static void opt_idct_algo(const char *arg)
2107 {
2108 idct_algo = atoi(arg);
2109 }
2110
2111 static void opt_me_threshold(const char *arg)
2112 {
2113 me_threshold = atoi(arg);
2114 }
2115
2116 static void opt_mb_threshold(const char *arg)
2117 {
2118 mb_threshold = atoi(arg);
2119 }
2120
2121 static void opt_error_resilience(const char *arg)
2122 {
2123 error_resilience = atoi(arg);
2124 }
2125
2126 static void opt_error_concealment(const char *arg)
2127 {
2128 error_concealment = atoi(arg);
2129 }
2130
2131 static void opt_debug(const char *arg)
2132 {
2133 debug = atoi(arg);
2134 }
2135
2136 static void opt_vismv(const char *arg)
2137 {
2138 debug_mv = atoi(arg);
2139 }
2140
2141 static void opt_verbose(const char *arg)
2142 {
2143 verbose = atoi(arg);
2144 av_log_set_level(atoi(arg));
2145 }
2146
2147 static void opt_frame_rate(const char *arg)
2148 {
2149 if (parse_frame_rate(&frame_rate, &frame_rate_base, arg) < 0) {
2150 fprintf(stderr, "Incorrect frame rate\n");
2151 exit(1);
2152 }
2153 }
2154
2155 static void opt_frame_crop_top(const char *arg)
2156 {
2157 frame_topBand = atoi(arg);
2158 if (frame_topBand < 0) {
2159 fprintf(stderr, "Incorrect top crop size\n");
2160 exit(1);
2161 }
2162 if ((frame_topBand % 2) != 0) {
2163 fprintf(stderr, "Top crop size must be a multiple of 2\n");
2164 exit(1);
2165 }
2166 if ((frame_topBand) >= frame_height){
2167 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2168 exit(1);
2169 }
2170 frame_height -= frame_topBand;
2171 }
2172
2173 static void opt_frame_crop_bottom(const char *arg)
2174 {
2175 frame_bottomBand = atoi(arg);
2176 if (frame_bottomBand < 0) {
2177 fprintf(stderr, "Incorrect bottom crop size\n");
2178 exit(1);
2179 }
2180 if ((frame_bottomBand % 2) != 0) {
2181 fprintf(stderr, "Bottom crop size must be a multiple of 2\n");
2182 exit(1);
2183 }
2184 if ((frame_bottomBand) >= frame_height){
2185 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2186 exit(1);
2187 }
2188 frame_height -= frame_bottomBand;
2189 }
2190
2191 static void opt_frame_crop_left(const char *arg)
2192 {
2193 frame_leftBand = atoi(arg);
2194 if (frame_leftBand < 0) {
2195 fprintf(stderr, "Incorrect left crop size\n");
2196 exit(1);
2197 }
2198 if ((frame_leftBand % 2) != 0) {
2199 fprintf(stderr, "Left crop size must be a multiple of 2\n");
2200 exit(1);
2201 }
2202 if ((frame_leftBand) >= frame_width){
2203 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2204 exit(1);
2205 }
2206 frame_width -= frame_leftBand;
2207 }
2208
2209 static void opt_frame_crop_right(const char *arg)
2210 {
2211 frame_rightBand = atoi(arg);
2212 if (frame_rightBand < 0) {
2213 fprintf(stderr, "Incorrect right crop size\n");
2214 exit(1);
2215 }
2216 if ((frame_rightBand % 2) != 0) {
2217 fprintf(stderr, "Right crop size must be a multiple of 2\n");
2218 exit(1);
2219 }
2220 if ((frame_rightBand) >= frame_width){
2221 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2222 exit(1);
2223 }
2224 frame_width -= frame_rightBand;
2225 }
2226
2227 static void opt_frame_size(const char *arg)
2228 {
2229 if (parse_image_size(&frame_width, &frame_height, arg) < 0) {
2230 fprintf(stderr, "Incorrect frame size\n");
2231 exit(1);
2232 }
2233 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2234 fprintf(stderr, "Frame size must be a multiple of 2\n");
2235 exit(1);
2236 }
2237 }
2238
2239
2240 #define SCALEBITS 10
2241 #define ONE_HALF (1 << (SCALEBITS - 1))
2242 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
2243
2244 #define RGB_TO_Y(r, g, b) \
2245 ((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
2246 FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
2247
2248 #define RGB_TO_U(r1, g1, b1, shift)\
2249 (((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
2250 FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
2251
2252 #define RGB_TO_V(r1, g1, b1, shift)\
2253 (((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
2254 FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
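/* The macros above implement a fixed-point (SCALEBITS fractional bits),
   full-range RGB -> Y'CbCr conversion used to derive the pad color.
   Worked example: mid gray 0x808080 (r=g=b=128) gives Y=128, U=128, V=128,
   since the three Y coefficients sum to exactly 1<<SCALEBITS. */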
2255
2256 static void opt_pad_color(const char *arg) {
2257 /* Input is expected to be six hex digits, similar to
2258 how colors are expressed in HTML (but without the leading #) */
2259 int rgb = strtol(arg, NULL, 16);
2260 int r,g,b;
2261
2262 r = (rgb >> 16);
2263 g = ((rgb >> 8) & 255);
2264 b = (rgb & 255);
2265
2266 padcolor[0] = RGB_TO_Y(r,g,b);
2267 padcolor[1] = RGB_TO_U(r,g,b,0);
2268 padcolor[2] = RGB_TO_V(r,g,b,0);
2269 }
2270
2271 static void opt_frame_pad_top(const char *arg)
2272 {
2273 frame_padtop = atoi(arg);
2274 if (frame_padtop < 0) {
2275 fprintf(stderr, "Incorrect top pad size\n");
2276 exit(1);
2277 }
2278 if ((frame_padtop % 2) != 0) {
2279 fprintf(stderr, "Top pad size must be a multiple of 2\n");
2280 exit(1);
2281 }
2282 }
2283
2284 static void opt_frame_pad_bottom(const char *arg)
2285 {
2286 frame_padbottom = atoi(arg);
2287 if (frame_padbottom < 0) {
2288 fprintf(stderr, "Incorrect bottom pad size\n");
2289 exit(1);
2290 }
2291 if ((frame_padbottom % 2) != 0) {
2292 fprintf(stderr, "Bottom pad size must be a multiple of 2\n");
2293 exit(1);
2294 }
2295 }
2296
2297
2298 static void opt_frame_pad_left(const char *arg)
2299 {
2300 frame_padleft = atoi(arg);
2301 if (frame_padleft < 0) {
2302 fprintf(stderr, "Incorrect left pad size\n");
2303 exit(1);
2304 }
2305 if ((frame_padleft % 2) != 0) {
2306 fprintf(stderr, "Left pad size must be a multiple of 2\n");
2307 exit(1);
2308 }
2309 }
2310
2311
2312 static void opt_frame_pad_right(const char *arg)
2313 {
2314 frame_padright = atoi(arg);
2315 if (frame_padright < 0) {
2316 fprintf(stderr, "Incorrect right pad size\n");
2317 exit(1);
2318 }
2319 if ((frame_padright % 2) != 0) {
2320 fprintf(stderr, "Right pad size must be a multiple of 2\n");
2321 exit(1);
2322 }
2323 }
2324
2325
2326 static void opt_frame_pix_fmt(const char *arg)
2327 {
2328 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2329 }
2330
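/* The frame aspect ratio argument accepts either a "width:height" pair
   (e.g. "16:9") or a single floating point value (e.g. "1.7778"); a value
   that evaluates to 0 is rejected. */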
2331 static void opt_frame_aspect_ratio(const char *arg)
2332 {
2333 int x = 0, y = 0;
2334 double ar = 0;
2335 const char *p;
2336
2337 p = strchr(arg, ':');
2338 if (p) {
2339 x = strtol(arg, (char **)&arg, 10);
2340 if (arg == p)
2341 y = strtol(arg+1, (char **)&arg, 10);
2342 if (x > 0 && y > 0)
2343 ar = (double)x / (double)y;
2344 } else
2345 ar = strtod(arg, (char **)&arg);
2346
2347 if (!ar) {
2348 fprintf(stderr, "Incorrect aspect ratio specification.\n");
2349 exit(1);
2350 }
2351 frame_aspect_ratio = ar;
2352 }
2353
2354 static void opt_gop_size(const char *arg)
2355 {
2356 gop_size = atoi(arg);
2357 }
2358
2359 static void opt_b_frames(const char *arg)
2360 {
2361 b_frames = atoi(arg);
2362 if (b_frames > FF_MAX_B_FRAMES) {
2363 fprintf(stderr, "\nCannot have more than %d B frames, increase FF_MAX_B_FRAMES.\n", FF_MAX_B_FRAMES);
2364 exit(1);
2365 } else if (b_frames < 1) {
2366 fprintf(stderr, "\nNumber of B frames must be higher than 0\n");
2367 exit(1);
2368 }
2369 }
2370
2371 static void opt_mb_decision(const char *arg)
2372 {
2373 mb_decision = atoi(arg);
2374 }
2375
2376 static void opt_mb_cmp(const char *arg)
2377 {
2378 mb_cmp = atoi(arg);
2379 }
2380
2381 static void opt_ildct_cmp(const char *arg)
2382 {
2383 ildct_cmp = atoi(arg);
2384 }
2385
2386 static void opt_sub_cmp(const char *arg)
2387 {
2388 sub_cmp = atoi(arg);
2389 }
2390
2391 static void opt_cmp(const char *arg)
2392 {
2393 cmp = atoi(arg);
2394 }
2395
2396 static void opt_pre_cmp(const char *arg)
2397 {
2398 pre_cmp = atoi(arg);
2399 }
2400
2401 static void opt_pre_me(const char *arg)
2402 {
2403 pre_me = atoi(arg);
2404 }
2405
2406 static void opt_lumi_mask(const char *arg)
2407 {
2408 lumi_mask = atof(arg);
2409 }
2410
2411 static void opt_dark_mask(const char *arg)
2412 {
2413 dark_mask = atof(arg);
2414 }
2415
2416 static void opt_scplx_mask(const char *arg)
2417 {
2418 scplx_mask = atof(arg);
2419 }
2420
2421 static void opt_tcplx_mask(const char *arg)
2422 {
2423 tcplx_mask = atof(arg);
2424 }
2425
2426 static void opt_p_mask(const char *arg)
2427 {
2428 p_mask = atof(arg);
2429 }
2430
2431 static void opt_qscale(const char *arg)
2432 {
2433 video_qscale = atof(arg);
2434 if (video_qscale < 0.01 ||
2435 video_qscale > 255) {
2436 fprintf(stderr, "qscale must be >= 0.01 and <= 255\n");
2437 exit(1);
2438 }
2439 }
2440
2441 static void opt_lmax(const char *arg)
2442 {
2443 video_lmax = atof(arg)*FF_QP2LAMBDA;
2444 }
2445
2446 static void opt_lmin(const char *arg)
2447 {
2448 video_lmin = atof(arg)*FF_QP2LAMBDA;
2449 }
2450
2451 static void opt_qmin(const char *arg)
2452 {
2453 video_qmin = atoi(arg);
2454 if (video_qmin < 1 ||
2455 video_qmin > 31) {
2456 fprintf(stderr, "qmin must be >= 1 and <= 31\n");
2457 exit(1);
2458 }
2459 }
2460
2461 static void opt_qmax(const char *arg)
2462 {
2463 video_qmax = atoi(arg);
2464 if (video_qmax < 1 ||
2465 video_qmax > 31) {
2466 fprintf(stderr, "qmax must be >= 1 and <= 31\n");
2467 exit(1);
2468 }
2469 }
2470
2471 static void opt_mb_qmin(const char *arg)
2472 {
2473 video_mb_qmin = atoi(arg);
2474 if (video_mb_qmin < 1 ||
2475 video_mb_qmin > 31) {
2476 fprintf(stderr, "mb_qmin must be >= 1 and <= 31\n");
2477 exit(1);
2478 }
2479 }
2480
2481 static void opt_mb_qmax(const char *arg)
2482 {
2483 video_mb_qmax = atoi(arg);
2484 if (video_mb_qmax < 1 ||
2485 video_mb_qmax > 31) {
2486 fprintf(stderr, "mb_qmax must be >= 1 and <= 31\n");
2487 exit(1);
2488 }
2489 }
2490
2491 static void opt_qdiff(const char *arg)
2492 {
2493 video_qdiff = atoi(arg);
2494 if (video_qdiff < 1 ||
2495 video_qdiff > 31) {
2496 fprintf(stderr, "qdiff must be >= 1 and <= 31\n");
2497 exit(1);
2498 }
2499 }
2500
2501 static void opt_qblur(const char *arg)
2502 {
2503 video_qblur = atof(arg);
2504 }
2505
2506 static void opt_qcomp(const char *arg)
2507 {
2508 video_qcomp = atof(arg);
2509 }
2510
2511 static void opt_rc_initial_cplx(const char *arg)
2512 {
2513 video_rc_initial_cplx = atof(arg);
2514 }
2515 static void opt_b_qfactor(const char *arg)
2516 {
2517 video_b_qfactor = atof(arg);
2518 }
2519 static void opt_i_qfactor(const char *arg)
2520 {
2521 video_i_qfactor = atof(arg);
2522 }
2523 static void opt_b_qoffset(const char *arg)
2524 {
2525 video_b_qoffset = atof(arg);
2526 }
2527 static void opt_i_qoffset(const char *arg)
2528 {
2529 video_i_qoffset = atof(arg);
2530 }
2531
2532 static void opt_ibias(const char *arg)
2533 {
2534 video_intra_quant_bias = atoi(arg);
2535 }
2536 static void opt_pbias(const char *arg)
2537 {
2538 video_inter_quant_bias = atoi(arg);
2539 }
2540
2541 static void opt_packet_size(const char *arg)
2542 {
2543 packet_size= atoi(arg);
2544 }
2545
2546 static void opt_error_rate(const char *arg)
2547 {
2548 error_rate= atoi(arg);
2549 }
2550
2551 static void opt_strict(const char *arg)
2552 {
2553 strict= atoi(arg);
2554 }
2555
2556 static void opt_top_field_first(const char *arg)
2557 {
2558 top_field_first= atoi(arg);
2559 }
2560
2561 static void opt_noise_reduction(const char *arg)
2562 {
2563 noise_reduction= atoi(arg);
2564 }
2565
2566 static void opt_qns(const char *arg)
2567 {
2568 qns= atoi(arg);
2569 }
2570
2571 static void opt_sc_threshold(const char *arg)
2572 {
2573 sc_threshold= atoi(arg);
2574 }
2575
2576 static void opt_me_range(const char *arg)
2577 {
2578 me_range = atoi(arg);
2579 }
2580
2581 static void opt_thread_count(const char *arg)
2582 {
2583 thread_count= atoi(arg);
2584 #if !defined(HAVE_THREADS)
2585 if (verbose >= 0)
2586 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2587 #endif
2588 }
2589
2590 static void opt_audio_bitrate(const char *arg)
2591 {
2592 audio_bit_rate = atoi(arg) * 1000;
2593 }
2594
2595 static void opt_audio_rate(const char *arg)
2596 {
2597 audio_sample_rate = atoi(arg);
2598 }
2599
2600 static void opt_audio_channels(const char *arg)
2601 {
2602 audio_channels = atoi(arg);
2603 }
2604
2605 static void opt_video_device(const char *arg)
2606 {
2607 video_device = av_strdup(arg);
2608 }
2609
2610 static void opt_grab_device(const char *arg)
2611 {
2612 grab_device = av_strdup(arg);
2613 }
2614
2615 static void opt_video_channel(const char *arg)
2616 {
2617 video_channel = strtol(arg, NULL, 0);
2618 }
2619
2620 static void opt_video_standard(const char *arg)
2621 {
2622 video_standard = av_strdup(arg);
2623 }
2624
2625 static void opt_audio_device(const char *arg)
2626 {
2627 audio_device = av_strdup(arg);
2628 }
2629
2630 static void opt_audio_codec(const char *arg)
2631 {
2632 AVCodec *p;
2633
2634 if (!strcmp(arg, "copy")) {
2635 audio_stream_copy = 1;
2636 } else {
2637 p = first_avcodec;
2638 while (p) {
2639 if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_AUDIO)
2640 break;
2641 p = p->next;
2642 }
2643 if (p == NULL) {
2644 fprintf(stderr, "Unknown audio codec '%s'\n", arg);
2645 exit(1);
2646 } else {
2647 audio_codec_id = p->id;
2648 }
2649 }
2650 }
2651
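/* A codec tag argument is either a number accepted by strtol() with base 0
   (decimal, octal or hex) or a four character code that is packed
   little-endian into the tag, e.g. "XVID". */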
2652 static void opt_audio_tag(const char *arg)
2653 {
2654 char *tail;
2655 audio_codec_tag= strtol(arg, &tail, 0);
2656
2657 if(!tail || *tail)
2658 audio_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
2659 }
2660
2661 static void opt_video_tag(const char *arg)
2662 {
2663 char *tail;
2664 video_codec_tag= strtol(arg, &tail, 0);
2665
2666 if(!tail || *tail)
2667 video_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
2668 }
2669
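/* Register a video hook ("vhook"): the argument is split on spaces into an
   argv[] vector and handed to frame_hook_add(). */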
2670 static void add_frame_hooker(const char *arg)
2671 {
2672 int argc = 0;
2673 char *argv[64];
2674 int i;
2675 char *args = av_strdup(arg);
2676
2677 using_vhook = 1;
2678
2679 argv[0] = strtok(args, " ");
2680 while (argc < 62 && (argv[++argc] = strtok(NULL, " "))) {
2681 }
2682
2683 i = frame_hook_add(argc, argv);
2684
2685 if (i != 0) {
2686 fprintf(stderr, "Failed to add video hook function: %s\n", arg);
2687 exit(1);
2688 }
2689 }
2690
2691 const char *motion_str[] = {
2692 "zero",
2693 "full",
2694 "log",
2695 "phods",
2696 "epzs",
2697 "x1",
2698 NULL,
2699 };
2700
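/* Map a motion estimation method name to its numeric id: the position in
   motion_str plus one is assumed to match the ME_* enum values in libavcodec
   (ME_ZERO == 1, and so on). */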
2701 static void opt_motion_estimation(const char *arg)
2702 {
2703 const char **p;
2704 p = motion_str;
2705 for(;;) {
2706 if (!*p) {
2707 fprintf(stderr, "Unknown motion estimation method '%s'\n", arg);
2708 exit(1);
2709 }
2710 if (!strcmp(*p, arg))
2711 break;
2712 p++;
2713 }
2714 me_method = (p - motion_str) + 1;
2715 }
2716
2717 static void opt_video_codec(const char *arg)
2718 {
2719 AVCodec *p;
2720
2721 if (!strcmp(arg, "copy")) {
2722 video_stream_copy = 1;
2723 } else {
2724 p = first_avcodec;
2725 while (p) {
2726 if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_VIDEO)
2727 break;
2728 p = p->next;
2729 }
2730 if (p == NULL) {
2731 fprintf(stderr, "Unknown video codec '%s'\n", arg);
2732 exit(1);
2733 } else {
2734 video_codec_id = p->id;
2735 }
2736 }
2737 }
2738
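/* A -map argument consists of two integers, the input file index and the
   stream index within that file, separated by a single character (e.g. "0:1");
   opt_map_meta_data below parses an output:input file pair the same way. */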
2739 static void opt_map(const char *arg)
2740 {
2741 AVStreamMap *m;
2742 const char *p;
2743
2744 p = arg;
2745 m = &stream_maps[nb_stream_maps++];
2746
2747 m->file_index = strtol(arg, (char **)&p, 0);
2748 if (*p)
2749 p++;
2750
2751 m->stream_index = strtol(p, (char **)&p, 0);
2752 }
2753
2754 static void opt_map_meta_data(const char *arg)
2755 {
2756 AVMetaDataMap *m;
2757 const char *p;
2758
2759 p = arg;
2760 m = &meta_data_maps[nb_meta_data_maps++];
2761
2762 m->out_file = strtol(arg, (char **)&p, 0);
2763 if (*p)
2764 p++;
2765
2766 m->in_file = strtol(p, (char **)&p, 0);
2767 }
2768
2769 static void opt_recording_time(const char *arg)
2770 {
2771 recording_time = parse_date(arg, 1);
2772 }
2773
2774 static void opt_start_time(const char *arg)
2775 {
2776 start_time = parse_date(arg, 1);
2777 }
2778
2779 static void opt_rec_timestamp(const char *arg)
2780 {
2781 rec_timestamp = parse_date(arg, 0) / 1000000;
2782 }
2783
2784 static void opt_input_ts_offset(const char *arg)
2785 {
2786 input_ts_offset = parse_date(arg, 1);
2787 }
2788
2789 static void opt_input_file(const char *filename)
2790 {
2791 AVFormatContext *ic;
2792 AVFormatParameters params, *ap = &params;
2793 int err, i, ret, rfps, rfps_base;
2794 int64_t timestamp;
2795
2796 if (!strcmp(filename, "-"))
2797 filename = "pipe:";
2798
2799 using_stdin |= !strncmp(filename, "pipe:", 5) ||
2800 !strcmp( filename, "/dev/stdin" );
2801
2802 /* get default parameters from command line */
2803 memset(ap, 0, sizeof(*ap));
2804 ap->sample_rate = audio_sample_rate;
2805 ap->channels = audio_channels;
2806 ap->frame_rate = frame_rate;
2807 ap->frame_rate_base = frame_rate_base;
2808 ap->width = frame_width + frame_padleft + frame_padright;
2809 ap->height = frame_height + frame_padtop + frame_padbottom;
2810 ap->image_format = image_format;
2811 ap->pix_fmt = frame_pix_fmt;
2812 ap->device = grab_device;
2813 ap->channel = video_channel;
2814 ap->standard = video_standard;
2815 ap->video_codec_id = video_codec_id;
2816 ap->audio_codec_id = audio_codec_id;
2817 if(pgmyuv_compatibility_hack)
2818 ap->video_codec_id= CODEC_ID_PGMYUV;
2819
2820 /* open the input file with generic libav function */
2821 err = av_open_input_file(&ic, filename, file_iformat, 0, ap);
2822 if (err < 0) {
2823 print_error(filename, err);
2824 exit(1);
2825 }
2826
2827 /* If there is not enough info to get the stream parameters, we decode
2828 the first frames to get them (used in the MPEG case, for example) */
2829 ret = av_find_stream_info(ic);
2830 if (ret < 0 && verbose >= 0) {
2831 fprintf(stderr, "%s: could not find codec parameters\n", filename);
2832 exit(1);
2833 }
2834
2835 timestamp = start_time;
2836 /* add the stream start time */
2837 if (ic->start_time != AV_NOPTS_VALUE)
2838 timestamp += ic->start_time;
2839
2840 /* if seeking is requested, we execute it */
2841 if (start_time != 0) {
2842 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
2843 if (ret < 0) {
2844 fprintf(stderr, "%s: could not seek to position %0.3f\n",
2845 filename, (double)timestamp / AV_TIME_BASE);
2846 }
2847 /* reset seek info */
2848 start_time = 0;
2849 }
2850
2851 /* update the current parameters so that they match those of the input stream */
2852 for(i=0;i<ic->nb_streams;i++) {
2853 AVCodecContext *enc = &ic->streams[i]->codec;
2854 #if defined(HAVE_THREADS)
2855 if(thread_count>1)
2856 avcodec_thread_init(enc, thread_count);
2857 #endif
2858 enc->thread_count= thread_count;
2859 switch(enc->codec_type) {
2860 case CODEC_TYPE_AUDIO:
2861 //fprintf(stderr, "\nInput Audio channels: %d", enc->channels);
2862 audio_channels = enc->channels;
2863 audio_sample_rate = enc->sample_rate;
2864 break;
2865 case CODEC_TYPE_VIDEO:
2866 frame_height = enc->height;
2867 frame_width = enc->width;
2868 frame_aspect_ratio = av_q2d(enc->sample_aspect_ratio) * enc->width / enc->height;
2869 frame_pix_fmt = enc->pix_fmt;
2870 rfps = ic->streams[i]->r_frame_rate;
2871 rfps_base = ic->streams[i]->r_frame_rate_base;
2872 enc->workaround_bugs = workaround_bugs;
2873 enc->error_resilience = error_resilience;
2874 enc->error_concealment = error_concealment;
2875 enc->idct_algo = idct_algo;
2876 enc->debug = debug;
2877 enc->debug_mv = debug_mv;
2878 enc->lowres= lowres;
2879 if(bitexact)
2880 enc->flags|= CODEC_FLAG_BITEXACT;
2881 if(me_threshold)
2882 enc->debug |= FF_DEBUG_MV;
2883
2884 if (enc->frame_rate != rfps || enc->frame_rate_base != rfps_base) {
2885
2886 if (verbose >= 0)
2887 fprintf(stderr,"\nSeems that stream %d comes from film source: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
2888 i, (float)enc->frame_rate / enc->frame_rate_base, enc->frame_rate, enc->frame_rate_base,
2889
2890 (float)rfps / rfps_base, rfps, rfps_base);
2891 }
2892 /* update the current frame rate to match the stream frame rate */
2893 frame_rate = rfps;
2894 frame_rate_base = rfps_base;
2895
2896 enc->rate_emu = rate_emu;
2897 break;
2898 case CODEC_TYPE_DATA:
2899 break;
2900 default:
2901 av_abort();
2902 }
2903 }
2904
2905 input_files[nb_input_files] = ic;
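/* Unless timestamp copying (copy_ts) is enabled, the computed start timestamp
   is subtracted here so that this input's packet timestamps are later shifted
   to begin near zero (offset by input_ts_offset, if any). */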
2906 input_files_ts_offset[nb_input_files] = input_ts_offset - (copy_ts ? 0 : timestamp);
2907 /* dump the file content */
2908 if (verbose >= 0)
2909 dump_format(ic, nb_input_files, filename, 0);
2910
2911 nb_input_files++;
2912 file_iformat = NULL;
2913 file_oformat = NULL;
2914 image_format = NULL;
2915
2916 grab_device = NULL;
2917 video_channel = 0;
2918
2919 rate_emu = 0;
2920 }
2921
2922 static void opt_grab(const char *arg)
2923 {
2924 file_iformat = av_find_input_format(arg);
2925 opt_input_file("");
2926 }
2927
2928 static void check_audio_video_inputs(int *has_video_ptr, int *has_audio_ptr)
2929 {
2930 int has_video, has_audio, i, j;
2931 AVFormatContext *ic;
2932
2933 has_video = 0;
2934 has_audio = 0;
2935 for(j=0;j<nb_input_files;j++) {
2936 ic = input_files[j];
2937 for(i=0;i<ic->nb_streams;i++) {
2938 AVCodecContext *enc = &ic->streams[i]->codec;
2939 switch(enc->codec_type) {
2940 case CODEC_TYPE_AUDIO:
2941 has_audio = 1;
2942 break;
2943 case CODEC_TYPE_VIDEO:
2944 has_video = 1;
2945 break;
2946 case CODEC_TYPE_DATA:
2947 break;
2948 default:
2949 av_abort();
2950 }
2951 }
2952 }
2953 *has_video_ptr = has_video;
2954 *has_audio_ptr = has_audio;
2955 }
2956
2957 static void opt_output_file(const char *filename)
2958 {
2959 AVStream *st;
2960 AVFormatContext *oc;
2961 int use_video, use_audio, nb_streams, input_has_video, input_has_audio;
2962 int codec_id;
2963 AVFormatParameters params, *ap = &params;
2964
2965 if (!strcmp(filename, "-"))
2966 filename = "pipe:";
2967
2968 oc = av_alloc_format_context();
2969
2970 if (!file_oformat) {
2971 file_oformat = guess_format(NULL, filename, NULL);
2972 if (!file_oformat) {
2973 fprintf(stderr, "Unable for find a suitable output format for '%s'\n",
2974 filename);
2975 exit(1);
2976 }
2977 }
2978
2979 oc->oformat = file_oformat;
2980
2981 if (!strcmp(file_oformat->name, "ffm") &&
2982 strstart(filename, "http:", NULL)) {
2983 /* special case for files sent to ffserver: we get the stream
2984 parameters from ffserver */
2985 if (read_ffserver_streams(oc, filename) < 0) {
2986 fprintf(stderr, "Could not read stream parameters from '%s'\n", filename);
2987 exit(1);
2988 }
2989 } else {
2990 use_video = file_oformat->video_codec != CODEC_ID_NONE || video_stream_copy;
2991 use_audio = file_oformat->audio_codec != CODEC_ID_NONE || audio_stream_copy;
2992
2993 /* disable if no corresponding type is found and there is at least
2994 one input file */
2995 if (nb_input_files > 0) {
2996 check_audio_video_inputs(&input_has_video, &input_has_audio);
2997 if (!input_has_video)
2998 use_video = 0;
2999 if (!input_has_audio)
3000 use_audio = 0;
3001 }
3002
3003 /* manual disable */
3004 if (audio_disable) {
3005 use_audio = 0;
3006 }
3007 if (video_disable) {
3008 use_video = 0;
3009 }
3010
3011 nb_streams = 0;
3012 if (use_video) {
3013 AVCodecContext *video_enc;
3014
3015 st = av_new_stream(oc, nb_streams++);
3016 if (!st) {
3017 fprintf(stderr, "Could not alloc stream\n");
3018 exit(1);
3019 }
3020 #if defined(HAVE_THREADS)
3021 if(thread_count>1)
3022 avcodec_thread_init(&st->codec, thread_count);
3023 #endif
3024
3025 video_enc = &st->codec;
3026
3027 if(video_codec_tag)
3028 video_enc->codec_tag= video_codec_tag;
3029
3030 if(!strcmp(file_oformat->name, "mp4") || !strcmp(file_oformat->name, "mov") || !strcmp(file_oformat->name, "3gp"))
3031 video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
3032 if (video_stream_copy) {
3033 st->stream_copy = 1;
3034 video_enc->codec_type = CODEC_TYPE_VIDEO;
3035 } else {
3036 char *p;
3037 int i;
3038 AVCodec *codec;
3039
3040 codec_id = av_guess_codec(file_oformat, NULL, filename, NULL, CODEC_TYPE_VIDEO);
3041 if (video_codec_id != CODEC_ID_NONE)
3042 codec_id = video_codec_id;
3043
3044 video_enc->codec_id = codec_id;
3045 codec = avcodec_find_encoder(codec_id);
3046
3047 video_enc->bit_rate = video_bit_rate;
3048 video_enc->bit_rate_tolerance = video_bit_rate_tolerance;
3049 video_enc->frame_rate = frame_rate;
3050 video_enc->frame_rate_base = frame_rate_base;
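/* if the encoder only supports a fixed set of frame rates, replace the
   requested rate with the closest supported one (smallest rational difference) */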
3051 if(codec && codec->supported_framerates){
3052 const AVRational *p= codec->supported_framerates;
3053 AVRational req= (AVRational){frame_rate, frame_rate_base};
3054 const AVRational *best=NULL;
3055 AVRational best_error= (AVRational){INT_MAX, 1};
3056 for(; p->den!=0; p++){
3057 AVRational error= av_sub_q(req, *p);
3058 if(error.num <0) error.num *= -1;
3059 if(av_cmp_q(error, best_error) < 0){
3060 best_error= error;
3061 best= p;
3062 }
3063 }
3064 video_enc->frame_rate = best->num;
3065 video_enc->frame_rate_base= best->den;
3066 }
3067
3068 video_enc->width = frame_width + frame_padright + frame_padleft;
3069 video_enc->height = frame_height + frame_padtop + frame_padbottom;
3070 video_enc->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255);
3071 video_enc->pix_fmt = frame_pix_fmt;
3072
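/* likewise, fall back to the encoder's first supported pixel format when the
   requested one is not in its pix_fmts list */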
3073 if(codec && codec->pix_fmts){
3074 const enum PixelFormat *p= codec->pix_fmts;
3075 for(; *p!=-1; p++){
3076 if(*p == video_enc->pix_fmt)
3077 break;
3078 }
3079 if(*p == -1)
3080 video_enc->pix_fmt = codec->pix_fmts[0];
3081 }
3082
3083 if (!intra_only)
3084 video_enc->gop_size = gop_size;
3085 else
3086 video_enc->gop_size = 0;
3087 if (video_qscale || same_quality) {
3088 video_enc->flags |= CODEC_FLAG_QSCALE;
3089 st->quality = FF_QP2LAMBDA * video_qscale;
3090 }
3091
3092 if(intra_matrix)
3093 video_enc->intra_matrix = intra_matrix;
3094 if(inter_matrix)
3095 video_enc->inter_matrix = inter_matrix;
3096
3097 if(bitexact)
3098 video_enc->flags |= CODEC_FLAG_BITEXACT;
3099
3100 video_enc->mb_decision = mb_decision;
3101 video_enc->mb_cmp = mb_cmp;
3102 video_enc->ildct_cmp = ildct_cmp;
3103 video_enc->me_sub_cmp = sub_cmp;
3104 video_enc->me_cmp = cmp;
3105 video_enc->me_pre_cmp = pre_cmp;
3106 video_enc->pre_me = pre_me;
3107 video_enc->lumi_masking = lumi_mask;
3108 video_enc->dark_masking = dark_mask;
3109 video_enc->spatial_cplx_masking = scplx_mask;
3110 video_enc->temporal_cplx_masking = tcplx_mask;
3111 video_enc->p_masking = p_mask;
3112 video_enc->quantizer_noise_shaping= qns;
3113
3114 if (use_umv) {
3115 video_enc->flags |= CODEC_FLAG_H263P_UMV;
3116 }
3117 if (use_ss) {
3118 video_enc->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;
3119 }
3120 if (use_aic) {
3121 video_enc->flags |= CODEC_FLAG_H263P_AIC;
3122 }
3123 if (use_aiv) {
3124 video_enc->flags |= CODEC_FLAG_H263P_AIV;
3125 }
3126 if (use_4mv) {
3127 video_enc->flags |= CODEC_FLAG_4MV;
3128 }
3129 if (use_obmc) {
3130 video_enc->flags |= CODEC_FLAG_OBMC;
3131 }
3132 if (use_loop) {
3133 video_enc->flags |= CODEC_FLAG_LOOP_FILTER;
3134 }
3135
3136 if(use_part) {
3137 video_enc->flags |= CODEC_FLAG_PART;
3138 }
3139 if (use_alt_scan) {
3140 video_enc->flags |= CODEC_FLAG_ALT_SCAN;
3141 }
3142 if (use_trell) {
3143 video_enc->flags |= CODEC_FLAG_TRELLIS_QUANT;
3144 }
3145 if (use_scan_offset) {
3146 video_enc->flags |= CODEC_FLAG_SVCD_SCAN_OFFSET;
3147 }
3148 if (closed_gop) {
3149 video_enc->flags |= CODEC_FLAG_CLOSED_GOP;
3150 }
3151 if (strict_gop) {
3152 video_enc->flags2 |= CODEC_FLAG2_STRICT_GOP;
3153 }
3154 if (use_qpel) {
3155 video_enc->flags |= CODEC_FLAG_QPEL;
3156 }
3157 if (use_qprd) {
3158 video_enc->flags |= CODEC_FLAG_QP_RD;
3159 }
3160 if (use_cbprd) {
3161 video_enc->flags |= CODEC_FLAG_CBP_RD;
3162 }
3163 if (b_frames) {
3164 video_enc->max_b_frames = b_frames;
3165 video_enc->b_frame_strategy = 0;
3166 video_enc->b_quant_factor = 2.0;
3167 }
3168 if (do_interlace_dct) {
3169 video_enc->flags |= CODEC_FLAG_INTERLACED_DCT;
3170 }
3171 if (do_interlace_me) {
3172 video_enc->flags |= CODEC_FLAG_INTERLACED_ME;
3173 }
3174 video_enc->qmin = video_qmin;
3175 video_enc->qmax = video_qmax;
3176 video_enc->lmin = video_lmin;
3177 video_enc->lmax = video_lmax;
3178 video_enc->mb_qmin = video_mb_qmin;
3179 video_enc->mb_qmax = video_mb_qmax;
3180 video_enc->max_qdiff = video_qdiff;
3181 video_enc->qblur = video_qblur;
3182 video_enc->qcompress = video_qcomp;
3183 video_enc->rc_eq = video_rc_eq;
3184 video_enc->debug = debug;
3185 video_enc->debug_mv = debug_mv;
3186 video_enc->thread_count = thread_count;
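/* parse the rate control override string: a '/'-separated list of
   "start_frame,end_frame,q" triplets; a positive q is used as a fixed qscale,
   otherwise -q/100.0 is used as a quality factor */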
3187 p= video_rc_override_string;
3188 for(i=0; p; i++){
3189 int start, end, q;
3190 int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
3191 if(e!=3){
3192 fprintf(stderr, "error parsing rc_override\n");
3193 exit(1);
3194 }
3195 video_enc->rc_override=
3196 av_realloc(video_enc->rc_override,
3197 sizeof(RcOverride)*(i+1));
3198 video_enc->rc_override[i].start_frame= start;
3199 video_enc->rc_override[i].end_frame = end;
3200 if(q>0){
3201 video_enc->rc_override[i].qscale= q;
3202 video_enc->rc_override[i].quality_factor= 1.0;
3203 }
3204 else{
3205 video_enc->rc_override[i].qscale= 0;
3206 video_enc->rc_override[i].quality_factor= -q/100.0;
3207 }
3208 p= strchr(p, '/');
3209 if(p) p++;
3210 }
3211 video_enc->rc_override_count=i;
3212
3213 video_enc->rc_max_rate = video_rc_max_rate;
3214 video_enc->rc_min_rate = video_rc_min_rate;
3215 video_enc->rc_buffer_size = video_rc_buffer_size;
3216 video_enc->rc_initial_buffer_occupancy = video_rc_buffer_size*3/4;
3217 video_enc->rc_buffer_aggressivity= video_rc_buffer_aggressivity;
3218 video_enc->rc_initial_cplx= video_rc_initial_cplx;
3219 video_enc->i_quant_factor = video_i_qfactor;
3220 video_enc->b_quant_factor = video_b_qfactor;
3221 video_enc->i_quant_offset = video_i_qoffset;
3222 video_enc->b_quant_offset = video_b_qoffset;
3223 video_enc->intra_quant_bias = video_intra_quant_bias;
3224 video_enc->inter_quant_bias = video_inter_quant_bias;
3225 video_enc->dct_algo = dct_algo;
3226 video_enc->idct_algo = idct_algo;
3227 video_enc->me_threshold= me_threshold;
3228 video_enc->mb_threshold= mb_threshold;
3229 video_enc->intra_dc_precision= intra_dc_precision - 8;
3230 video_enc->strict_std_compliance = strict;
3231 video_enc->error_rate = error_rate;
3232 video_enc->noise_reduction= noise_reduction;
3233 video_enc->scenechange_threshold= sc_threshold;
3234 video_enc->me_range = me_range;
3235 video_enc->coder_type= coder;
3236 video_enc->context_model= context;
3237 video_enc->prediction_method= predictor;
3238 video_enc->profile= video_profile;
3239 video_enc->level= video_level;
3240 video_enc->nsse_weight= nsse_weight;
3241 video_enc->me_subpel_quality= subpel_quality;
3242 video_enc->frame_skip_threshold= frame_skip_threshold;
3243 video_enc->frame_skip_factor= frame_skip_factor;
3244 video_enc->frame_skip_exp= frame_skip_exp;
3245 video_enc->frame_skip_cmp= frame_skip_cmp;
3246
3247 if(packet_size){
3248 video_enc->rtp_mode= 1;
3249 video_enc->rtp_payload_size= packet_size;
3250 }
3251
3252 if (do_psnr)
3253 video_enc->flags|= CODEC_FLAG_PSNR;
3254
3255 video_enc->me_method = me_method;
3256
3257 /* two pass mode */
3258 if (do_pass) {
3259 if (do_pass == 1) {
3260 video_enc->flags |= CODEC_FLAG_PASS1;
3261 } else {
3262 video_enc->flags |= CODEC_FLAG_PASS2;
3263 }
3264 }
3265 }
3266 }
3267
3268 if (use_audio) {
3269 AVCodecContext *audio_enc;
3270
3271 st = av_new_stream(oc, nb_streams++);
3272 if (!st) {
3273 fprintf(stderr, "Could not alloc stream\n");
3274 exit(1);
3275 }
3276 #if defined(HAVE_THREADS)
3277 if(thread_count>1)
3278 avcodec_thread_init(&st->codec, thread_count);
3279 #endif
3280
3281 audio_enc = &st->codec;
3282 audio_enc->codec_type = CODEC_TYPE_AUDIO;
3283
3284 if(audio_codec_tag)
3285 audio_enc->codec_tag= audio_codec_tag;
3286
3287 if(!strcmp(file_oformat->name, "mp4") || !strcmp(file_oformat->name, "mov") || !strcmp(file_oformat->name, "3gp"))
3288 audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
3289 if (audio_stream_copy) {
3290 st->stream_copy = 1;
3291 audio_enc->channels = audio_channels;
3292 } else {
3293 codec_id = av_guess_codec(file_oformat, NULL, filename, NULL, CODEC_TYPE_AUDIO);
3294 if (audio_codec_id != CODEC_ID_NONE)
3295 codec_id = audio_codec_id;
3296 audio_enc->codec_id = codec_id;
3297
3298 audio_enc->bit_rate = audio_bit_rate;
3299 audio_enc->strict_std_compliance = strict;
3300 audio_enc->thread_count = thread_count;
3301 /* For audio codecs other than AC3 or DTS we limit */
3302 /* the number of coded channels to stereo */
3303 if (audio_channels > 2 && codec_id != CODEC_ID_AC3
3304 && codec_id != CODEC_ID_DTS) {
3305 audio_enc->channels = 2;
3306 } else
3307 audio_enc->channels = audio_channels;
3308 }
3309 audio_enc->sample_rate = audio_sample_rate;
3310 }
3311
3312 oc->nb_streams = nb_streams;
3313