1 /*
2 * FFmpeg main
3 * Copyright (c) 2000-2003 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #define HAVE_AV_CONFIG_H
20 #include <limits.h>
21 #include "avformat.h"
22 #include "framehook.h"
23 #include "dsputil.h"
24
25 #ifndef CONFIG_WIN32
26 #include <unistd.h>
27 #include <fcntl.h>
28 #include <sys/ioctl.h>
29 #include <sys/time.h>
30 #include <termios.h>
31 #include <sys/resource.h>
32 #include <signal.h>
33 #endif
34 #ifdef CONFIG_OS2
35 #include <sys/types.h>
36 #include <sys/select.h>
37 #include <stdlib.h>
38 #endif
39 #undef time //needed because HAVE_AV_CONFIG_H is defined on top
40 #include <time.h>
41
42 #include "cmdutils.h"
43
44 #undef NDEBUG
45 #include <assert.h>
46
47 #if !defined(INFINITY) && defined(HUGE_VAL)
48 #define INFINITY HUGE_VAL
49 #endif
50
51 /* select an input stream for an output stream */
52 typedef struct AVStreamMap {
53 int file_index;
54 int stream_index;
55 } AVStreamMap;
56
57 /** select an input file for an output file */
58 typedef struct AVMetaDataMap {
59 int out_file;
60 int in_file;
61 } AVMetaDataMap;
62
63 extern const OptionDef options[];
64
65 static void show_help(void);
66 static void show_license(void);
67
68 #define MAX_FILES 20
69
70 static AVFormatContext *input_files[MAX_FILES];
71 static int64_t input_files_ts_offset[MAX_FILES];
72 static int nb_input_files = 0;
73
74 static AVFormatContext *output_files[MAX_FILES];
75 static int nb_output_files = 0;
76
77 static AVStreamMap stream_maps[MAX_FILES];
78 static int nb_stream_maps;
79
80 static AVMetaDataMap meta_data_maps[MAX_FILES];
81 static int nb_meta_data_maps;
82
83 static AVInputFormat *file_iformat;
84 static AVOutputFormat *file_oformat;
85 static AVImageFormat *image_format;
86 static int frame_width = 160;
87 static int frame_height = 128;
88 static float frame_aspect_ratio = 0;
89 static enum PixelFormat frame_pix_fmt = PIX_FMT_YUV420P;
90 static int frame_padtop = 0;
91 static int frame_padbottom = 0;
92 static int frame_padleft = 0;
93 static int frame_padright = 0;
94 static int padcolor[3] = {16,128,128}; /* default to black */
95 static int frame_topBand = 0;
96 static int frame_bottomBand = 0;
97 static int frame_leftBand = 0;
98 static int frame_rightBand = 0;
99 static int frame_rate = 25;
100 static int frame_rate_base = 1;
101 static int video_bit_rate = 200*1000;
102 static int video_bit_rate_tolerance = 4000*1000;
103 static float video_qscale = 0;
104 static int video_qmin = 2;
105 static int video_qmax = 31;
106 static int video_lmin = 2*FF_QP2LAMBDA;
107 static int video_lmax = 31*FF_QP2LAMBDA;
108 static int video_mb_qmin = 2;
109 static int video_mb_qmax = 31;
110 static int video_qdiff = 3;
111 static float video_qblur = 0.5;
112 static float video_qcomp = 0.5;
113 static uint16_t *intra_matrix = NULL;
114 static uint16_t *inter_matrix = NULL;
115 #if 0 //experimental, (can be removed)
116 static float video_rc_qsquish=1.0;
117 static float video_rc_qmod_amp=0;
118 static int video_rc_qmod_freq=0;
119 #endif
120 static char *video_rc_override_string=NULL;
121 static char *video_rc_eq="tex^qComp";
122 static int video_rc_buffer_size=0;
123 static float video_rc_buffer_aggressivity=1.0;
124 static int video_rc_max_rate=0;
125 static int video_rc_min_rate=0;
126 static float video_rc_initial_cplx=0;
127 static float video_b_qfactor = 1.25;
128 static float video_b_qoffset = 1.25;
129 static float video_i_qfactor = -0.8;
130 static float video_i_qoffset = 0.0;
131 static int video_intra_quant_bias= FF_DEFAULT_QUANT_BIAS;
132 static int video_inter_quant_bias= FF_DEFAULT_QUANT_BIAS;
133 static int me_method = ME_EPZS;
134 static int video_disable = 0;
135 static int video_codec_id = CODEC_ID_NONE;
136 static int same_quality = 0;
137 static int b_frames = 0;
138 static int mb_decision = FF_MB_DECISION_SIMPLE;
139 static int ildct_cmp = FF_CMP_VSAD;
140 static int mb_cmp = FF_CMP_SAD;
141 static int sub_cmp = FF_CMP_SAD;
142 static int cmp = FF_CMP_SAD;
143 static int pre_cmp = FF_CMP_SAD;
144 static int pre_me = 0;
145 static float lumi_mask = 0;
146 static float dark_mask = 0;
147 static float scplx_mask = 0;
148 static float tcplx_mask = 0;
149 static float p_mask = 0;
150 static int use_4mv = 0;
151 static int use_obmc = 0;
152 static int use_loop = 0;
153 static int use_aic = 0;
154 static int use_aiv = 0;
155 static int use_umv = 0;
156 static int use_ss = 0;
157 static int use_alt_scan = 0;
158 static int use_trell = 0;
159 static int use_scan_offset = 0;
160 static int use_qpel = 0;
161 static int use_qprd = 0;
162 static int use_cbprd = 0;
163 static int qns = 0;
164 static int closed_gop = 0;
165 static int do_deinterlace = 0;
166 static int do_interlace_dct = 0;
167 static int do_interlace_me = 0;
168 static int workaround_bugs = FF_BUG_AUTODETECT;
169 static int error_resilience = 2;
170 static int error_concealment = 3;
171 static int dct_algo = 0;
172 static int idct_algo = 0;
173 static int use_part = 0;
174 static int packet_size = 0;
175 static int error_rate = 0;
176 static int strict = 0;
177 static int top_field_first = -1;
178 static int noise_reduction = 0;
179 static int sc_threshold = 0;
180 static int debug = 0;
181 static int debug_mv = 0;
182 static int me_threshold = 0;
183 static int mb_threshold = 0;
184 static int intra_dc_precision = 8;
185 static int coder = 0;
186 static int context = 0;
187 static int predictor = 0;
188 static int video_profile = FF_PROFILE_UNKNOWN;
189 static int video_level = FF_LEVEL_UNKNOWN;
190 static int nsse_weight = 8;
191 static int subpel_quality= 8;
192 static int lowres= 0;
193 extern int loop_input; /* currently a hack */
194
195 static int gop_size = 12;
196 static int intra_only = 0;
197 static int audio_sample_rate = 44100;
198 static int audio_bit_rate = 64000;
199 static int audio_disable = 0;
200 static int audio_channels = 1;
201 static int audio_codec_id = CODEC_ID_NONE;
202
203 static int64_t recording_time = 0;
204 static int64_t start_time = 0;
205 static int64_t rec_timestamp = 0;
206 static int64_t input_ts_offset = 0;
207 static int file_overwrite = 0;
208 static char *str_title = NULL;
209 static char *str_author = NULL;
210 static char *str_copyright = NULL;
211 static char *str_comment = NULL;
212 static int do_benchmark = 0;
213 static int do_hex_dump = 0;
214 static int do_pkt_dump = 0;
215 static int do_psnr = 0;
216 static int do_vstats = 0;
217 static int do_pass = 0;
218 static int bitexact = 0;
219 static char *pass_logfilename = NULL;
220 static int audio_stream_copy = 0;
221 static int video_stream_copy = 0;
222 static int video_sync_method= 1;
223 static int audio_sync_method= 0;
224 static int copy_ts= 0;
225
226 static int rate_emu = 0;
227
228 static char *video_grab_format = "video4linux";
229 static char *video_device = NULL;
230 static int video_channel = 0;
231 static char *video_standard = "ntsc";
232
233 static char *audio_grab_format = "audio_device";
234 static char *audio_device = NULL;
235
236 static int using_stdin = 0;
237 static int using_vhook = 0;
238 static int verbose = 1;
239 static int thread_count= 1;
240 static int q_pressed = 0;
241 static int me_range = 0;
242 static int64_t video_size = 0;
243 static int64_t audio_size = 0;
244 static int64_t extra_size = 0;
245 static int nb_frames_dup = 0;
246 static int nb_frames_drop = 0;
247 static int input_sync;
248
249 #define DEFAULT_PASS_LOGFILENAME "ffmpeg2pass"
250
251 typedef struct AVOutputStream {
252 int file_index; /* file index */
253 int index; /* stream index in the output file */
254 int source_index; /* AVInputStream index */
255 AVStream *st; /* stream in the output file */
256 int encoding_needed; /* true if encoding needed for this stream */
257 int frame_number;
258 /* input pts and corresponding output pts
259 for A/V sync */
260 double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
261 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
262 /* video only */
263 int video_resample; /* video_resample and video_crop are mutually exclusive */
264 AVFrame pict_tmp; /* temporary image for resampling */
265 ImgReSampleContext *img_resample_ctx; /* for image resampling */
266
267 int video_crop; /* video_resample and video_crop are mutually exclusive */
268 int topBand; /* cropping area sizes */
269 int leftBand;
270
271 int video_pad; /* video_resample and video_pad are mutually exclusive */
272 int padtop; /* padding area sizes */
273 int padbottom;
274 int padleft;
275 int padright;
276
277 /* audio only */
278 int audio_resample;
279 ReSampleContext *resample; /* for audio resampling */
280 FifoBuffer fifo; /* for compression: one audio fifo per codec */
281 FILE *logfile;
282 } AVOutputStream;
283
284 typedef struct AVInputStream {
285 int file_index;
286 int index;
287 AVStream *st;
288 int discard; /* true if stream data should be discarded */
289 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
290 int64_t sample_index; /* current sample */
291
292 int64_t start; /* time when read started */
293 unsigned long frame; /* current frame */
294 int64_t next_pts; /* synthetic pts for cases where pkt.pts
295 is not defined */
296 int64_t pts; /* current pts */
297 int is_start; /* is 1 at the start and after a discontinuity */
298 } AVInputStream;
299
300 typedef struct AVInputFile {
301 int eof_reached; /* true if eof reached */
302 int ist_index; /* index of first stream in ist_table */
303 int buffer_size; /* current total buffer size */
304 int buffer_size_max; /* buffer size at which we consider we can stop
305 buffering */
306 int nb_streams; /* nb streams we are aware of */
307 } AVInputFile;
308
309 #ifndef CONFIG_WIN32
310
311 /* init terminal so that we can grab keys */
312 static struct termios oldtty;
313
314 static void term_exit(void)
315 {
316 tcsetattr (0, TCSANOW, &oldtty);
317 }
318
319 static volatile sig_atomic_t received_sigterm = 0;
320
321 static void
322 sigterm_handler(int sig)
323 {
324 received_sigterm = sig;
325 term_exit();
326 }
327
328 static void term_init(void)
329 {
330 struct termios tty;
331
332 tcgetattr (0, &tty);
333 oldtty = tty;
334
335 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
336 |INLCR|IGNCR|ICRNL|IXON);
337 tty.c_oflag |= OPOST;
338 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
339 tty.c_cflag &= ~(CSIZE|PARENB);
340 tty.c_cflag |= CS8;
341 tty.c_cc[VMIN] = 1;
342 tty.c_cc[VTIME] = 0;
343
344 tcsetattr (0, TCSANOW, &tty);
345
346 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
347 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
348 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
349 /*
350 register a function to be called at normal program termination
351 */
352 atexit(term_exit);
353 #ifdef CONFIG_BEOS_NETSERVER
354 fcntl(0, F_SETFL, fcntl(0, F_GETFL) | O_NONBLOCK);
355 #endif
356 }
357
358 /* read a key without blocking */
359 static int read_key(void)
360 {
361 int n = 1;
362 unsigned char ch;
363 #ifndef CONFIG_BEOS_NETSERVER
364 struct timeval tv;
365 fd_set rfds;
366
367 FD_ZERO(&rfds);
368 FD_SET(0, &rfds);
369 tv.tv_sec = 0;
370 tv.tv_usec = 0;
371 n = select(1, &rfds, NULL, NULL, &tv);
372 #endif
373 if (n > 0) {
374 n = read(0, &ch, 1);
375 if (n == 1)
376 return ch;
377
378 return n;
379 }
380 return -1;
381 }
382
383 static int decode_interrupt_cb(void)
384 {
385 return q_pressed || (q_pressed = read_key() == 'q');
386 }
387
388 #else
389
390 static volatile int received_sigterm = 0;
391
392 /* no interactive support */
393 static void term_exit(void)
394 {
395 }
396
397 static void term_init(void)
398 {
399 }
400
401 static int read_key(void)
402 {
403 return 0;
404 }
405
406 #endif
407
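/* Read the stream layout from an existing FFM feed file (used when the
   output is an ffserver feed) and copy each AVStream into the output
   context, so the encoder is configured with the parameters the feed
   expects. */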
408 static int read_ffserver_streams(AVFormatContext *s, const char *filename)
409 {
410 int i, err;
411 AVFormatContext *ic;
412
413 err = av_open_input_file(&ic, filename, NULL, FFM_PACKET_SIZE, NULL);
414 if (err < 0)
415 return err;
416 /* copy stream format */
417 s->nb_streams = ic->nb_streams;
418 for(i=0;i<ic->nb_streams;i++) {
419 AVStream *st;
420
421 st = av_mallocz(sizeof(AVStream));
422 memcpy(st, ic->streams[i], sizeof(AVStream));
423 s->streams[i] = st;
424 }
425
426 av_close_input_file(ic);
427 return 0;
428 }
429
430 #define MAX_AUDIO_PACKET_SIZE (128 * 1024)
431
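/*
 * Audio output path: decoded samples are optionally resampled to the output
 * channel count and sample rate, buffered in the per-stream FIFO, and then
 * encoded in chunks of enc->frame_size samples (PCM-style codecs with
 * frame_size <= 1 are encoded directly).  When audio_sync_method is enabled,
 * the start of the stream is trimmed or padded with silence and the
 * resampler is asked to compensate for drift so the audio stays aligned with
 * the output timestamps.
 */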
432 static void do_audio_out(AVFormatContext *s,
433 AVOutputStream *ost,
434 AVInputStream *ist,
435 unsigned char *buf, int size)
436 {
437 uint8_t *buftmp;
438 static uint8_t *audio_buf = NULL;
439 static uint8_t *audio_out = NULL;
440 const int audio_out_size= 4*MAX_AUDIO_PACKET_SIZE;
441
442 int size_out, frame_bytes, ret;
443 AVCodecContext *enc= &ost->st->codec;
444
445 /* SC: dynamic allocation of buffers */
446 if (!audio_buf)
447 audio_buf = av_malloc(2*MAX_AUDIO_PACKET_SIZE);
448 if (!audio_out)
449 audio_out = av_malloc(audio_out_size);
450 if (!audio_buf || !audio_out)
451 return; /* Should signal an error ! */
452
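/* delta is measured in output samples: the position implied by the input
   timestamp (sync_ipts) minus the encoder position (sync_opts) and the
   samples still queued in the FIFO.  If it exceeds 50 samples at stream
   start, samples are dropped or silence is inserted; later, with
   audio_sync_method > 1, the resampler compensates gradually. */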
453 if(audio_sync_method){
454 double delta = ost->sync_ipts * enc->sample_rate - ost->sync_opts
455 - fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2);
456 double idelta= delta*ist->st->codec.sample_rate / enc->sample_rate;
457 int byte_delta= ((int)idelta)*2*ist->st->codec.channels;
458
459 //FIXME resample delay
460 if(fabs(delta) > 50){
461 if(ist->is_start){
462 if(byte_delta < 0){
463 byte_delta= FFMAX(byte_delta, -size);
464 size += byte_delta;
465 buf -= byte_delta;
466 if(verbose > 2)
467 fprintf(stderr, "discarding %d audio samples\n", (int)-delta);
468 if(!size)
469 return;
470 ist->is_start=0;
471 }else{
472 static uint8_t *input_tmp= NULL;
473 input_tmp= av_realloc(input_tmp, byte_delta + size);
474
475 if(byte_delta + size <= MAX_AUDIO_PACKET_SIZE)
476 ist->is_start=0;
477 else
478 byte_delta= MAX_AUDIO_PACKET_SIZE - size;
479
480 memset(input_tmp, 0, byte_delta);
481 memcpy(input_tmp + byte_delta, buf, size);
482 buf= input_tmp;
483 size += byte_delta;
484 if(verbose > 2)
485 fprintf(stderr, "adding %d audio samples of silence\n", (int)delta);
486 }
487 }else if(audio_sync_method>1){
488 int comp= clip(delta, -audio_sync_method, audio_sync_method);
489 assert(ost->audio_resample);
490 if(verbose > 2)
491 fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
492 fprintf(stderr, "drift:%f len:%d opts:%lld ipts:%lld fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(ost->sync_ipts * enc->sample_rate), fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2));
493 av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
494 }
495 }
496 }else
497 ost->sync_opts= lrintf(ost->sync_ipts * enc->sample_rate)
498 - fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2); //FIXME wrong
499
500 if (ost->audio_resample) {
501 buftmp = audio_buf;
502 size_out = audio_resample(ost->resample,
503 (short *)buftmp, (short *)buf,
504 size / (ist->st->codec.channels * 2));
505 size_out = size_out * enc->channels * 2;
506 } else {
507 buftmp = buf;
508 size_out = size;
509 }
510
511 /* now encode as many frames as possible */
512 if (enc->frame_size > 1) {
513 /* output resampled raw samples */
514 fifo_write(&ost->fifo, buftmp, size_out,
515 &ost->fifo.wptr);
516
517 frame_bytes = enc->frame_size * 2 * enc->channels;
518
519 while (fifo_read(&ost->fifo, audio_buf, frame_bytes,
520 &ost->fifo.rptr) == 0) {
521 AVPacket pkt;
522 av_init_packet(&pkt);
523
524 ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
525 (short *)audio_buf);
526 audio_size += ret;
527 pkt.stream_index= ost->index;
528 pkt.data= audio_out;
529 pkt.size= ret;
530 if(enc->coded_frame)
531 pkt.pts= enc->coded_frame->pts;
532 pkt.flags |= PKT_FLAG_KEY;
533 av_interleaved_write_frame(s, &pkt);
534
535 ost->sync_opts += enc->frame_size;
536 }
537 } else {
538 AVPacket pkt;
539 av_init_packet(&pkt);
540
541 ost->sync_opts += size_out / (2 * enc->channels);
542
543 /* output a pcm frame */
544 /* XXX: change encoding codec API to avoid this ? */
545 switch(enc->codec->id) {
546 case CODEC_ID_PCM_S16LE:
547 case CODEC_ID_PCM_S16BE:
548 case CODEC_ID_PCM_U16LE:
549 case CODEC_ID_PCM_U16BE:
550 break;
551 default:
552 size_out = size_out >> 1;
553 break;
554 }
555 ret = avcodec_encode_audio(enc, audio_out, size_out,
556 (short *)buftmp);
557 audio_size += ret;
558 pkt.stream_index= ost->index;
559 pkt.data= audio_out;
560 pkt.size= ret;
561 if(enc->coded_frame)
562 pkt.pts= enc->coded_frame->pts;
563 pkt.flags |= PKT_FLAG_KEY;
564 av_interleaved_write_frame(s, &pkt);
565 }
566 }
567
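/* Run the decoded picture through deinterlacing and/or the vhook frame
   hooks before any rescaling.  If a temporary buffer had to be allocated,
   it is returned via *bufp and must be freed by the caller. */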
568 static void pre_process_video_frame(AVInputStream *ist, AVPicture *picture, void **bufp)
569 {
570 AVCodecContext *dec;
571 AVPicture *picture2;
572 AVPicture picture_tmp;
573 uint8_t *buf = 0;
574
575 dec = &ist->st->codec;
576
577 /* deinterlace : must be done before any resize */
578 if (do_deinterlace || using_vhook) {
579 int size;
580
581 /* create temporary picture */
582 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
583 buf = av_malloc(size);
584 if (!buf)
585 return;
586
587 picture2 = &picture_tmp;
588 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
589
590 if (do_deinterlace){
591 if(avpicture_deinterlace(picture2, picture,
592 dec->pix_fmt, dec->width, dec->height) < 0) {
593 /* if error, do not deinterlace */
594 av_free(buf);
595 buf = NULL;
596 picture2 = picture;
597 }
598 } else {
599 if (img_convert(picture2, dec->pix_fmt, picture,
600 dec->pix_fmt, dec->width, dec->height) < 0) {
601 /* if error, do not copy */
602 av_free(buf);
603 buf = NULL;
604 picture2 = picture;
605 }
606 }
607 } else {
608 picture2 = picture;
609 }
610
611 frame_hook_process(picture2, dec->pix_fmt, dec->width, dec->height);
612
613 if (picture != picture2)
614 *picture = *picture2;
615 *bufp = buf;
616 }
617
618 /* we begin to correct A/V delay at this threshold */
619 #define AV_DELAY_MAX 0.100
620
621
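/* The pad color and geometry are applied per plane.  For YUV420P the two
   chroma planes are subsampled by 2 horizontally and vertically, hence the
   shift of 1 applied to the sizes and offsets of planes 1 and 2. */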
622 /* Expects img to be yuv420 */
623 static void fill_pad_region(AVPicture* img, int height, int width,
624 int padtop, int padbottom, int padleft, int padright, int *color) {
625
626 int i, y, shift;
627 uint8_t *optr;
628
629 for (i = 0; i < 3; i++) {
630 shift = (i == 0) ? 0 : 1;
631
632 if (padtop || padleft) {
633 memset(img->data[i], color[i], (((img->linesize[i] * padtop) +
634 padleft) >> shift));
635 }
636
637 if (padleft || padright) {
638 optr = img->data[i] + (img->linesize[i] * (padtop >> shift)) +
639 (img->linesize[i] - (padright >> shift));
640
641 for (y = 0; y < ((height - (padtop + padbottom)) >> shift); y++) {
642 memset(optr, color[i], (padleft + padright) >> shift);
643 optr += img->linesize[i];
644 }
645 }
646
647 if (padbottom) {
648 optr = img->data[i] + (img->linesize[i] * ((height - padbottom) >> shift));
649 memset(optr, color[i], ((img->linesize[i] * padbottom) >> shift));
650 }
651 }
652 }
653
654 static uint8_t *bit_buffer= NULL;
655
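/*
 * Video output path: when video_sync_method is enabled, the difference
 * between the input timestamp and the output frame counter decides how many
 * times this picture is emitted (0 = dropped, >1 = duplicated).  The picture
 * is then converted to the target pixel format and resampled, cropped or
 * padded as configured before being passed to the encoder (or written
 * directly for AVFMT_RAWPICTURE formats).
 */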
656 static void do_video_out(AVFormatContext *s,
657 AVOutputStream *ost,
658 AVInputStream *ist,
659 AVFrame *in_picture,
660 int *frame_size)
661 {
662 int nb_frames, i, ret;
663 AVFrame *final_picture, *formatted_picture;
664 AVFrame picture_format_temp, picture_crop_temp;
665 uint8_t *buf = NULL, *buf1 = NULL;
666 AVCodecContext *enc, *dec;
667 enum PixelFormat target_pixfmt;
668
669 #define VIDEO_BUFFER_SIZE (1024*1024)
670
671 avcodec_get_frame_defaults(&picture_format_temp);
672 avcodec_get_frame_defaults(&picture_crop_temp);
673
674 enc = &ost->st->codec;
675 dec = &ist->st->codec;
676
677 /* by default, we output a single frame */
678 nb_frames = 1;
679
680 *frame_size = 0;
681
682 if(video_sync_method){
683 double vdelta;
684 vdelta = ost->sync_ipts * enc->frame_rate / enc->frame_rate_base - ost->sync_opts;
685 //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
686 if (vdelta < -1.1)
687 nb_frames = 0;
688 else if (vdelta > 1.1)
689 nb_frames = lrintf(vdelta - 1.1 + 0.5);
690 //fprintf(stderr, "vdelta:%f, ost->sync_opts:%lld, ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, ost->sync_ipts, nb_frames);
691 if (nb_frames == 0){
692 ++nb_frames_drop;
693 if (verbose>2)
694 fprintf(stderr, "*** drop!\n");
695 }else if (nb_frames > 1) {
696 nb_frames_dup += nb_frames;
697 if (verbose>2)
698 fprintf(stderr, "*** %d dup!\n", nb_frames-1);
699 }
700 }else
701 ost->sync_opts= lrintf(ost->sync_ipts * enc->frame_rate / enc->frame_rate_base);
702
703 if (nb_frames <= 0)
704 return;
705
706 /* convert pixel format if needed */
707 target_pixfmt = ost->video_resample || ost->video_pad
708 ? PIX_FMT_YUV420P : enc->pix_fmt;
709 if (dec->pix_fmt != target_pixfmt) {
710 int size;
711
712 /* create temporary picture */
713 size = avpicture_get_size(target_pixfmt, dec->width, dec->height);
714 buf = av_malloc(size);
715 if (!buf)
716 return;
717 formatted_picture = &picture_format_temp;
718 avpicture_fill((AVPicture*)formatted_picture, buf, target_pixfmt, dec->width, dec->height);
719
720 if (img_convert((AVPicture*)formatted_picture, target_pixfmt,
721 (AVPicture *)in_picture, dec->pix_fmt,
722 dec->width, dec->height) < 0) {
723
724 if (verbose >= 0)
725 fprintf(stderr, "pixel format conversion not handled\n");
726
727 goto the_end;
728 }
729 } else {
730 formatted_picture = in_picture;
731 }
732
733 /* XXX: resampling could be done before raw format conversion in
734 some cases to go faster */
735 /* XXX: only works for YUV420P */
736 if (ost->video_resample) {
737 final_picture = &ost->pict_tmp;
738 img_resample(ost->img_resample_ctx, (AVPicture*)final_picture, (AVPicture*)formatted_picture);
739
740 if (ost->padtop || ost->padbottom || ost->padleft || ost->padright) {
741 fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
742 ost->padtop, ost->padbottom, ost->padleft, ost->padright,
743 padcolor);
744 }
745
746 if (enc->pix_fmt != PIX_FMT_YUV420P) {
747 int size;
748
749 av_free(buf);
750 /* create temporary picture */
751 size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
752 buf = av_malloc(size);
753 if (!buf)
754 return;
755 final_picture = &picture_format_temp;
756 avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
757
758 if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
759 (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
760 enc->width, enc->height) < 0) {
761
762 if (verbose >= 0)
763 fprintf(stderr, "pixel format conversion not handled\n");
764
765 goto the_end;
766 }
767 }
768 } else if (ost->video_crop) {
769 picture_crop_temp.data[0] = formatted_picture->data[0] +
770 (ost->topBand * formatted_picture->linesize[0]) + ost->leftBand;
771
772 picture_crop_temp.data[1] = formatted_picture->data[1] +
773 ((ost->topBand >> 1) * formatted_picture->linesize[1]) +
774 (ost->leftBand >> 1);
775
776 picture_crop_temp.data[2] = formatted_picture->data[2] +
777 ((ost->topBand >> 1) * formatted_picture->linesize[2]) +
778 (ost->leftBand >> 1);
779
780 picture_crop_temp.linesize[0] = formatted_picture->linesize[0];
781 picture_crop_temp.linesize[1] = formatted_picture->linesize[1];
782 picture_crop_temp.linesize[2] = formatted_picture->linesize[2];
783 final_picture = &picture_crop_temp;
784 } else if (ost->video_pad) {
785 final_picture = &ost->pict_tmp;
786
787 for (i = 0; i < 3; i++) {
788 uint8_t *optr, *iptr;
789 int shift = (i == 0) ? 0 : 1;
790 int y, yheight;
791
792 /* set offset to start writing image into */
793 optr = final_picture->data[i] + (((final_picture->linesize[i] *
794 ost->padtop) + ost->padleft) >> shift);
795 iptr = formatted_picture->data[i];
796
797 yheight = (enc->height - ost->padtop - ost->padbottom) >> shift;
798 for (y = 0; y < yheight; y++) {
799 /* copy unpadded image row into padded image row */
800 memcpy(optr, iptr, formatted_picture->linesize[i]);
801 optr += final_picture->linesize[i];
802 iptr += formatted_picture->linesize[i];
803 }
804 }
805
806 fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
807 ost->padtop, ost->padbottom, ost->padleft, ost->padright,
808 padcolor);
809
810 if (enc->pix_fmt != PIX_FMT_YUV420P) {
811 int size;
812
813 av_free(buf);
814 /* create temporary picture */
815 size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
816 buf = av_malloc(size);
817 if (!buf)
818 return;
819 final_picture = &picture_format_temp;
820 avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
821
822 if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
823 (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
824 enc->width, enc->height) < 0) {
825
826 if (verbose >= 0)
827 fprintf(stderr, "pixel format conversion not handled\n");
828
829 goto the_end;
830 }
831 }
832 } else {
833 final_picture = formatted_picture;
834 }
835 /* duplicate the frame if needed */
836 for(i=0;i<nb_frames;i++) {
837 AVPacket pkt;
838 av_init_packet(&pkt);
839 pkt.stream_index= ost->index;
840
841 if (s->oformat->flags & AVFMT_RAWPICTURE) {
842 /* raw pictures are written as AVPicture structure to
843 avoid any copies. We temporarily support the older
844 method. */
845 AVFrame* old_frame = enc->coded_frame;
846 enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
847 pkt.data= (uint8_t *)final_picture;
848 pkt.size= sizeof(AVPicture);
849 if(dec->coded_frame)
850 pkt.pts= dec->coded_frame->pts;
851 if(dec->coded_frame && dec->coded_frame->key_frame)
852 pkt.flags |= PKT_FLAG_KEY;
853
854 av_interleaved_write_frame(s, &pkt);
855 enc->coded_frame = old_frame;
856 } else {
857 AVFrame big_picture;
858
859 big_picture= *final_picture;
860 /* better than nothing: use input picture interlaced
861 settings */
862 big_picture.interlaced_frame = in_picture->interlaced_frame;
863 if(do_interlace_me || do_interlace_dct){
864 if(top_field_first == -1)
865 big_picture.top_field_first = in_picture->top_field_first;
866 else
867 big_picture.top_field_first = top_field_first;
868 }
869
870 /* handles sameq here. This is not correct because it may
871 not be a global option */
872 if (same_quality) {
873 big_picture.quality = ist->st->quality;
874 }else
875 big_picture.quality = ost->st->quality;
876 if(!me_threshold)
877 big_picture.pict_type = 0;
878 // big_picture.pts = AV_NOPTS_VALUE;
879 big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->frame_rate_base, enc->frame_rate);
880 //av_log(NULL, AV_LOG_DEBUG, "%lld -> encoder\n", ost->sync_opts);
881 ret = avcodec_encode_video(enc,
882 bit_buffer, VIDEO_BUFFER_SIZE,
883 &big_picture);
884 //enc->frame_number = enc->real_pict_num;
885 if(ret){
886 pkt.data= bit_buffer;
887 pkt.size= ret;
888 if(enc->coded_frame)
889 pkt.pts= enc->coded_frame->pts;
890 /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %lld/%lld\n",
891 pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->frame_rate, AV_TIME_BASE*(int64_t)enc->frame_rate_base) : -1,
892 pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->frame_rate, AV_TIME_BASE*(int64_t)enc->frame_rate_base) : -1);*/
893
894 if(enc->coded_frame && enc->coded_frame->key_frame)
895 pkt.flags |= PKT_FLAG_KEY;
896 av_interleaved_write_frame(s, &pkt);
897 *frame_size = ret;
898 //fprintf(stderr,"\nFrame: %3d %3d size: %5d type: %d",
899 // enc->frame_number-1, enc->real_pict_num, ret,
900 // enc->pict_type);
901 /* in two-pass mode, output the log */
902 if (ost->logfile && enc->stats_out) {
903 fprintf(ost->logfile, "%s", enc->stats_out);
904 }
905 }
906 }
907 ost->sync_opts++;
908 ost->frame_number++;
909 }
910 the_end:
911 av_free(buf);
912 av_free(buf1);
913 }
914
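/* d is the mean squared error already normalized by the squared peak sample
   value (255*255); the result is the PSNR in dB, i.e. -10*log10(d). */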
915 static double psnr(double d){
916 if(d==0) return INFINITY;
917 return -10.0*log(d)/log(10.0);
918 }
919
920 static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
921 int frame_size)
922 {
923 static FILE *fvstats=NULL;
924 char filename[40];
925 time_t today2;
926 struct tm *today;
927 AVCodecContext *enc;
928 int frame_number;
929 int64_t ti;
930 double ti1, bitrate, avg_bitrate;
931
932 if (!fvstats) {
933 today2 = time(NULL);
934 today = localtime(&today2);
935 sprintf(filename, "vstats_%02d%02d%02d.log", today->tm_hour,
936 today->tm_min,
937 today->tm_sec);
938 fvstats = fopen(filename,"w");
939 if (!fvstats) {
940 perror("fopen");
941 exit(1);
942 }
943 }
944
945 ti = MAXINT64;
946 enc = &ost->st->codec;
947 if (enc->codec_type == CODEC_TYPE_VIDEO) {
948 frame_number = ost->frame_number;
949 fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
950 if (enc->flags&CODEC_FLAG_PSNR)
951 fprintf(fvstats, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
952
953 fprintf(fvstats,"f_size= %6d ", frame_size);
954 /* compute pts value */
955 ti1 = (double)ost->sync_opts *enc->frame_rate_base / enc->frame_rate;
956 if (ti1 < 0.01)
957 ti1 = 0.01;
958
959 bitrate = (double)(frame_size * 8) * enc->frame_rate / enc->frame_rate_base / 1000.0;
960 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
961 fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
962 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
963 fprintf(fvstats,"type= %c\n", av_get_pict_type_char(enc->coded_frame->pict_type));
964 }
965 }
966
967 static void print_report(AVFormatContext **output_files,
968 AVOutputStream **ost_table, int nb_ostreams,
969 int is_last_report)
970 {
971 char buf[1024];
972 AVOutputStream *ost;
973 AVFormatContext *oc, *os;
974 int64_t total_size;
975 AVCodecContext *enc;
976 int frame_number, vid, i;
977 double bitrate, ti1, pts;
978 static int64_t last_time = -1;
979
980 if (!is_last_report) {
981 int64_t cur_time;
982 /* display the report every 0.5 seconds */
983 cur_time = av_gettime();
984 if (last_time == -1) {
985 last_time = cur_time;
986 return;
987 }
988 if ((cur_time - last_time) < 500000)
989 return;
990 last_time = cur_time;
991 }
992
993
994 oc = output_files[0];
995
996 total_size = url_ftell(&oc->pb);
997
998 buf[0] = '\0';
999 ti1 = 1e10;
1000 vid = 0;
1001 for(i=0;i<nb_ostreams;i++) {
1002 ost = ost_table[i];
1003 os = output_files[ost->file_index];
1004 enc = &ost->st->codec;
1005 if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1006 sprintf(buf + strlen(buf), "q=%2.1f ",
1007 enc->coded_frame->quality/(float)FF_QP2LAMBDA);
1008 }
1009 if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1010 frame_number = ost->frame_number;
1011 sprintf(buf + strlen(buf), "frame=%5d q=%2.1f ",
1012 frame_number, enc->coded_frame ? enc->coded_frame->quality/(float)FF_QP2LAMBDA : 0);
1013 if(is_last_report)
1014 sprintf(buf + strlen(buf), "L");
1015 if (enc->flags&CODEC_FLAG_PSNR){
1016 int j;
1017 double error, error_sum=0;
1018 double scale, scale_sum=0;
1019 char type[3]= {'Y','U','V'};
1020 sprintf(buf + strlen(buf), "PSNR=");
1021 for(j=0; j<3; j++){
1022 if(is_last_report){
1023 error= enc->error[j];
1024 scale= enc->width*enc->height*255.0*255.0*frame_number;
1025 }else{
1026 error= enc->coded_frame->error[j];
1027 scale= enc->width*enc->height*255.0*255.0;
1028 }
1029 if(j) scale/=4;
1030 error_sum += error;
1031 scale_sum += scale;
1032 sprintf(buf + strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
1033 }
1034 sprintf(buf + strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
1035 }
1036 vid = 1;
1037 }
1038 /* compute min output value */
1039 pts = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
1040 if ((pts < ti1) && (pts > 0))
1041 ti1 = pts;
1042 }
1043 if (ti1 < 0.01)
1044 ti1 = 0.01;
1045
1046 if (verbose || is_last_report) {
1047 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1048
1049 sprintf(buf + strlen(buf),
1050 "size=%8.0fkB time=%0.1f bitrate=%6.1fkbits/s",
1051 (double)total_size / 1024, ti1, bitrate);
1052
1053 if (verbose > 1)
1054 sprintf(buf + strlen(buf), " dup=%d drop=%d",
1055 nb_frames_dup, nb_frames_drop);
1056
1057 if (verbose >= 0)
1058 fprintf(stderr, "%s \r", buf);
1059
1060 fflush(stderr);
1061 }
1062
1063 if (is_last_report && verbose >= 0){
1064 int64_t raw= audio_size + video_size + extra_size;
1065 fprintf(stderr, "\n");
1066 fprintf(stderr, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1067 video_size/1024.0,
1068 audio_size/1024.0,
1069 extra_size/1024.0,
1070 100.0*(total_size - raw)/raw
1071 );
1072 }
1073 }
1074
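/* Decode one input packet (or flush at EOF) and feed the result to every
   output stream mapped to this input stream: either re-encode it via
   do_audio_out()/do_video_out() or, for stream copy, forward the compressed
   data with adjusted timestamps.  A synthetic next_pts is maintained so a
   timestamp is available even when the demuxer does not provide one. */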
1075 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1076 static int output_packet(AVInputStream *ist, int ist_index,
1077 AVOutputStream **ost_table, int nb_ostreams,
1078 const AVPacket *pkt)
1079 {
1080 AVFormatContext *os;
1081 AVOutputStream *ost;
1082 uint8_t *ptr;
1083 int len, ret, i;
1084 uint8_t *data_buf;
1085 int data_size, got_picture;
1086 AVFrame picture;
1087 void *buffer_to_free;
1088
1089 if(!pkt){
1090 ist->pts= ist->next_pts; // needed for last packet if vsync=0
1091 } else if (pkt->dts != AV_NOPTS_VALUE) { //FIXME seems redundant, as libavformat does this too
1092 ist->next_pts = ist->pts = pkt->dts;
1093 } else {
1094 assert(ist->pts == ist->next_pts);
1095 }
1096
1097 if (pkt == NULL) {
1098 /* EOF handling */
1099 ptr = NULL;
1100 len = 0;
1101 goto handle_eof;
1102 }
1103
1104 len = pkt->size;
1105 ptr = pkt->data;
1106 while (len > 0) {
1107 handle_eof:
1108 /* decode the packet if needed */
1109 data_buf = NULL; /* fail safe */
1110 data_size = 0;
1111 if (ist->decoding_needed) {
1112 switch(ist->st->codec.codec_type) {
1113 case CODEC_TYPE_AUDIO:{
1114 /* XXX: could avoid copy if PCM 16 bits with same
1115 endianness as CPU */
1116 short samples[pkt && pkt->size > AVCODEC_MAX_AUDIO_FRAME_SIZE/2 ? pkt->size : AVCODEC_MAX_AUDIO_FRAME_SIZE/2];
1117 ret = avcodec_decode_audio(&ist->st->codec, samples, &data_size,
1118 ptr, len);
1119 if (ret < 0)
1120 goto fail_decode;
1121 ptr += ret;
1122 len -= ret;
1123 /* Some bug in the MPEG audio decoder gives */
1124 /* data_size < 0; it seems to be an overflow */
1125 if (data_size <= 0) {
1126 /* no audio frame */
1127 continue;
1128 }
1129 data_buf = (uint8_t *)samples;
1130 ist->next_pts += ((int64_t)AV_TIME_BASE/2 * data_size) /
1131 (ist->st->codec.sample_rate * ist->st->codec.channels);
1132 break;}
1133 case CODEC_TYPE_VIDEO:
1134 data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2;
1135 /* XXX: allocate picture correctly */
1136 avcodec_get_frame_defaults(&picture);
1137
1138 ret = avcodec_decode_video(&ist->st->codec,
1139 &picture, &got_picture, ptr, len);
1140 ist->st->quality= picture.quality;
1141 if (ret < 0)
1142 goto fail_decode;
1143 if (!got_picture) {
1144 /* no picture yet */
1145 goto discard_packet;
1146 }
1147 if (ist->st->codec.frame_rate_base != 0) {
1148 ist->next_pts += ((int64_t)AV_TIME_BASE *
1149 ist->st->codec.frame_rate_base) /
1150 ist->st->codec.frame_rate;
1151 }
1152 len = 0;
1153 break;
1154 default:
1155 goto fail_decode;
1156 }
1157 } else {
1158 data_buf = ptr;
1159 data_size = len;
1160 ret = len;
1161 len = 0;
1162 }
1163
1164 buffer_to_free = NULL;
1165 if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO) {
1166 pre_process_video_frame(ist, (AVPicture *)&picture,
1167 &buffer_to_free);
1168 }
1169
1170 /* frame rate emulation */
1171 if (ist->st->codec.rate_emu) {
1172 int64_t pts = av_rescale((int64_t) ist->frame * ist->st->codec.frame_rate_base, 1000000, ist->st->codec.frame_rate);
1173 int64_t now = av_gettime() - ist->start;
1174 if (pts > now)
1175 usleep(pts - now);
1176
1177 ist->frame++;
1178 }
1179
1180 #if 0
1181 /* mpeg PTS reordering : if it is a P or I frame, the PTS
1182 is the one of the next displayed one */
1183 /* XXX: add mpeg4 too ? */
1184 if (ist->st->codec.codec_id == CODEC_ID_MPEG1VIDEO) {
1185 if (ist->st->codec.pict_type != B_TYPE) {
1186 int64_t tmp;
1187 tmp = ist->last_ip_pts;
1188 ist->last_ip_pts = ist->frac_pts.val;
1189 ist->frac_pts.val = tmp;
1190 }
1191 }
1192 #endif
1193 /* if output time reached then transcode raw format,
1194 encode packets and output them */
1195 if (start_time == 0 || ist->pts >= start_time)
1196 for(i=0;i<nb_ostreams;i++) {
1197 int frame_size;
1198
1199 ost = ost_table[i];
1200 if (ost->source_index == ist_index) {
1201 os = output_files[ost->file_index];
1202
1203 #if 0
1204 printf("%d: got pts=%0.3f %0.3f\n", i,
1205 (double)pkt->pts / AV_TIME_BASE,
1206 ((double)ist->pts / AV_TIME_BASE) -
1207 ((double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den));
1208 #endif
1209 /* set the input output pts pairs */
1210 ost->sync_ipts = (double)(ist->pts + input_files_ts_offset[ist->file_index])/ AV_TIME_BASE;
1211
1212 if (ost->encoding_needed) {
1213 switch(ost->st->codec.codec_type) {
1214 case CODEC_TYPE_AUDIO:
1215 do_audio_out(os, ost, ist, data_buf, data_size);
1216 break;
1217 case CODEC_TYPE_VIDEO:
1218 do_video_out(os, ost, ist, &picture, &frame_size);
1219 video_size += frame_size;
1220 if (do_vstats && frame_size)
1221 do_video_stats(os, ost, frame_size);
1222 break;
1223 default:
1224 av_abort();
1225 }
1226 } else {
1227 AVFrame avframe; //FIXME/XXX remove this
1228 AVPacket opkt;
1229 av_init_packet(&opkt);
1230
1231 /* no reencoding needed : output the packet directly */
1232 /* force the input stream PTS */
1233
1234 avcodec_get_frame_defaults(&avframe);
1235 ost->st->codec.coded_frame= &avframe;
1236 avframe.key_frame = pkt->flags & PKT_FLAG_KEY;
1237
1238 if(ost->st->codec.codec_type == CODEC_TYPE_AUDIO)
1239 audio_size += data_size;
1240 else if (ost->st->codec.codec_type == CODEC_TYPE_VIDEO)
1241 video_size += data_size;
1242
1243 opkt.stream_index= ost->index;
1244 opkt.data= data_buf;
1245 opkt.size= data_size;
1246 opkt.pts= pkt->pts + input_files_ts_offset[ist->file_index];
1247 opkt.dts= pkt->dts + input_files_ts_offset[ist->file_index];
1248 opkt.flags= pkt->flags;
1249
1250 av_interleaved_write_frame(os, &opkt);
1251 ost->st->codec.frame_number++;
1252 ost->frame_number++;
1253 }
1254 }
1255 }
1256 av_free(buffer_to_free);
1257 }
1258 discard_packet:
1259 if (pkt == NULL) {
1260 /* EOF handling */
1261
1262 for(i=0;i<nb_ostreams;i++) {
1263 ost = ost_table[i];
1264 if (ost->source_index == ist_index) {
1265 AVCodecContext *enc= &ost->st->codec;
1266 os = output_files[ost->file_index];
1267
1268 if(ost->st->codec.codec_type == CODEC_TYPE_AUDIO && enc->frame_size <=1)
1269 continue;
1270 if(ost->st->codec.codec_type == CODEC_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
1271 continue;
1272
1273 if (ost->encoding_needed) {
1274 for(;;) {
1275 AVPacket pkt;
1276 av_init_packet(&pkt);
1277 pkt.stream_index= ost->index;
1278
1279 switch(ost->st->codec.codec_type) {
1280 case CODEC_TYPE_AUDIO:
1281 ret = avcodec_encode_audio(enc, bit_buffer, VIDEO_BUFFER_SIZE, NULL);
1282 audio_size += ret;
1283 pkt.flags |= PKT_FLAG_KEY;
1284 break;
1285 case CODEC_TYPE_VIDEO:
1286 ret = avcodec_encode_video(enc, bit_buffer, VIDEO_BUFFER_SIZE, NULL);
1287 video_size += ret;
1288 if(enc->coded_frame && enc->coded_frame->key_frame)
1289 pkt.flags |= PKT_FLAG_KEY;
1290 if (ost->logfile && enc->stats_out) {
1291 fprintf(ost->logfile, "%s", enc->stats_out);
1292 }
1293 break;
1294 default:
1295 ret=-1;
1296 }
1297
1298 if(ret<=0)
1299 break;
1300 pkt.data= bit_buffer;
1301 pkt.size= ret;
1302 if(enc->coded_frame)
1303 pkt.pts= enc->coded_frame->pts;
1304 av_interleaved_write_frame(os, &pkt);
1305 }
1306 }
1307 }
1308 }
1309 }
1310
1311 return 0;
1312 fail_decode:
1313 return -1;
1314 }
1315
1316
1317 /*
1318 * The following code is the main loop of the file converter
1319 */
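/*
 * Rough outline: build the input and output stream tables, resolve the -map
 * arguments (or pick the first matching input stream of each type), decide
 * per output stream whether to copy or transcode and set up
 * resampling/cropping/padding and two-pass log files, open all codecs and
 * write the output headers.  The main loop then picks the input file whose
 * output stream has the smallest timestamp, reads a packet and hands it to
 * output_packet().  At EOF the decoders and encoders are flushed and the
 * trailers are written.
 */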
1320 static int av_encode(AVFormatContext **output_files,
1321 int nb_output_files,
1322 AVFormatContext **input_files,
1323 int nb_input_files,
1324 AVStreamMap *stream_maps, int nb_stream_maps)
1325 {
1326 int ret, i, j, k, n, nb_istreams = 0, nb_ostreams = 0;
1327 AVFormatContext *is, *os;
1328 AVCodecContext *codec, *icodec;
1329 AVOutputStream *ost, **ost_table = NULL;
1330 AVInputStream *ist, **ist_table = NULL;
1331 AVInputFile *file_table;
1332 AVFormatContext *stream_no_data;
1333 int key;
1334
1335 file_table= (AVInputFile*) av_mallocz(nb_input_files * sizeof(AVInputFile));
1336 if (!file_table)
1337 goto fail;
1338
1339 if (!bit_buffer)
1340 bit_buffer = av_malloc(VIDEO_BUFFER_SIZE);
1341 if (!bit_buffer)
1342 goto fail;
1343
1344 /* input stream init */
1345 j = 0;
1346 for(i=0;i<nb_input_files;i++) {
1347 is = input_files[i];
1348 file_table[i].ist_index = j;
1349 file_table[i].nb_streams = is->nb_streams;
1350 j += is->nb_streams;
1351 }
1352 nb_istreams = j;
1353
1354 ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *));
1355 if (!ist_table)
1356 goto fail;
1357
1358 for(i=0;i<nb_istreams;i++) {
1359 ist = av_mallocz(sizeof(AVInputStream));
1360 if (!ist)
1361 goto fail;
1362 ist_table[i] = ist;
1363 }
1364 j = 0;
1365 for(i=0;i<nb_input_files;i++) {
1366 is = input_files[i];
1367 for(k=0;k<is->nb_streams;k++) {
1368 ist = ist_table[j++];
1369 ist->st = is->streams[k];
1370 ist->file_index = i;
1371 ist->index = k;
1372 ist->discard = 1; /* the stream is discarded by default
1373 (changed later) */
1374
1375 if (ist->st->codec.rate_emu) {
1376 ist->start = av_gettime();
1377 ist->frame = 0;
1378 }
1379 }
1380 }
1381
1382 /* output stream init */
1383 nb_ostreams = 0;
1384 for(i=0;i<nb_output_files;i++) {
1385 os = output_files[i];
1386 nb_ostreams += os->nb_streams;
1387 }
1388 if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) {
1389 fprintf(stderr, "Number of stream maps must match number of output streams\n");
1390 exit(1);
1391 }
1392
1393 /* Sanity check the mapping args -- do the input files & streams exist? */
1394 for(i=0;i<nb_stream_maps;i++) {
1395 int fi = stream_maps[i].file_index;
1396 int si = stream_maps[i].stream_index;
1397
1398 if (fi < 0 || fi > nb_input_files - 1 ||
1399 si < 0 || si > file_table[fi].nb_streams - 1) {
1400 fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si);
1401 exit(1);
1402 }
1403 }
1404
1405 ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams);
1406 if (!ost_table)
1407 goto fail;
1408 for(i=0;i<nb_ostreams;i++) {
1409 ost = av_mallocz(sizeof(AVOutputStream));
1410 if (!ost)
1411 goto fail;
1412 ost_table[i] = ost;
1413 }
1414
1415 n = 0;
1416 for(k=0;k<nb_output_files;k++) {
1417 os = output_files[k];
1418 for(i=0;i<os->nb_streams;i++) {
1419 int found;
1420 ost = ost_table[n++];
1421 ost->file_index = k;
1422 ost->index = i;
1423 ost->st = os->streams[i];
1424 if (nb_stream_maps > 0) {
1425 ost->source_index = file_table[stream_maps[n-1].file_index].ist_index +
1426 stream_maps[n-1].stream_index;
1427
1428 /* Sanity check that the stream types match */
1429 if (ist_table[ost->source_index]->st->codec.codec_type != ost->st->codec.codec_type) {
1430 fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n",
1431 stream_maps[n-1].file_index, stream_maps[n-1].stream_index,
1432 ost->file_index, ost->index);
1433 exit(1);
1434 }
1435
1436 } else {
1437 /* get corresponding input stream index : we select the first one with the right type */
1438 found = 0;
1439 for(j=0;j<nb_istreams;j++) {
1440 ist = ist_table[j];
1441 if (ist->discard &&
1442 ist->st->codec.codec_type == ost->st->codec.codec_type) {
1443 ost->source_index = j;
1444 found = 1;
1445 }
1446 }
1447
1448 if (!found) {
1449 /* try again and reuse existing stream */
1450 for(j=0;j<nb_istreams;j++) {
1451 ist = ist_table[j];
1452 if (ist->st->codec.codec_type == ost->st->codec.codec_type) {
1453 ost->source_index = j;
1454 found = 1;
1455 }
1456 }
1457 if (!found) {
1458 fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n",
1459 ost->file_index, ost->index);
1460 exit(1);
1461 }
1462 }
1463 }
1464 ist = ist_table[ost->source_index];
1465 ist->discard = 0;
1466 }
1467 }
1468
1469 /* for each output stream, we compute the right encoding parameters */
1470 for(i=0;i<nb_ostreams;i++) {
1471 ost = ost_table[i];
1472 ist = ist_table[ost->source_index];
1473
1474 codec = &ost->st->codec;
1475 icodec = &ist->st->codec;
1476
1477 if (ost->st->stream_copy) {
1478 /* if stream_copy is selected, no need to decode or encode */
1479 codec->codec_id = icodec->codec_id;
1480 codec->codec_type = icodec->codec_type;
1481 codec->codec_tag = icodec->codec_tag;
1482 codec->bit_rate = icodec->bit_rate;
1483 switch(codec->codec_type) {
1484 case CODEC_TYPE_AUDIO:
1485 codec->sample_rate = icodec->sample_rate;
1486 codec->channels = icodec->channels;
1487 codec->frame_size = icodec->frame_size;
1488 break;
1489 case CODEC_TYPE_VIDEO:
1490 codec->frame_rate = icodec->frame_rate;
1491 codec->frame_rate_base = icodec->frame_rate_base;
1492 codec->width = icodec->width;
1493 codec->height = icodec->height;
1494 break;
1495 default:
1496 av_abort();
1497 }
1498 } else {
1499 switch(codec->codec_type) {
1500 case CODEC_TYPE_AUDIO:
1501 if (fifo_init(&ost->fifo, 2 * MAX_AUDIO_PACKET_SIZE))
1502 goto fail;
1503
1504 if (codec->channels == icodec->channels &&
1505 codec->sample_rate == icodec->sample_rate) {
1506 ost->audio_resample = 0;
1507 } else {
1508 if (codec->channels != icodec->channels &&
1509 (icodec->codec_id == CODEC_ID_AC3 ||
1510 icodec->codec_id == CODEC_ID_DTS)) {
1511 /* Special case for 5.1 AC3 and DTS input */
1512 /* and mono or stereo output */
1513 /* Request specific number of channels */
1514 icodec->channels = codec->channels;
1515 if (codec->sample_rate == icodec->sample_rate)
1516 ost->audio_resample = 0;
1517 else {
1518 ost->audio_resample = 1;
1519 }
1520 } else {
1521 ost->audio_resample = 1;
1522 }
1523 }
1524 if(audio_sync_method>1)
1525 ost->audio_resample = 1;
1526
1527 if(ost->audio_resample){
1528 ost->resample = audio_resample_init(codec->channels, icodec->channels,
1529 codec->sample_rate, icodec->sample_rate);
1530 if(!ost->resample){
1531 printf("Can't resample. Aborting.\n");
1532 av_abort();
1533 }
1534 }
1535 ist->decoding_needed = 1;
1536 ost->encoding_needed = 1;
1537 break;
1538 case CODEC_TYPE_VIDEO:
1539 if (codec->width == icodec->width &&
1540 codec->height == icodec->height &&
1541 frame_topBand == 0 &&
1542 frame_bottomBand == 0 &&
1543 frame_leftBand == 0 &&
1544 frame_rightBand == 0 &&
1545 frame_padtop == 0 &&
1546 frame_padbottom == 0 &&
1547 frame_padleft == 0 &&
1548 frame_padright == 0)
1549 {
1550 ost->video_resample = 0;
1551 ost->video_crop = 0;
1552 ost->video_pad = 0;
1553 } else if ((codec->width == icodec->width -
1554 (frame_leftBand + frame_rightBand)) &&
1555 (codec->height == icodec->height -
1556 (frame_topBand + frame_bottomBand)))
1557 {
1558 ost->video_resample = 0;
1559 ost->video_crop = 1;
1560 ost->topBand = frame_topBand;
1561 ost->leftBand = frame_leftBand;
1562 } else if ((codec->width == icodec->width +
1563 (frame_padleft + frame_padright)) &&
1564 (codec->height == icodec->height +
1565 (frame_padtop + frame_padbottom))) {
1566 ost->video_resample = 0;
1567 ost->video_crop = 0;
1568 ost->video_pad = 1;
1569 ost->padtop = frame_padtop;
1570 ost->padleft = frame_padleft;
1571 ost->padbottom = frame_padbottom;
1572 ost->padright = frame_padright;
1573 avcodec_get_frame_defaults(&ost->pict_tmp);
1574 if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
1575 codec->width, codec->height ) )
1576 goto fail;
1577 } else {
1578 ost->video_resample = 1;
1579 ost->video_crop = 0; // cropping is handled as part of resample
1580 avcodec_get_frame_defaults(&ost->pict_tmp);
1581 if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
1582 codec->width, codec->height ) )
1583 goto fail;
1584
1585 ost->img_resample_ctx = img_resample_full_init(
1586 ost->st->codec.width, ost->st->codec.height,
1587 ist->st->codec.width, ist->st->codec.height,
1588 frame_topBand, frame_bottomBand,
1589 frame_leftBand, frame_rightBand,
1590 frame_padtop, frame_padbottom,
1591 frame_padleft, frame_padright);
1592
1593 ost->padtop = frame_padtop;
1594 ost->padleft = frame_padleft;
1595 ost->padbottom = frame_padbottom;
1596 ost->padright = frame_padright;
1597
1598 }
1599 ost->encoding_needed = 1;
1600 ist->decoding_needed = 1;
1601 break;
1602 default:
1603 av_abort();
1604 }
1605 /* two pass mode */
1606 if (ost->encoding_needed &&
1607 (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1608 char logfilename[1024];
1609 FILE *f;
1610 int size;
1611 char *logbuffer;
1612
1613 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1614 pass_logfilename ?
1615 pass_logfilename : DEFAULT_PASS_LOGFILENAME, i);
1616 if (codec->flags & CODEC_FLAG_PASS1) {
1617 f = fopen(logfilename, "w");
1618 if (!f) {
1619 perror(logfilename);
1620 exit(1);
1621 }
1622 ost->logfile = f;
1623 } else {
1624 /* read the log file */
1625 f = fopen(logfilename, "r");
1626 if (!f) {
1627 perror(logfilename);
1628 exit(1);
1629 }
1630 fseek(f, 0, SEEK_END);
1631 size = ftell(f);
1632 fseek(f, 0, SEEK_SET);
1633 logbuffer = av_malloc(size + 1);
1634 if (!logbuffer) {
1635 fprintf(stderr, "Could not allocate log buffer\n");
1636 exit(1);
1637 }
1638 size = fread(logbuffer, 1, size, f);
1639 fclose(f);
1640 logbuffer[size] = '\0';
1641 codec->stats_in = logbuffer;
1642 }
1643 }
1644 }
1645 }
1646
1647 /* dump the file output parameters - cannot be done before in case
1648 of stream copy */
1649 for(i=0;i<nb_output_files;i++) {
1650 dump_format(output_files[i], i, output_files[i]->filename, 1);
1651 }
1652
1653 /* dump the stream mapping */
1654 if (verbose >= 0) {
1655 fprintf(stderr, "Stream mapping:\n");
1656 for(i=0;i<nb_ostreams;i++) {
1657 ost = ost_table[i];
1658 fprintf(stderr, " Stream #%d.%d -> #%d.%d\n",
1659 ist_table[ost->source_index]->file_index,
1660 ist_table[ost->source_index]->index,
1661 ost->file_index,
1662 ost->index);
1663 }
1664 }
1665
1666 /* open each encoder */
1667 for(i=0;i<nb_ostreams;i++) {
1668 ost = ost_table[i];
1669 if (ost->encoding_needed) {
1670 AVCodec *codec;
1671 codec = avcodec_find_encoder(ost->st->codec.codec_id);
1672 if (!codec) {
1673 fprintf(stderr, "Unsupported codec for output stream #%d.%d\n",
1674 ost->file_index, ost->index);
1675 exit(1);
1676 }
1677 if (avcodec_open(&ost->st->codec, codec) < 0) {
1678 fprintf(stderr, "Error while opening codec for stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height\n",
1679 ost->file_index, ost->index);
1680 exit(1);
1681 }
1682 extra_size += ost->st->codec.extradata_size;
1683 }
1684 }
1685
1686 /* open each decoder */
1687 for(i=0;i<nb_istreams;i++) {
1688 ist = ist_table[i];
1689 if (ist->decoding_needed) {
1690 AVCodec *codec;
1691 codec = avcodec_find_decoder(ist->st->codec.codec_id);
1692 if (!codec) {
1693 fprintf(stderr, "Unsupported codec (id=%d) for input stream #%d.%d\n",
1694 ist->st->codec.codec_id, ist->file_index, ist->index);
1695 exit(1);
1696 }
1697 if (avcodec_open(&ist->st->codec, codec) < 0) {
1698 fprintf(stderr, "Error while opening codec for input stream #%d.%d\n",
1699 ist->file_index, ist->index);
1700 exit(1);
1701 }
1702 //if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO)
1703 // ist->st->codec.flags |= CODEC_FLAG_REPEAT_FIELD;
1704 }
1705 }
1706
1707 /* init pts */
1708 for(i=0;i<nb_istreams;i++) {
1709 ist = ist_table[i];
1710 is = input_files[ist->file_index];
1711 ist->pts = 0;
1712 ist->next_pts = ist->st->start_time;
1713 if(ist->next_pts == AV_NOPTS_VALUE)
1714 ist->next_pts=0;
1715 ist->is_start = 1;
1716 }
1717
1718 /* compute buffer size max (should use a complete heuristic) */
1719 for(i=0;i<nb_input_files;i++) {
1720 file_table[i].buffer_size_max = 2048;
1721 }
1722
1723 /* set meta data information from input file if required */
1724 for (i=0;i<nb_meta_data_maps;i++) {
1725 AVFormatContext *out_file;
1726 AVFormatContext *in_file;
1727
1728 int out_file_index = meta_data_maps[i].out_file;
1729 int in_file_index = meta_data_maps[i].in_file;
1730 if ( out_file_index < 0 || out_file_index >= nb_output_files ) {
1731 fprintf(stderr, "Invalid output file index %d map_meta_data(%d,%d)\n", out_file_index, out_file_index, in_file_index);
1732 ret = -EINVAL;
1733 goto fail;
1734 }
1735 if ( in_file_index < 0 || in_file_index >= nb_input_files ) {
1736 fprintf(stderr, "Invalid input file index %d map_meta_data(%d,%d)\n", in_file_index, out_file_index, in_file_index);
1737 ret = -EINVAL;
1738 goto fail;
1739 }
1740
1741 out_file = output_files[out_file_index];
1742 in_file = input_files[in_file_index];
1743
1744 strcpy(out_file->title, in_file->title);
1745 strcpy(out_file->author, in_file->author);
1746 strcpy(out_file->copyright, in_file->copyright);
1747 strcpy(out_file->comment, in_file->comment);
1748 strcpy(out_file->album, in_file->album);
1749 out_file->year = in_file->year;
1750 out_file->track = in_file->track;
1751 strcpy(out_file->genre, in_file->genre);
1752 }
1753
1754 /* open files and write file headers */
1755 for(i=0;i<nb_output_files;i++) {
1756 os = output_files[i];
1757 if (av_write_header(os) < 0) {
1758 fprintf(stderr, "Could not write header for output file #%d (incorrect codec parameters ?)\n", i);
1759 ret = -EINVAL;
1760 goto fail;
1761 }
1762 }
1763
1764 #ifndef CONFIG_WIN32
1765 if ( !using_stdin && verbose >= 0) {
1766 fprintf(stderr, "Press [q] to stop encoding\n");
1767 url_set_interrupt_cb(decode_interrupt_cb);
1768 }
1769 #endif
1770 term_init();
1771
1772 stream_no_data = 0;
1773 key = -1;
1774
1775 for(; received_sigterm == 0;) {
1776 int file_index, ist_index;
1777 AVPacket pkt;
1778 double ipts_min;
1779 double opts_min;
1780
1781 redo:
1782 ipts_min= 1e100;
1783 opts_min= 1e100;
1784 /* if 'q' was pressed, exit */
1785 if (!using_stdin) {
1786 if (q_pressed)
1787 break;
1788 /* read_key() returns 0 on EOF */
1789 key = read_key();
1790 if (key == 'q')
1791 break;
1792 }
1793
1794 /* select the stream that we must read now by looking at the
1795 smallest output pts */
1796 file_index = -1;
1797 for(i=0;i<nb_ostreams;i++) {
1798 double ipts, opts;
1799 ost = ost_table[i];
1800 os = output_files[ost->file_index];
1801 ist = ist_table[ost->source_index];
1802 if(ost->st->codec.codec_type == CODEC_TYPE_VIDEO)
1803 opts = (double)ost->sync_opts * ost->st->codec.frame_rate_base / ost->st->codec.frame_rate;
1804 else
1805 opts = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
1806 ipts = (double)ist->pts;
1807 if (!file_table[ist->file_index].eof_reached){
1808 if(ipts < ipts_min) {
1809 ipts_min = ipts;
1810 if(input_sync ) file_index = ist->file_index;
1811 }
1812 if(opts < opts_min) {
1813 opts_min = opts;
1814 if(!input_sync) file_index = ist->file_index;
1815 }
1816 }
1817 }
1818 /* if none, the conversion is finished */
1819 if (file_index < 0) {
1820 break;
1821 }
1822
1823 /* finish if recording time exhausted */
1824 if (recording_time > 0 && opts_min >= (recording_time / 1000000.0))
1825 break;
1826
1827 /* read a frame from it and output it in the fifo */
1828 is = input_files[file_index];
1829 if (av_read_frame(is, &pkt) < 0) {
1830 file_table[file_index].eof_reached = 1;
1831 continue;
1832 }
1833
1834 if (!pkt.size) {
1835 stream_no_data = is;
1836 } else {
1837 stream_no_data = 0;
1838 }
1839 if (do_pkt_dump) {
1840 av_pkt_dump(stdout, &pkt, do_hex_dump);
1841 }
1842 /* the following test is needed in case new streams appear
1843 dynamically in the stream: we ignore them */
1844 if (pkt.stream_index >= file_table[file_index].nb_streams)
1845 goto discard_packet;
1846 ist_index = file_table[file_index].ist_index + pkt.stream_index;
1847 ist = ist_table[ist_index];
1848 if (ist->discard)
1849 goto discard_packet;
1850
1851 // fprintf(stderr, "next:%lld dts:%lld off:%lld %d\n", ist->next_pts, pkt.dts, input_files_ts_offset[ist->file_index], ist->st->codec.codec_type);
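/* If the packet dts jumps more than 10 seconds away from the predicted
   next_pts (and -copyts is not in use), treat it as a timestamp
   discontinuity: fold the jump into this input file's timestamp offset and
   shift the prediction for all of its streams. */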
1852 if (pkt.dts != AV_NOPTS_VALUE) {
1853 int64_t delta= pkt.dts - ist->next_pts;
1854 if(ABS(delta) > 10LL*AV_TIME_BASE && !copy_ts){
1855 input_files_ts_offset[ist->file_index]-= delta;
1856 if (verbose > 2)
1857 fprintf(stderr, "timestamp discontinuity %lld, new offset= %lld\n", delta, input_files_ts_offset[ist->file_index]);
1858 for(i=0; i<file_table[file_index].nb_streams; i++){
1859 int index= file_table[file_index].ist_index + i;
1860 ist_table[index]->next_pts += delta;
1861 ist_table[index]->is_start=1;
1862 }
1863 }
1864 }
1865
1866 //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->index, pkt.size);
1867 if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
1868
1869 if (verbose >= 0)
1870 fprintf(stderr, "Error while decoding stream #%d.%d\n",
1871 ist->file_index, ist->index);
1872
1873 av_free_packet(&pkt);
1874 goto redo;
1875 }
1876
1877 discard_packet:
1878 av_free_packet(&pkt);
1879
1880 /* dump report by using the first output video and audio streams */
1881 print_report(output_files, ost_table, nb_ostreams, 0);
1882 }
1883
1884 /* at the end of stream, we must flush the decoder buffers */
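/* output_packet() is called with a NULL packet here, which is the signal
   that the input is over; codecs with delay (B frames, for example) can
   then return the frames they still have buffered before the encoders
   and muxers are closed below. */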
1885 for(i=0;i<nb_istreams;i++) {
1886 ist = ist_table[i];
1887 if (ist->decoding_needed) {
1888 output_packet(ist, i, ost_table, nb_ostreams, NULL);
1889 }
1890 }
1891
1892 term_exit();
1893
1894 /* write the trailer if needed and close file */
1895 for(i=0;i<nb_output_files;i++) {
1896 os = output_files[i];
1897 av_write_trailer(os);
1898 }
1899
1900 /* dump report by using the first video and audio streams */
1901 print_report(output_files, ost_table, nb_ostreams, 1);
1902
1903 /* close each encoder */
1904 for(i=0;i<nb_ostreams;i++) {
1905 ost = ost_table[i];
1906 if (ost->encoding_needed) {
1907 av_freep(&ost->st->codec.stats_in);
1908 avcodec_close(&ost->st->codec);
1909 }
1910 }
1911
1912 /* close each decoder */
1913 for(i=0;i<nb_istreams;i++) {
1914 ist = ist_table[i];
1915 if (ist->decoding_needed) {
1916 avcodec_close(&ist->st->codec);
1917 }
1918 }
1919
1920 /* finished! */
1921
1922 ret = 0;
1923 fail1:
1924 av_free(file_table);
1925
1926 if (ist_table) {
1927 for(i=0;i<nb_istreams;i++) {
1928 ist = ist_table[i];
1929 av_free(ist);
1930 }
1931 av_free(ist_table);
1932 }
1933 if (ost_table) {
1934 for(i=0;i<nb_ostreams;i++) {
1935 ost = ost_table[i];
1936 if (ost) {
1937 if (ost->logfile) {
1938 fclose(ost->logfile);
1939 ost->logfile = NULL;
1940 }
1941 fifo_free(&ost->fifo); /* works even if fifo is not
1942 initialized but set to zero */
1943 av_free(ost->pict_tmp.data[0]);
1944 if (ost->video_resample)
1945 img_resample_close(ost->img_resample_ctx);
1946 if (ost->audio_resample)
1947 audio_resample_close(ost->resample);
1948 av_free(ost);
1949 }
1950 }
1951 av_free(ost_table);
1952 }
1953 return ret;
1954 fail:
1955 ret = -ENOMEM;
1956 goto fail1;
1957 }
1958
1959 #if 0
1960 int file_read(const char *filename)
1961 {
1962 URLContext *h;
1963 unsigned char buffer[1024];
1964 int len, i;
1965
1966 if (url_open(&h, filename, O_RDONLY) < 0) {
1967 printf("could not open '%s'\n", filename);
1968 return -1;
1969 }
1970 for(;;) {
1971 len = url_read(h, buffer, sizeof(buffer));
1972 if (len <= 0)
1973 break;
1974 for(i=0;i<len;i++) putchar(buffer[i]);
1975 }
1976 url_close(h);
1977 return 0;
1978 }
1979 #endif
1980
1981 static void opt_image_format(const char *arg)
1982 {
1983 AVImageFormat *f;
1984
1985 for(f = first_image_format; f != NULL; f = f->next) {
1986 if (!strcmp(arg, f->name))
1987 break;
1988 }
1989 if (!f) {
1990 fprintf(stderr, "Unknown image format: '%s'\n", arg);
1991 exit(1);
1992 }
1993 image_format = f;
1994 }
1995
1996 static void opt_format(const char *arg)
1997 {
1998 /* compatibility stuff for pgmyuv */
1999 if (!strcmp(arg, "pgmyuv")) {
2000 opt_image_format(arg);
2001 arg = "image";
2002 }
2003
2004 file_iformat = av_find_input_format(arg);
2005 file_oformat = guess_format(arg, NULL, NULL);
2006 if (!file_iformat && !file_oformat) {
2007 fprintf(stderr, "Unknown input or output format: %s\n", arg);
2008 exit(1);
2009 }
2010 }
2011
2012 static void opt_video_bitrate(const char *arg)
2013 {
2014 video_bit_rate = atoi(arg) * 1000;
2015 }
2016
2017 static void opt_video_bitrate_tolerance(const char *arg)
2018 {
2019 video_bit_rate_tolerance = atoi(arg) * 1000;
2020 }
2021
2022 static void opt_video_bitrate_max(const char *arg)
2023 {
2024 video_rc_max_rate = atoi(arg) * 1000;
2025 }
2026
2027 static void opt_video_bitrate_min(const char *arg)
2028 {
2029 video_rc_min_rate = atoi(arg) * 1000;
2030 }
2031
2032 static void opt_video_buffer_size(const char *arg)
2033 {
2034 video_rc_buffer_size = atoi(arg) * 8*1024;
2035 }
2036
2037 static void opt_video_rc_eq(char *arg)
2038 {
2039 video_rc_eq = arg;
2040 }
2041
2042 static void opt_video_rc_override_string(char *arg)
2043 {
2044 video_rc_override_string = arg;
2045 }
2046
2047
2048 static void opt_workaround_bugs(const char *arg)
2049 {
2050 workaround_bugs = atoi(arg);
2051 }
2052
2053 static void opt_dct_algo(const char *arg)
2054 {
2055 dct_algo = atoi(arg);
2056 }
2057
2058 static void opt_idct_algo(const char *arg)
2059 {
2060 idct_algo = atoi(arg);
2061 }
2062
2063 static void opt_me_threshold(const char *arg)
2064 {
2065 me_threshold = atoi(arg);
2066 }
2067
2068 static void opt_mb_threshold(const char *arg)
2069 {
2070 mb_threshold = atoi(arg);
2071 }
2072
2073 static void opt_error_resilience(const char *arg)
2074 {
2075 error_resilience = atoi(arg);
2076 }
2077
2078 static void opt_error_concealment(const char *arg)
2079 {
2080 error_concealment = atoi(arg);
2081 }
2082
2083 static void opt_debug(const char *arg)
2084 {
2085 debug = atoi(arg);
2086 }
2087
2088 static void opt_vismv(const char *arg)
2089 {
2090 debug_mv = atoi(arg);
2091 }
2092
2093 static void opt_verbose(const char *arg)
2094 {
2095 verbose = atoi(arg);
2096 av_log_set_level(atoi(arg));
2097 }
2098
2099 static void opt_frame_rate(const char *arg)
2100 {
2101 if (parse_frame_rate(&frame_rate, &frame_rate_base, arg) < 0) {
2102 fprintf(stderr, "Incorrect frame rate\n");
2103 exit(1);
2104 }
2105 }
2106
2107 static void opt_frame_crop_top(const char *arg)
2108 {
2109 frame_topBand = atoi(arg);
2110 if (frame_topBand < 0) {
2111 fprintf(stderr, "Incorrect top crop size\n");
2112 exit(1);
2113 }
2114 if ((frame_topBand % 2) != 0) {
2115 fprintf(stderr, "Top crop size must be a multiple of 2\n");
2116 exit(1);
2117 }
2118 if ((frame_topBand) >= frame_height){
2119 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2120 exit(1);
2121 }
2122 frame_height -= frame_topBand;
2123 }
2124
2125 static void opt_frame_crop_bottom(const char *arg)
2126 {
2127 frame_bottomBand = atoi(arg);
2128 if (frame_bottomBand < 0) {
2129 fprintf(stderr, "Incorrect bottom crop size\n");
2130 exit(1);
2131 }
2132 if ((frame_bottomBand % 2) != 0) {
2133 fprintf(stderr, "Bottom crop size must be a multiple of 2\n");
2134 exit(1);
2135 }
2136 if ((frame_bottomBand) >= frame_height){
2137 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2138 exit(1);
2139 }
2140 frame_height -= frame_bottomBand;
2141 }
2142
2143 static void opt_frame_crop_left(const char *arg)
2144 {
2145 frame_leftBand = atoi(arg);
2146 if (frame_leftBand < 0) {
2147 fprintf(stderr, "Incorrect left crop size\n");
2148 exit(1);
2149 }
2150 if ((frame_leftBand % 2) != 0) {
2151 fprintf(stderr, "Left crop size must be a multiple of 2\n");
2152 exit(1);
2153 }
2154 if ((frame_leftBand) >= frame_width){
2155 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2156 exit(1);
2157 }
2158 frame_width -= frame_leftBand;
2159 }
2160
2161 static void opt_frame_crop_right(const char *arg)
2162 {
2163 frame_rightBand = atoi(arg);
2164 if (frame_rightBand < 0) {
2165 fprintf(stderr, "Incorrect right crop size\n");
2166 exit(1);
2167 }
2168 if ((frame_rightBand % 2) != 0) {
2169 fprintf(stderr, "Right crop size must be a multiple of 2\n");
2170 exit(1);
2171 }
2172 if ((frame_rightBand) >= frame_width){
2173 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2174 exit(1);
2175 }
2176 frame_width -= frame_rightBand;
2177 }
2178
2179 static void opt_frame_size(const char *arg)
2180 {
2181 if (parse_image_size(&frame_width, &frame_height, arg) < 0) {
2182 fprintf(stderr, "Incorrect frame size\n");
2183 exit(1);
2184 }
2185 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2186 fprintf(stderr, "Frame size must be a multiple of 2\n");
2187 exit(1);
2188 }
2189 }
2190
2191
2192 #define SCALEBITS 10
2193 #define ONE_HALF (1 << (SCALEBITS - 1))
2194 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
2195
2196 #define RGB_TO_Y(r, g, b) \
2197 ((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
2198 FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
2199
2200 #define RGB_TO_U(r1, g1, b1, shift)\
2201 (((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
2202 FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
2203
2204 #define RGB_TO_V(r1, g1, b1, shift)\
2205 (((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
2206 FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
2207
2208 static void opt_pad_color(const char *arg) {
2209 /* Input is expected to be six hex digits, similar to
2210 how colors are written in HTML (but without the leading #) */
2211 int rgb = strtol(arg, NULL, 16);
2212 int r,g,b;
2213
2214 r = (rgb >> 16);
2215 g = ((rgb >> 8) & 255);
2216 b = (rgb & 255);
2217
2218 padcolor[0] = RGB_TO_Y(r,g,b);
2219 padcolor[1] = RGB_TO_U(r,g,b,0);
2220 padcolor[2] = RGB_TO_V(r,g,b,0);
2221 }
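/* A small sanity check of the conversion above (kept disabled, purely
   illustrative): with SCALEBITS == 10 the scaled luma coefficients are
   306, 601 and 117, which sum to exactly 1024, so pure white maps to
   Y=255, U=V=128. */
#if 0
static void test_opt_pad_color(void)
{
    opt_pad_color("ffffff");
    assert(padcolor[0] == 255 && padcolor[1] == 128 && padcolor[2] == 128);
}
#endif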
2222
2223 static void opt_frame_pad_top(const char *arg)
2224 {
2225 frame_padtop = atoi(arg);
2226 if (frame_padtop < 0) {
2227 fprintf(stderr, "Incorrect top pad size\n");
2228 exit(1);
2229 }
2230 if ((frame_padtop % 2) != 0) {
2231 fprintf(stderr, "Top pad size must be a multiple of 2\n");
2232 exit(1);
2233 }
2234 }
2235
2236 static void opt_frame_pad_bottom(const char *arg)
2237 {
2238 frame_padbottom = atoi(arg);
2239 if (frame_padbottom < 0) {
2240 fprintf(stderr, "Incorrect bottom pad size\n");
2241 exit(1);
2242 }
2243 if ((frame_padbottom % 2) != 0) {
2244 fprintf(stderr, "Bottom pad size must be a multiple of 2\n");
2245 exit(1);
2246 }
2247 }
2248
2249
2250 static void opt_frame_pad_left(const char *arg)
2251 {
2252 frame_padleft = atoi(arg);
2253 if (frame_padleft < 0) {
2254 fprintf(stderr, "Incorrect left pad size\n");
2255 exit(1);
2256 }
2257 if ((frame_padleft % 2) != 0) {
2258 fprintf(stderr, "Left pad size must be a multiple of 2\n");
2259 exit(1);
2260 }
2261 }
2262
2263
2264 static void opt_frame_pad_right(const char *arg)
2265 {
2266 frame_padright = atoi(arg);
2267 if (frame_padright < 0) {
2268 fprintf(stderr, "Incorrect right pad size\n");
2269 exit(1);
2270 }
2271 if ((frame_padright % 2) != 0) {
2272 fprintf(stderr, "Right pad size must be a multiple of 2\n");
2273 exit(1);
2274 }
2275 }
2276
2277
2278 static void opt_frame_pix_fmt(const char *arg)
2279 {
2280 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2281 }
2282
2283 static void opt_frame_aspect_ratio(const char *arg)
2284 {
2285 int x = 0, y = 0;
2286 double ar = 0;
2287 const char *p;
2288
2289 p = strchr(arg, ':');
2290 if (p) {
2291 x = strtol(arg, (char **)&arg, 10);
2292 if (arg == p)
2293 y = strtol(arg+1, (char **)&arg, 10);
2294 if (x > 0 && y > 0)
2295 ar = (double)x / (double)y;
2296 } else
2297 ar = strtod(arg, (char **)&arg);
2298
2299 if (!ar) {
2300 fprintf(stderr, "Incorrect aspect ratio specification.\n");
2301 exit(1);
2302 }
2303 frame_aspect_ratio = ar;
2304 }
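/* For example "16:9" parses as x = 16, y = 9 and stores 16.0/9.0, while a
   plain decimal such as "1.7777" is accepted and stored as-is. */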
2305
2306 static void opt_gop_size(const char *arg)
2307 {
2308 gop_size = atoi(arg);
2309 }
2310
2311 static void opt_b_frames(const char *arg)
2312 {
2313 b_frames = atoi(arg);
2314 if (b_frames > FF_MAX_B_FRAMES) {
2315 fprintf(stderr, "\nCannot have more than %d B frames, increase FF_MAX_B_FRAMES.\n", FF_MAX_B_FRAMES);
2316 exit(1);
2317 } else if (b_frames < 1) {
2318 fprintf(stderr, "\nNumber of B frames must be higher than 0\n");
2319 exit(1);
2320 }
2321 }
2322
2323 static void opt_mb_decision(const char *arg)
2324 {
2325 mb_decision = atoi(arg);
2326 }
2327
2328 static void opt_mb_cmp(const char *arg)
2329 {
2330 mb_cmp = atoi(arg);
2331 }
2332
2333 static void opt_ildct_cmp(const char *arg)
2334 {
2335 ildct_cmp = atoi(arg);
2336 }
2337
2338 static void opt_sub_cmp(const char *arg)
2339 {
2340 sub_cmp = atoi(arg);
2341 }
2342
2343 static void opt_cmp(const char *arg)
2344 {
2345 cmp = atoi(arg);
2346 }
2347
2348 static void opt_pre_cmp(const char *arg)
2349 {
2350 pre_cmp = atoi(arg);
2351 }
2352
2353 static void opt_pre_me(const char *arg)
2354 {
2355 pre_me = atoi(arg);
2356 }
2357
2358 static void opt_lumi_mask(const char *arg)
2359 {
2360 lumi_mask = atof(arg);
2361 }
2362
2363 static void opt_dark_mask(const char *arg)
2364 {
2365 dark_mask = atof(arg);
2366 }
2367
2368 static void opt_scplx_mask(const char *arg)
2369 {
2370 scplx_mask = atof(arg);
2371 }
2372
2373 static void opt_tcplx_mask(const char *arg)
2374 {
2375 tcplx_mask = atof(arg);
2376 }
2377
2378 static void opt_p_mask(const char *arg)
2379 {
2380 p_mask = atof(arg);
2381 }
2382
2383 static void opt_qscale(const char *arg)
2384 {
2385 video_qscale = atof(arg);
2386 if (video_qscale < 0.01 ||
2387 video_qscale > 255) {
2388 fprintf(stderr, "qscale must be >= 0.01 and <= 255\n");
2389 exit(1);
2390 }
2391 }
2392
2393 static void opt_lmax(const char *arg)
2394 {
2395 video_lmax = atof(arg)*FF_QP2LAMBDA;
2396 }
2397
2398 static void opt_lmin(const char *arg)
2399 {
2400 video_lmin = atof(arg)*FF_QP2LAMBDA;
2401 }
2402
2403 static void opt_qmin(const char *arg)
2404 {
2405 video_qmin = atoi(arg);
2406 if (video_qmin < 1 ||
2407 video_qmin > 31) {
2408 fprintf(stderr, "qmin must be >= 1 and <= 31\n");
2409 exit(1);
2410 }
2411 }
2412
2413 static void opt_qmax(const char *arg)
2414 {
2415 video_qmax = atoi(arg);
2416 if (video_qmax < 1 ||
2417 video_qmax > 31) {
2418 fprintf(stderr, "qmax must be >= 1 and <= 31\n");
2419 exit(1);
2420 }
2421 }
2422
2423 static void opt_mb_qmin(const char *arg)
2424 {
2425 video_mb_qmin = atoi(arg);
2426 if (video_mb_qmin < 1 ||
2427 video_mb_qmin > 31) {
2428 fprintf(stderr, "mb_qmin must be >= 1 and <= 31\n");
2429 exit(1);
2430 }
2431 }
2432
2433 static void opt_mb_qmax(const char *arg)
2434 {
2435 video_mb_qmax = atoi(arg);
2436 if (video_mb_qmax < 1 ||
2437 video_mb_qmax > 31) {
2438 fprintf(stderr, "mb_qmax must be >= 1 and <= 31\n");
2439 exit(1);
2440 }
2441 }
2442
2443 static void opt_qdiff(const char *arg)
2444 {
2445 video_qdiff = atoi(arg);
2446 if (video_qdiff < 1 ||
2447 video_qdiff > 31) {
2448 fprintf(stderr, "qdiff must be >= 1 and <= 31\n");
2449 exit(1);
2450 }
2451 }
2452
2453 static void opt_qblur(const char *arg)
2454 {
2455 video_qblur = atof(arg);
2456 }
2457
2458 static void opt_qcomp(const char *arg)
2459 {
2460 video_qcomp = atof(arg);
2461 }
2462
2463 static void opt_rc_initial_cplx(const char *arg)
2464 {
2465 video_rc_initial_cplx = atof(arg);
2466 }
2467 static void opt_b_qfactor(const char *arg)
2468 {
2469 video_b_qfactor = atof(arg);
2470 }
2471 static void opt_i_qfactor(const char *arg)
2472 {
2473 video_i_qfactor = atof(arg);
2474 }
2475 static void opt_b_qoffset(const char *arg)
2476 {
2477 video_b_qoffset = atof(arg);
2478 }
2479 static void opt_i_qoffset(const char *arg)
2480 {
2481 video_i_qoffset = atof(arg);
2482 }
2483
2484 static void opt_ibias(const char *arg)
2485 {
2486 video_intra_quant_bias = atoi(arg);
2487 }
2488 static void opt_pbias(const char *arg)
2489 {
2490 video_inter_quant_bias = atoi(arg);
2491 }
2492
2493 static void opt_packet_size(const char *arg)
2494 {
2495 packet_size= atoi(arg);
2496 }
2497
2498 static void opt_error_rate(const char *arg)
2499 {
2500 error_rate= atoi(arg);
2501 }
2502
2503 static void opt_strict(const char *arg)
2504 {
2505 strict= atoi(arg);
2506 }
2507
2508 static void opt_top_field_first(const char *arg)
2509 {
2510 top_field_first= atoi(arg);
2511 }
2512
2513 static void opt_noise_reduction(const char *arg)
2514 {
2515 noise_reduction= atoi(arg);
2516 }
2517
2518 static void opt_qns(const char *arg)
2519 {
2520 qns= atoi(arg);
2521 }
2522
2523 static void opt_sc_threshold(const char *arg)
2524 {
2525 sc_threshold= atoi(arg);
2526 }
2527
2528 static void opt_me_range(const char *arg)
2529 {
2530 me_range = atoi(arg);
2531 }
2532
2533 static void opt_thread_count(const char *arg)
2534 {
2535 thread_count= atoi(arg);
2536 #if !defined(HAVE_PTHREADS) && !defined(HAVE_W32THREADS)
2537 if (verbose >= 0)
2538 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2539 #endif
2540 }
2541
2542 static void opt_audio_bitrate(const char *arg)
2543 {
2544 audio_bit_rate = atoi(arg) * 1000;
2545 }
2546
2547 static void opt_audio_rate(const char *arg)
2548 {
2549 audio_sample_rate = atoi(arg);
2550 }
2551
2552 static void opt_audio_channels(const char *arg)
2553 {
2554 audio_channels = atoi(arg);
2555 }
2556
2557 static void opt_video_device(const char *arg)
2558 {
2559 video_device = av_strdup(arg);
2560 }
2561
2562 static void opt_video_channel(const char *arg)
2563 {
2564 video_channel = strtol(arg, NULL, 0);
2565 }
2566
2567 static void opt_video_standard(const char *arg)
2568 {
2569 video_standard = av_strdup(arg);
2570 }
2571
2572 static void opt_audio_device(const char *arg)
2573 {
2574 audio_device = av_strdup(arg);
2575 }
2576
2577 static void opt_dv1394(const char *arg)
2578 {
2579 video_grab_format = "dv1394";
2580 audio_grab_format = NULL;
2581 }
2582
2583 static void opt_audio_codec(const char *arg)
2584 {
2585 AVCodec *p;
2586
2587 if (!strcmp(arg, "copy")) {
2588 audio_stream_copy = 1;
2589 } else {
2590 p = first_avcodec;
2591 while (p) {
2592 if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_AUDIO)
2593 break;
2594 p = p->next;
2595 }
2596 if (p == NULL) {
2597 fprintf(stderr, "Unknown audio codec '%s'\n", arg);
2598 exit(1);
2599 } else {
2600 audio_codec_id = p->id;
2601 }
2602 }
2603 }
2604
2605 static void add_frame_hooker(const char *arg)
2606 {
2607 int argc = 0;
2608 char *argv[64];
2609 int i;
2610 char *args = av_strdup(arg);
2611
2612 using_vhook = 1;
2613
2614 argv[0] = strtok(args, " ");
2615 while (argc < 62 && (argv[++argc] = strtok(NULL, " "))) {
2616 }
2617
2618 i = frame_hook_add(argc, argv);
2619
2620 if (i != 0) {
2621 fprintf(stderr, "Failed to add video hook function: %s\n", arg);
2622 exit(1);
2623 }
2624 }
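/* Sketch of the tokenization above with a made-up hook argument:
   "vhook/null.so -some -args" becomes argv = { "vhook/null.so", "-some",
   "-args" } with argc = 3, and frame_hook_add() is expected to load the
   first element as the hook module. */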
2625
2626 const char *motion_str[] = {
2627 "zero",
2628 "full",
2629 "log",
2630 "phods",
2631 "epzs",
2632 "x1",
2633 NULL,
2634 };
2635
2636 static void opt_motion_estimation(const char *arg)
2637 {
2638 const char **p;
2639 p = motion_str;
2640 for(;;) {
2641 if (!*p) {
2642 fprintf(stderr, "Unknown motion estimation method '%s'\n", arg);
2643 exit(1);
2644 }
2645 if (!strcmp(*p, arg))
2646 break;
2647 p++;
2648 }
2649 me_method = (p - motion_str) + 1;
2650 }
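/* The method is 1 plus the index into motion_str, so for example "epzs"
   (index 4) yields me_method = 5, which should correspond to ME_EPZS in
   libavcodec's Motion_Est_ID enumeration. */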
2651
2652 static void opt_video_codec(const char *arg)
2653 {
2654 AVCodec *p;
2655
2656 if (!strcmp(arg, "copy")) {
2657 video_stream_copy = 1;
2658 } else {
2659 p = first_avcodec;
2660 while (p) {
2661 if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_VIDEO)
2662 break;
2663 p = p->next;
2664 }
2665 if (p == NULL) {
2666 fprintf(stderr, "Unknown video codec '%s'\n", arg);
2667 exit(1);
2668 } else {
2669 video_codec_id = p->id;
2670 }
2671 }
2672 }
2673
2674 static void opt_map(const char *arg)
2675 {
2676 AVStreamMap *m;
2677 const char *p;
2678
2679 p = arg;
2680 m = &stream_maps[nb_stream_maps++];
2681
2682 m->file_index = strtol(arg, (char **)&p, 0);
2683 if (*p)
2684 p++;
2685
2686 m->stream_index = strtol(p, (char **)&p, 0);
2687 }
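/* Disabled, purely illustrative use of the parsing above: "-map 1:2"
   selects stream 2 of input file 1; with a bare "1" the stream index
   defaults to 0 because strtol() on the empty remainder returns 0. */
#if 0
static void test_opt_map(void)
{
    opt_map("1:2");
    assert(stream_maps[nb_stream_maps - 1].file_index == 1);
    assert(stream_maps[nb_stream_maps - 1].stream_index == 2);
}
#endif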
2688
2689 static void opt_map_meta_data(const char *arg)
2690 {
2691 AVMetaDataMap *m;
2692 const char *p;
2693
2694 p = arg;
2695 m = &meta_data_maps[nb_meta_data_maps++];
2696
2697 m->out_file = strtol(arg, (char **)&p, 0);
2698 if (*p)
2699 p++;
2700
2701 m->in_file = strtol(p, (char **)&p, 0);
2702 }
2703
2704 static void opt_recording_time(const char *arg)
2705 {
2706 recording_time = parse_date(arg, 1);
2707 }
2708
2709 static void opt_start_time(const char *arg)
2710 {
2711 start_time = parse_date(arg, 1);
2712 }
2713
2714 static void opt_rec_timestamp(const char *arg)
2715 {
2716 rec_timestamp = parse_date(arg, 0) / 1000000;
2717 }
2718
2719 static void opt_input_ts_offset(const char *arg)
2720 {
2721 input_ts_offset = parse_date(arg, 1);
2722 }
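/* These options all go through parse_date() in duration mode, which is
   expected to return microseconds: "-t 90" and "-t 00:01:30" should both
   set recording_time to 90000000, matching the recording_time / 1000000.0
   comparison in the main loop above. */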
2723
2724 static void opt_input_file(const char *filename)
2725 {
2726 AVFormatContext *ic;
2727 AVFormatParameters params, *ap = &params;
2728 int err, i, ret, rfps, rfps_base;
2729 int64_t timestamp;
2730
2731 if (!strcmp(filename, "-"))
2732 filename = "pipe:";
2733
2734 using_stdin |= !strcmp(filename, "pipe:" ) ||
2735 !strcmp( filename, "/dev/stdin" );
2736
2737 /* get default parameters from command line */
2738 memset(ap, 0, sizeof(*ap));
2739 ap->sample_rate = audio_sample_rate;
2740 ap->channels = audio_channels;
2741 ap->frame_rate = frame_rate;
2742 ap->frame_rate_base = frame_rate_base;
2743 ap->width = frame_width + frame_padleft + frame_padright;
2744 ap->height = frame_height + frame_padtop + frame_padbottom;
2745 ap->image_format = image_format;
2746 ap->pix_fmt = frame_pix_fmt;
2747
2748 /* open the input file with generic libav function */
2749 err = av_open_input_file(&ic, filename, file_iformat, 0, ap);
2750 if (err < 0) {
2751 print_error(filename, err);
2752 exit(1);
2753 }
2754
2755 /* If there is not enough information to get the stream parameters, we decode
2756 the first frames to get them (used in the MPEG case, for example) */
2757 ret = av_find_stream_info(ic);
2758 if (ret < 0) {
2759 if (verbose >= 0) fprintf(stderr, "%s: could not find codec parameters\n", filename);
2760 exit(1);
2761 }
2762
2763 timestamp = start_time;
2764 /* add the stream start time */
2765 if (ic->start_time != AV_NOPTS_VALUE)
2766 timestamp += ic->start_time;
2767
2768 /* if seeking requested, we execute it */
2769 if (start_time != 0) {
2770 ret = av_seek_frame(ic, -1, timestamp);
2771 if (ret < 0) {
2772 fprintf(stderr, "%s: could not seek to position %0.3f\n",
2773 filename, (double)timestamp / AV_TIME_BASE);
2774 }
2775 /* reset seek info */
2776 start_time = 0;
2777 }
2778
2779 /* update the current parameters so that they match those of the input stream */
2780 for(i=0;i<ic->nb_streams;i++) {
2781 AVCodecContext *enc = &ic->streams[i]->codec;
2782 #if defined(HAVE_PTHREADS) || defined(HAVE_W32THREADS)
2783 if(thread_count>1)
2784 avcodec_thread_init(enc, thread_count);
2785 #endif
2786 enc->thread_count= thread_count;
2787 switch(enc->codec_type) {
2788 case CODEC_TYPE_AUDIO:
2789 //fprintf(stderr, "\nInput Audio channels: %d", enc->channels);
2790 audio_channels = enc->channels;
2791 audio_sample_rate = enc->sample_rate;
2792 break;
2793 case CODEC_TYPE_VIDEO:
2794 frame_height = enc->height;
2795 frame_width = enc->width;
2796 frame_aspect_ratio = av_q2d(enc->sample_aspect_ratio) * enc->width / enc->height;
2797 frame_pix_fmt = enc->pix_fmt;
2798 rfps = ic->streams[i]->r_frame_rate;
2799 rfps_base = ic->streams[i]->r_frame_rate_base;
2800 enc->workaround_bugs = workaround_bugs;
2801 enc->error_resilience = error_resilience;
2802 enc->error_concealment = error_concealment;
2803 enc->idct_algo = idct_algo;
2804 enc->debug = debug;
2805 enc->debug_mv = debug_mv;
2806 if(bitexact)
2807 enc->flags|= CODEC_FLAG_BITEXACT;
2808 if(me_threshold)
2809 enc->debug |= FF_DEBUG_MV;
2810
2811 assert(enc->frame_rate_base == rfps_base); // should be true for now
2812 if (enc->frame_rate != rfps) {
2814 if (verbose >= 0)
2815 fprintf(stderr,"\nSeems that stream %d comes from film source: %2.2f->%2.2f\n",
2816 i, (float)enc->frame_rate / enc->frame_rate_base,
2818 (float)rfps / rfps_base);
2819 }
2820 /* update the current frame rate to match the stream frame rate */
2821 frame_rate = rfps;
2822 frame_rate_base = rfps_base;
2823
2824 enc->rate_emu = rate_emu;
2825 break;
2826 case CODEC_TYPE_DATA:
2827 break;
2828 default:
2829 av_abort();
2830 }
2831 }
2832
2833 input_files[nb_input_files] = ic;
2834 input_files_ts_offset[nb_input_files] = input_ts_offset - (copy_ts ? 0 : timestamp);
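/* Rough numbers: with "-ss 10" on an input whose container start time is
   1.4s, timestamp is 11.4s after the seek above, so (without -copyts) the
   stored offset is input_ts_offset - 11.4s and decoded timestamps should
   end up starting near zero. */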
2835 /* dump the file content */
2836 if (verbose >= 0)
2837 dump_format(ic, nb_input_files, filename, 0);
2838
2839 nb_input_files++;
2840 file_iformat = NULL;
2841 file_oformat = NULL;
2842 image_format = NULL;
2843
2844 rate_emu = 0;
2845 }
2846
2847 static void check_audio_video_inputs(int *has_video_ptr, int *has_audio_ptr)
2848 {
2849 int has_video, has_audio, i, j;
2850 AVFormatContext *ic;
2851
2852 has_video = 0;
2853 has_audio = 0;
2854 for(j=0;j<nb_input_files;j++) {
2855 ic = input_files[j];
2856 for(i=0;i<ic->nb_streams;i++) {
2857 AVCodecContext *enc = &ic->streams[i]->codec;
2858 switch(enc->codec_type) {
2859 case CODEC_TYPE_AUDIO:
2860 has_audio = 1;
2861 break;
2862 case CODEC_TYPE_VIDEO:
2863 has_video = 1;
2864 break;
2865 case CODEC_TYPE_DATA:
2866 break;
2867 default:
2868 av_abort();
2869 }
2870 }
2871 }
2872 *has_video_ptr = has_video;
2873 *has_audio_ptr = has_audio;
2874 }
2875
2876 static void opt_output_file(const char *filename)
2877 {
2878 AVStream *st;
2879 AVFormatContext *oc;
2880 int use_video, use_audio, nb_streams, input_has_video, input_has_audio;
2881 int codec_id;
2882 AVFormatParameters params, *ap = &params;
2883
2884 if (!strcmp(filename, "-"))
2885 filename = "pipe:";
2886
2887 oc = av_alloc_format_context();
2888
2889 if (!file_oformat) {
2890 file_oformat = guess_format(NULL, filename, NULL);
2891 if (!file_oformat) {
2892 fprintf(stderr, "Unable for find a suitable output format for '%s'\n",
2893 filename);
2894 exit(1);
2895 }
2896 }
2897
2898 oc->oformat = file_oformat;
2899
2900 if (!strcmp(file_oformat->name, "ffm") &&
2901 strstart(filename, "http:", NULL)) {
2902 /* special case for files sent to ffserver: we get the stream
2903 parameters from ffserver */
2904 if (read_ffserver_streams(oc, filename) < 0) {
2905 fprintf(stderr, "Could not read stream parameters from '%s'\n", filename);
2906 exit(1);
2907 }
2908 } else {
2909 use_video = file_oformat->video_codec != CODEC_ID_NONE || video_stream_copy;
2910 use_audio = file_oformat->audio_codec != CODEC_ID_NONE || audio_stream_copy;
2911
2912 /* disable a stream type if there is at least one input file and
2913 none of the inputs contains a stream of that type */
2914 if (nb_input_files > 0) {
2915 check_audio_video_inputs(&input_has_video, &input_has_audio);
2916 if (!input_has_video)
2917 use_video = 0;
2918 if (!input_has_audio)
2919 use_audio = 0;
2920 }
2921
2922 /* manual disable */
2923 if (audio_disable) {
2924 use_audio = 0;
2925 }
2926 if (video_disable) {
2927 use_video = 0;
2928 }
2929
2930 nb_streams = 0;
2931 if (use_video) {
2932 AVCodecContext *video_enc;
2933
2934 st = av_new_stream(oc, nb_streams++);
2935 if (!st) {
2936 fprintf(stderr, "Could not alloc stream\n");
2937 exit(1);
2938 }
2939 #if defined(HAVE_PTHREADS) || defined(HAVE_W32THREADS)
2940 if(thread_count>1)
2941 avcodec_thread_init(&st->codec, thread_count);
2942 #endif
2943
2944 video_enc = &st->codec;
2945
2946 if(!strcmp(file_oformat->name, "mp4") || !strcmp(file_oformat->name, "mov") || !strcmp(file_oformat->name, "3gp"))
2947 video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
2948 if (video_stream_copy) {
2949 st->stream_copy = 1;
2950 video_enc->codec_type = CODEC_TYPE_VIDEO;
2951 } else {
2952 char *p;
2953 int i;
2954 AVCodec *codec;
2955
2956 codec_id = file_oformat->video_codec;
2957 if (video_codec_id != CODEC_ID_NONE)
2958 codec_id = video_codec_id;
2959
2960 video_enc->codec_id = codec_id;
2961 codec = avcodec_find_encoder(codec_id);
2962
2963 video_enc->bit_rate = video_bit_rate;
2964 video_enc->bit_rate_tolerance = video_bit_rate_tolerance;
2965 video_enc->frame_rate = frame_rate;
2966 video_enc->frame_rate_base = frame_rate_base;
2967 if(codec && codec->supported_framerates){
2968 const AVRational *p= codec->supported_framerates;
2969 AVRational req= (AVRational){frame_rate, frame_rate_base};
2970 const AVRational *best=NULL;
2971 AVRational best_error= (AVRational){INT_MAX, 1};
2972 for(; p->den!=0; p++){
2973 AVRational error= av_sub_q(req, *p);
2974 if(error.num <0) error.num *= -1;
2975 if(av_cmp_q(error, best_error) < 0){
2976 best_error= error;
2977 best= p;
2978 }
2979 }
2980 video_enc->frame_rate = best->num;
2981 video_enc->frame_rate_base= best->den;
2982 }
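/* Example of the nearest-rate search above: requesting 25/1 from a codec
   that only advertises {30000/1001, 24000/1001} gives errors of roughly
   4.97 and 1.02, so 24000/1001 (about 23.976 fps) is picked. */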
2983
2984 video_enc->width = frame_width + frame_padright + frame_padleft;
2985 video_enc->height = frame_height + frame_padtop + frame_padbottom;
2986 video_enc->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255);
2987 video_enc->pix_fmt = frame_pix_fmt;
2988
2989 if(codec && codec->pix_fmts){
2990 const enum PixelFormat *p= codec->pix_fmts;
2991 for(; *p!=-1; p++){
2992 if(*p == video_enc->pix_fmt)
2993 break;
2994 }
2995 if(*p == -1)
2996 video_enc->pix_fmt = codec->pix_fmts[0];
2997 }
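/* If the requested pixel format is not in the codec's list (say yuv422p
   was asked of an encoder that only lists yuv420p), the codec's first
   supported format is silently substituted. */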
2998
2999 if (!intra_only)
3000 video_enc->gop_size = gop_size;
3001 else
3002 video_enc->gop_size = 0;
3003 if (video_qscale || same_quality) {
3004 video_enc->flags |= CODEC_FLAG_QSCALE;
3005 st->quality = FF_QP2LAMBDA * video_qscale;
3006 }
3007
3008 if(intra_matrix)
3009 video_enc->intra_matrix = intra_matrix;
3010 if(inter_matrix)
3011 video_enc->inter_matrix = inter_matrix;
3012
3013 if(bitexact)
3014 video_enc->flags |= CODEC_FLAG_BITEXACT;
3015
3016 video_enc->mb_decision = mb_decision;
3017 video_enc->mb_cmp = mb_cmp;
3018 video_enc->ildct_cmp = ildct_cmp;
3019 video_enc->me_sub_cmp = sub_cmp;
3020 video_enc->me_cmp = cmp;
3021 video_enc->me_pre_cmp = pre_cmp;
3022 video_enc->pre_me = pre_me;
3023 video_enc->lumi_masking = lumi_mask;
3024 video_enc->dark_masking = dark_mask;
3025 video_enc->spatial_cplx_masking = scplx_mask;
3026 video_enc->temporal_cplx_masking = tcplx_mask;
3027 video_enc->p_masking = p_mask;
3028 video_enc->quantizer_noise_shaping= qns;
3029
3030 if (use_umv) {
3031 video_enc->flags |= CODEC_FLAG_H263P_UMV;
3032 }
3033 if (use_ss) {
3034 video_enc->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;
3035 }
3036 if (use_aic) {
3037 video_enc->flags |= CODEC_FLAG_H263P_AIC;
3038 }
3039 if (use_aiv) {
3040 video_enc->flags |= CODEC_FLAG_H263P_AIV;
3041 }
3042 if (use_4mv) {
3043 video_enc->flags |= CODEC_FLAG_4MV;
3044 }
3045 if (use_obmc) {
3046 video_enc->flags |= CODEC_FLAG_OBMC;
3047 }
3048 if (use_loop) {
3049 video_enc->flags |= CODEC_FLAG_LOOP_FILTER;
3050 }
3051
3052 if(use_part) {
3053 video_enc->flags |= CODEC_FLAG_PART;
3054 }
3055 if (use_alt_scan) {
3056 video_enc->flags |= CODEC_FLAG_ALT_SCAN;
3057 }
3058 if (use_trell) {
3059 video_enc->flags |= CODEC_FLAG_TRELLIS_QUANT;
3060 }
3061 if (use_scan_offset) {
3062 video_enc->flags |= CODEC_FLAG_SVCD_SCAN_OFFSET;
3063 }
3064 if (closed_gop) {
3065 video_enc->flags |= CODEC_FLAG_CLOSED_GOP;
3066 }
3067 if (use_qpel) {
3068 video_enc->flags |= CODEC_FLAG_QPEL;
3069 }
3070 if (use_qprd) {
3071 video_enc->flags |= CODEC_FLAG_QP_RD;
3072 }
3073 if (use_cbprd) {
3074 video_enc->flags |= CODEC_FLAG_CBP_RD;
3075 }
3076 if (b_frames) {
3077 video_enc->max_b_frames = b_frames;
3078 video_enc->b_frame_strategy = 0;
3079 video_enc->b_quant_factor = 2.0;
3080 }
3081 if (do_interlace_dct) {
3082 video_enc->flags |= CODEC_FLAG_INTERLACED_DCT;
3083 }
3084 if (do_interlace_me) {
3085 video_enc->flags |= CODEC_FLAG_INTERLACED_ME;
3086 }
3087 video_enc->qmin = video_qmin;
3088 video_enc->qmax = video_qmax;
3089 video_enc->lmin = video_lmin;
3090 video_enc->lmax = video_lmax;
3091 video_enc->mb_qmin = video_mb_qmin;
3092 video_enc->mb_qmax = video_mb_qmax;
3093 video_enc->max_qdiff = video_qdiff;
3094 video_enc->qblur = video_qblur;
3095 video_enc->qcompress = video_qcomp;
3096 video_enc->rc_eq = video_rc_eq;
3097 video_enc->debug = debug;
3098 video_enc->debug_mv = debug_mv;
3099 video_enc->thread_count = thread_count;
3100 p= video_rc_override_string;
3101 for(i=0; p; i++){
3102 int start, end, q;
3103 int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
3104 if(e!=3){
3105 fprintf(stderr, "error parsing rc_override\n");
3106 exit(1);
3107 }
3108 video_enc->rc_override=
3109 av_realloc(video_enc->rc_override,
3110 sizeof(RcOverride)*(i+1));
3111 video_enc->rc_override[i].start_frame= start;
3112 video_enc->rc_override[i].end_frame = end;
3113 if(q>0){
3114 video_enc->rc_override[i].qscale= q;
3115 video_enc->rc_override[i].quality_factor= 1.0;
3116 }
3117 else{
3118 video_enc->rc_override[i].qscale= 0;
3119 video_enc->rc_override[i].quality_factor= -q/100.0;
3120 }
3121 p= strchr(p, '/');
3122 if(p) p++;
3123 }
3124 video_enc->rc_override_count=i;
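/* Example of the syntax parsed above: "0,99,10/100,199,-50" creates two
   RcOverride entries, frames 0-99 at a fixed qscale of 10 and frames
   100-199 with quality_factor 0.5 (negative q values are read as a
   percentage). */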
3125
3126 video_enc->rc_max_rate = video_rc_max_rate;
3127 video_enc->rc_min_rate = video_rc_min_rate;
3128 video_enc->rc_buffer_size = video_rc_buffer_size;
3129 video_enc->rc_buffer_aggressivity= video_rc_buffer_aggressivity;
3130 video_enc->rc_initial_cplx= video_rc_initial_cplx;
3131 video_enc->i_quant_factor = video_i_qfactor;
3132 video_enc->b_quant_factor = video_b_qfactor;
3133 video_enc->i_quant_offset = video_i_qoffset;
3134 video_enc->b_quant_offset = video_b_qoffset;
3135 video_enc->intra_quant_bias = video_intra_quant_bias;
3136 video_enc->inter_quant_bias = video_inter_quant_bias;
3137 video_enc->dct_algo = dct_algo;
3138 video_enc->idct_algo = idct_algo;
3139 video_enc->me_threshold= me_threshold;
3140 video_enc->mb_threshold= mb_threshold;
3141 video_enc->intra_dc_precision= intra_dc_precision - 8;
3142 video_enc->strict_std_compliance = strict;
3143 video_enc->error_rate = error_rate;
3144 video_enc->noise_reduction= noise_reduction;
3145 video_enc->scenechange_threshold= sc_threshold;
3146 video_enc->me_range = me_range;
3147 video_enc->coder_type= coder;
3148 video_enc->context_model= context;
3149 video_enc->prediction_method= predictor;
3150 video_enc->profile= video_profile;
3151 video_enc->level= video_level;
3152 video_enc->nsse_weight= nsse_weight;
3153 video_enc->me_subpel_quality= subpel_quality;
3154 video_enc->lowres= lowres;
3155
3156 if(packet_size){
3157 video_enc->rtp_mode= 1;
3158 video_enc->rtp_payload_size= packet_size;
3159 }
3160
3161 if (do_psnr)
3162 video_enc->flags|= CODEC_FLAG_PSNR;
3163
3164 video_enc->me_method = me_method;
3165
3166 /* two pass mode */
3167 if (do_pass) {
3168 if (do_pass == 1) {
3169 video_enc->flags |= CODEC_FLAG_PASS1;
3170 } else {
3171 video_enc->flags |= CODEC_FLAG_PASS2;
3172 }
3173 }
3174 }
3175 }
3176
3177 if (use_audio) {
3178 AVCodecContext *audio_enc;
3179
3180 st = av_new_stream(oc, nb_streams++);
3181 if (!st) {
3182 fprintf(stderr, "Could not alloc stream\n");
3183 exit(1);
3184 }
3185 #if defined(HAVE_PTHREADS) || defined(HAVE_W32THREADS)
3186 if(thread_count>1)
3187 avcodec_thread_init(&st->codec, thread_count);
3188 #endif
3189
3190 audio_enc = &st->codec;
3191 audio_enc->codec_type = CODEC_TYPE_AUDIO;
3192
3193 if(!strcmp(file_oformat->name, "mp4") || !strcmp(file_oformat->name, "mov") || !strcmp(file_oformat->name, "3gp"))
3194 audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
3195 if (audio_stream_copy) {
3196 st->stream_copy = 1;
3197 audio_enc->channels = audio_channels;
3198 } else {
3199 codec_id = file_oformat->audio_codec;
3200 if (audio_codec_id != CODEC_ID_NONE)
3201 codec_id = audio_codec_id;
3202 audio_enc->codec_id = codec_id;
3203
3204 audio_enc->bit_rate = audio_bit_rate;
3205 audio_enc->strict_std_compliance = strict;
3206 audio_enc->thread_count = thread_count;
3207 /* For audio codecs other than AC3 or DTS we limit
3208 the number of coded channels to stereo */
3209 if (audio_channels > 2 && codec_id != CODEC_ID_AC3
3210 && codec_id != CODEC_ID_DTS) {
3211 audio_enc->channels = 2;
3212 } else
3213 audio_enc->channels = audio_channels;
3214 }
3215 audio_enc->sample_rate = audio_sample_rate;
3216 }
3217
3218 oc->nb_streams = nb_streams;
3219
3220 if (!nb_streams) {
3221 fprintf(stderr, "No audio or video streams available\n");
3222 exit(1);
3223 }
3224
3225 oc->timestamp = rec_timestamp;
3226
3227 if (str_title)
3228 pstrcpy(oc->title, sizeof(oc->title), str_title);
3229 if (str_author)
3230 pstrcpy(oc->author, sizeof(oc->author), str_author);
3231 if (str_copyright)
3232 pstrcpy(oc->copyright, sizeof(oc->copyright), str_copyright);
3233 if (str_comment)
3234 pstrcpy(oc->comment, sizeof(oc->comment), str_comment);
3235 }
3236
3237 output_files[nb_output_files++] = oc;
3238
3239 strcpy(oc->filename, filename);
3240
3241 /* check filename in case an image number is expected */
3242 if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
3243 if (filename_number_test(oc->filename) < 0) {
3244 print_error(oc->filename, AVERROR_NUMEXPECTED);
3245 exit(1);
3246 }
3247 }
3248
3249 if (!(oc->oformat->flags & AVFMT_NOFILE)) {
3250 /* test if it already exists to avoid losing precious files */
3251 if (!file_overwrite &&
3252 (strchr(filename, ':') == NULL ||
3253 strstart(filename, "file:", NULL))) {
3254 if (url_exist(filename)) {
3255 int c;
3256
3257 if ( !using_stdin ) {
3258 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3259 fflush(stderr);
3260 c = getchar();
3261 if (toupper(c) != 'Y') {
3262 fprintf(stderr, "Not overwriting - exiting\n");
3263 exit(1);
3264 }
3265 }
3266 else {
3267 fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
3268 exit(1);
3269 }
3270 }
3271 }
3272
3273 /* open the file */
3274 if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
3275 fprintf(stderr, "Could not open '%s'\n", filename);
3276 exit(1);
3277 }
3278 }
3279
3280 memset(ap, 0, sizeof(*ap));
3281 ap->image_format = image_format;
3282 if (av_set_parameters(oc, ap) < 0) {
3283 fprintf(stderr, "%s: Invalid encoding parameters\n",
3284 oc->filename);
3285 exit(1);
3286 }
3287
3288 /* reset some options */
3289 file_oformat = NULL;
3290 file_iformat = NULL;
3291 image_format = NULL;
3292 audio_disable = 0;
3293 video_disable = 0;
3294 audio_codec_id = CODEC_ID_NONE;
3295 video_codec_id = CODEC_ID_NONE;
3296 audio_stream_copy = 0;
3297 video_stream_copy = 0;
3298 }
3299
3300 /* prepare dummy protocols for grab */
3301 static void prepare_grab(void)
3302 {
3303 int has_video, has_audio, i, j;
3304 AVFormatContext *oc;
3305 AVFormatContext *ic;
3306 AVFormatParameters vp1, *vp = &vp1;
3307 AVFormatParameters ap1, *ap = &ap1;
3308
3309 /* see if audio/video inputs are needed */
3310 has_video = 0;
3311 has_audio = 0;
3312 memset(ap, 0, sizeof(*ap));
3313 memset(vp, 0, sizeof(*vp));
3314 for(j=0;j<nb_output_files;j++) {
3315 oc = output_files[j];
3316 for(i=0;i<oc->nb_streams;