1 /*
2 * FFmpeg main
3 * Copyright (c) 2000-2003 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #define HAVE_AV_CONFIG_H
20 #include <limits.h>
21 #include "avformat.h"
22 #include "framehook.h"
23 #include "dsputil.h"
24
25 #ifndef CONFIG_WIN32
26 #include <unistd.h>
27 #include <fcntl.h>
28 #include <sys/ioctl.h>
29 #include <sys/time.h>
30 #include <termios.h>
31 #include <sys/resource.h>
32 #include <signal.h>
33 #endif
34 #ifdef CONFIG_OS2
35 #include <sys/types.h>
36 #include <sys/select.h>
37 #include <stdlib.h>
38 #endif
39 #undef time //needed because HAVE_AV_CONFIG_H is defined on top
40 #include <time.h>
41
42 #include "cmdutils.h"
43
44 #if !defined(INFINITY) && defined(HUGE_VAL)
45 #define INFINITY HUGE_VAL
46 #endif
47
48 /* select an input stream for an output stream */
49 typedef struct AVStreamMap {
50 int file_index;
51 int stream_index;
52 } AVStreamMap;
53
54 /** select an input file for an output file */
55 typedef struct AVMetaDataMap {
56 int out_file;
57 int in_file;
58 } AVMetaDataMap;
59
60 extern const OptionDef options[];
61
62 static void show_help(void);
63 static void show_license(void);
64
65 #define MAX_FILES 20
66
67 static AVFormatContext *input_files[MAX_FILES];
68 static int64_t input_files_ts_offset[MAX_FILES];
69 static int nb_input_files = 0;
70
71 static AVFormatContext *output_files[MAX_FILES];
72 static int nb_output_files = 0;
73
74 static AVStreamMap stream_maps[MAX_FILES];
75 static int nb_stream_maps;
76
77 static AVMetaDataMap meta_data_maps[MAX_FILES];
78 static int nb_meta_data_maps;
79
80 static AVInputFormat *file_iformat;
81 static AVOutputFormat *file_oformat;
82 static AVImageFormat *image_format;
83 static int frame_width = 160;
84 static int frame_height = 128;
85 static float frame_aspect_ratio = 0;
86 static enum PixelFormat frame_pix_fmt = PIX_FMT_YUV420P;
87 static int frame_padtop = 0;
88 static int frame_padbottom = 0;
89 static int frame_padleft = 0;
90 static int frame_padright = 0;
91 static int padcolor[3] = {16,128,128}; /* default to black */
92 static int frame_topBand = 0;
93 static int frame_bottomBand = 0;
94 static int frame_leftBand = 0;
95 static int frame_rightBand = 0;
96 static int frame_rate = 25;
97 static int frame_rate_base = 1;
98 static int video_bit_rate = 200*1000;
99 static int video_bit_rate_tolerance = 4000*1000;
100 static float video_qscale = 0;
101 static int video_qmin = 2;
102 static int video_qmax = 31;
103 static int video_lmin = 2*FF_QP2LAMBDA;
104 static int video_lmax = 31*FF_QP2LAMBDA;
105 static int video_mb_qmin = 2;
106 static int video_mb_qmax = 31;
107 static int video_qdiff = 3;
108 static float video_qblur = 0.5;
109 static float video_qcomp = 0.5;
110 static uint16_t *intra_matrix = NULL;
111 static uint16_t *inter_matrix = NULL;
112 #if 0 //experimental, (can be removed)
113 static float video_rc_qsquish=1.0;
114 static float video_rc_qmod_amp=0;
115 static int video_rc_qmod_freq=0;
116 #endif
117 static char *video_rc_override_string=NULL;
118 static char *video_rc_eq="tex^qComp";
119 static int video_rc_buffer_size=0;
120 static float video_rc_buffer_aggressivity=1.0;
121 static int video_rc_max_rate=0;
122 static int video_rc_min_rate=0;
123 static float video_rc_initial_cplx=0;
124 static float video_b_qfactor = 1.25;
125 static float video_b_qoffset = 1.25;
126 static float video_i_qfactor = -0.8;
127 static float video_i_qoffset = 0.0;
128 static int video_intra_quant_bias= FF_DEFAULT_QUANT_BIAS;
129 static int video_inter_quant_bias= FF_DEFAULT_QUANT_BIAS;
130 static int me_method = ME_EPZS;
131 static int video_disable = 0;
132 static int video_codec_id = CODEC_ID_NONE;
133 static int same_quality = 0;
134 static int b_frames = 0;
135 static int mb_decision = FF_MB_DECISION_SIMPLE;
136 static int ildct_cmp = FF_CMP_VSAD;
137 static int mb_cmp = FF_CMP_SAD;
138 static int sub_cmp = FF_CMP_SAD;
139 static int cmp = FF_CMP_SAD;
140 static int pre_cmp = FF_CMP_SAD;
141 static int pre_me = 0;
142 static float lumi_mask = 0;
143 static float dark_mask = 0;
144 static float scplx_mask = 0;
145 static float tcplx_mask = 0;
146 static float p_mask = 0;
147 static int use_4mv = 0;
148 static int use_obmc = 0;
149 static int use_loop = 0;
150 static int use_aic = 0;
151 static int use_aiv = 0;
152 static int use_umv = 0;
153 static int use_ss = 0;
154 static int use_alt_scan = 0;
155 static int use_trell = 0;
156 static int use_scan_offset = 0;
157 static int use_qpel = 0;
158 static int use_qprd = 0;
159 static int use_cbprd = 0;
160 static int qns = 0;
161 static int closed_gop = 0;
162 static int do_deinterlace = 0;
163 static int do_interlace_dct = 0;
164 static int do_interlace_me = 0;
165 static int workaround_bugs = FF_BUG_AUTODETECT;
166 static int error_resilience = 2;
167 static int error_concealment = 3;
168 static int dct_algo = 0;
169 static int idct_algo = 0;
170 static int use_part = 0;
171 static int packet_size = 0;
172 static int error_rate = 0;
173 static int strict = 0;
174 static int top_field_first = -1;
175 static int noise_reduction = 0;
176 static int sc_threshold = 0;
177 static int debug = 0;
178 static int debug_mv = 0;
179 static int me_threshold = 0;
180 static int mb_threshold = 0;
181 static int intra_dc_precision = 8;
182 static int coder = 0;
183 static int context = 0;
184 static int predictor = 0;
185 static int video_profile = FF_PROFILE_UNKNOWN;
186 static int video_level = FF_LEVEL_UNKNOWN;
187 static int nsse_weight = 8;
188 extern int loop_input; /* currently a hack */
189
190 static int gop_size = 12;
191 static int intra_only = 0;
192 static int audio_sample_rate = 44100;
193 static int audio_bit_rate = 64000;
194 static int audio_disable = 0;
195 static int audio_channels = 1;
196 static int audio_codec_id = CODEC_ID_NONE;
197
198 static int64_t recording_time = 0;
199 static int64_t start_time = 0;
200 static int64_t rec_timestamp = 0;
201 static int64_t input_ts_offset = 0;
202 static int file_overwrite = 0;
203 static char *str_title = NULL;
204 static char *str_author = NULL;
205 static char *str_copyright = NULL;
206 static char *str_comment = NULL;
207 static int do_benchmark = 0;
208 static int do_hex_dump = 0;
209 static int do_pkt_dump = 0;
210 static int do_psnr = 0;
211 static int do_vstats = 0;
212 static int do_pass = 0;
213 static int bitexact = 0;
214 static char *pass_logfilename = NULL;
215 static int audio_stream_copy = 0;
216 static int video_stream_copy = 0;
217 static int video_sync_method= 1;
218 static int audio_sync_method= 0;
219 static int copy_ts= 0;
220
221 static int rate_emu = 0;
222
223 static char *video_grab_format = "video4linux";
224 static char *video_device = NULL;
225 static int video_channel = 0;
226 static char *video_standard = "ntsc";
227
228 static char *audio_grab_format = "audio_device";
229 static char *audio_device = NULL;
230
231 static int using_stdin = 0;
232 static int using_vhook = 0;
233 static int verbose = 1;
234 static int thread_count= 1;
235 static int q_pressed = 0;
236 static int me_range = 0;
237 static int64_t video_size = 0;
238 static int64_t audio_size = 0;
239 static int64_t extra_size = 0;
240 static int nb_frames_dup = 0;
241 static int nb_frames_drop = 0;
242 static int input_sync;
243
244 #define DEFAULT_PASS_LOGFILENAME "ffmpeg2pass"
245
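/* Note on A/V sync bookkeeping: sync_ipts below is the input timestamp of the
   stream in seconds (derived from the demuxer dts), while sync_opts counts
   what has already been emitted on the output side (frames for video, samples
   for audio).  do_video_out() and do_audio_out() compare the two to decide
   whether to duplicate/drop frames or to pad/trim audio. */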
246 typedef struct AVOutputStream {
247 int file_index; /* file index */
248 int index; /* stream index in the output file */
249 int source_index; /* AVInputStream index */
250 AVStream *st; /* stream in the output file */
251 int encoding_needed; /* true if encoding needed for this stream */
252 int frame_number;
253 /* input pts and corresponding output pts
254 for A/V sync */
255 double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
256 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
257 /* video only */
258 int video_resample; /* video_resample and video_crop are mutually exclusive */
259 AVFrame pict_tmp; /* temporary image for resampling */
260 ImgReSampleContext *img_resample_ctx; /* for image resampling */
261
262 int video_crop; /* video_resample and video_crop are mutually exclusive */
263 int topBand; /* cropping area sizes */
264 int leftBand;
265
266 int video_pad; /* video_resample and video_pad are mutually exclusive */
267 int padtop; /* padding area sizes */
268 int padbottom;
269 int padleft;
270 int padright;
271
272 /* audio only */
273 int audio_resample;
274 ReSampleContext *resample; /* for audio resampling */
275 FifoBuffer fifo; /* for compression: one audio fifo per codec */
276 FILE *logfile;
277 } AVOutputStream;
278
279 typedef struct AVInputStream {
280 int file_index;
281 int index;
282 AVStream *st;
283 int discard; /* true if stream data should be discarded */
284 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
285 int64_t sample_index; /* current sample */
286
287 int64_t start; /* time when read started */
288 unsigned long frame; /* current frame */
289 int64_t next_pts; /* synthetic pts for cases where pkt.pts
290 is not defined */
291 int64_t pts; /* current pts */
292 int is_start; /* is 1 at the start and after a discontinuity */
293 } AVInputStream;
294
295 typedef struct AVInputFile {
296 int eof_reached; /* true if eof reached */
297 int ist_index; /* index of first stream in ist_table */
298 int buffer_size; /* current total buffer size */
299 int buffer_size_max; /* buffer size at which we consider we can stop
300 buffering */
301 int nb_streams; /* nb streams we are aware of */
302 } AVInputFile;
303
304 #ifndef CONFIG_WIN32
305
306 /* init terminal so that we can grab keys */
307 static struct termios oldtty;
308
309 static void term_exit(void)
310 {
311 tcsetattr (0, TCSANOW, &oldtty);
312 }
313
314 static volatile sig_atomic_t received_sigterm = 0;
315
316 static void
317 sigterm_handler(int sig)
318 {
319 received_sigterm = sig;
320 term_exit();
321 }
322
323 static void term_init(void)
324 {
325 struct termios tty;
326
327 tcgetattr (0, &tty);
328 oldtty = tty;
329
330 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
331 |INLCR|IGNCR|ICRNL|IXON);
332 tty.c_oflag |= OPOST;
333 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
334 tty.c_cflag &= ~(CSIZE|PARENB);
335 tty.c_cflag |= CS8;
336 tty.c_cc[VMIN] = 1;
337 tty.c_cc[VTIME] = 0;
338
339 tcsetattr (0, TCSANOW, &tty);
340
341 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
342 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
343 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
344 /*
345 register a function to be called at normal program termination
346 */
347 atexit(term_exit);
348 #ifdef CONFIG_BEOS_NETSERVER
349 fcntl(0, F_SETFL, fcntl(0, F_GETFL) | O_NONBLOCK);
350 #endif
351 }
352
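/* read_key() polls stdin with a zero-timeout select(), so the main loop can
   check for a 'q' keypress without ever blocking; on BeOS netserver builds
   the descriptor is instead put into O_NONBLOCK mode by term_init(). */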
353 /* read a key without blocking */
354 static int read_key(void)
355 {
356 int n = 1;
357 unsigned char ch;
358 #ifndef CONFIG_BEOS_NETSERVER
359 struct timeval tv;
360 fd_set rfds;
361
362 FD_ZERO(&rfds);
363 FD_SET(0, &rfds);
364 tv.tv_sec = 0;
365 tv.tv_usec = 0;
366 n = select(1, &rfds, NULL, NULL, &tv);
367 #endif
368 if (n > 0) {
369 n = read(0, &ch, 1);
370 if (n == 1)
371 return ch;
372
373 return n;
374 }
375 return -1;
376 }
377
378 static int decode_interrupt_cb(void)
379 {
380 return q_pressed || (q_pressed = read_key() == 'q');
381 }
382
383 #else
384
385 static volatile int received_sigterm = 0;
386
387 /* no interactive support */
388 static void term_exit(void)
389 {
390 }
391
392 static void term_init(void)
393 {
394 }
395
396 static int read_key(void)
397 {
398 return 0;
399 }
400
401 #endif
402
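/* Open 'filename' as an FFM stream and copy its stream descriptions into the
   output context 's'; this is how the encoding parameters of an ffserver feed
   are picked up. */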
403 static int read_ffserver_streams(AVFormatContext *s, const char *filename)
404 {
405 int i, err;
406 AVFormatContext *ic;
407
408 err = av_open_input_file(&ic, filename, NULL, FFM_PACKET_SIZE, NULL);
409 if (err < 0)
410 return err;
411 /* copy stream format */
412 s->nb_streams = ic->nb_streams;
413 for(i=0;i<ic->nb_streams;i++) {
414 AVStream *st;
415
416 st = av_mallocz(sizeof(AVStream));
417 memcpy(st, ic->streams[i], sizeof(AVStream));
418 s->streams[i] = st;
419 }
420
421 av_close_input_file(ic);
422 return 0;
423 }
424
425 #define MAX_AUDIO_PACKET_SIZE (128 * 1024)
426
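/* Encode one chunk of decoded audio for output stream 'ost'.  When
   audio_sync_method is enabled, the difference between the expected output
   position (sync_ipts * sample_rate) and what has actually been emitted
   (sync_opts plus the samples still queued in the fifo) is measured in
   samples: at stream start the input is trimmed or padded with silence,
   afterwards (audio_sync_method > 1) the drift is handed to the resampler
   through av_resample_compensate().  The (possibly resampled) data is then
   buffered in the fifo and encoded in frame_size chunks. */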
427 static void do_audio_out(AVFormatContext *s,
428 AVOutputStream *ost,
429 AVInputStream *ist,
430 unsigned char *buf, int size)
431 {
432 uint8_t *buftmp;
433 static uint8_t *audio_buf = NULL;
434 static uint8_t *audio_out = NULL;
435 const int audio_out_size= 4*MAX_AUDIO_PACKET_SIZE;
436
437 int size_out, frame_bytes, ret;
438 AVCodecContext *enc= &ost->st->codec;
439
440 /* SC: dynamic allocation of buffers */
441 if (!audio_buf)
442 audio_buf = av_malloc(2*MAX_AUDIO_PACKET_SIZE);
443 if (!audio_out)
444 audio_out = av_malloc(audio_out_size);
445 if (!audio_buf || !audio_out)
446 return; /* Should signal an error! */
447
448 if(audio_sync_method){
449 double delta = ost->sync_ipts * enc->sample_rate - ost->sync_opts
450 - fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2);
451 double idelta= delta*ist->st->codec.sample_rate / enc->sample_rate;
452 int byte_delta= ((int)idelta)*2*ist->st->codec.channels;
453
454 //FIXME resample delay
455 if(fabs(delta) > 50){
456 if(ist->is_start){
457 if(byte_delta < 0){
458 byte_delta= FFMAX(byte_delta, -size);
459 size += byte_delta;
460 buf -= byte_delta;
461 if(verbose > 2)
462 fprintf(stderr, "discarding %d audio samples\n", (int)-delta);
463 if(!size)
464 return;
465 ist->is_start=0;
466 }else{
467 static uint8_t *input_tmp= NULL;
468 input_tmp= av_realloc(input_tmp, byte_delta + size);
469
470 if(byte_delta + size <= MAX_AUDIO_PACKET_SIZE)
471 ist->is_start=0;
472 else
473 byte_delta= MAX_AUDIO_PACKET_SIZE - size;
474
475 memset(input_tmp, 0, byte_delta);
476 memcpy(input_tmp + byte_delta, buf, size);
477 buf= input_tmp;
478 size += byte_delta;
479 if(verbose > 2)
480 fprintf(stderr, "adding %d audio samples of silence\n", (int)delta);
481 }
482 }else if(audio_sync_method>1){
483 int comp= clip(delta, -audio_sync_method, audio_sync_method);
484 assert(ost->audio_resample);
485 if(verbose > 2)
486 fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
487 fprintf(stderr, "drift:%f len:%d opts:%lld ipts:%lld fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(ost->sync_ipts * enc->sample_rate), fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2));
488 av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
489 }
490 }
491 }else
492 ost->sync_opts= lrintf(ost->sync_ipts * enc->sample_rate)
493 - fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2); //FIXME wrong
494
495 if (ost->audio_resample) {
496 buftmp = audio_buf;
497 size_out = audio_resample(ost->resample,
498 (short *)buftmp, (short *)buf,
499 size / (ist->st->codec.channels * 2));
500 size_out = size_out * enc->channels * 2;
501 } else {
502 buftmp = buf;
503 size_out = size;
504 }
505
506 /* now encode as many frames as possible */
507 if (enc->frame_size > 1) {
508 /* output resampled raw samples */
509 fifo_write(&ost->fifo, buftmp, size_out,
510 &ost->fifo.wptr);
511
512 frame_bytes = enc->frame_size * 2 * enc->channels;
513
514 while (fifo_read(&ost->fifo, audio_buf, frame_bytes,
515 &ost->fifo.rptr) == 0) {
516 AVPacket pkt;
517 av_init_packet(&pkt);
518
519 ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
520 (short *)audio_buf);
521 audio_size += ret;
522 pkt.stream_index= ost->index;
523 pkt.data= audio_out;
524 pkt.size= ret;
525 if(enc->coded_frame)
526 pkt.pts= enc->coded_frame->pts;
527 pkt.flags |= PKT_FLAG_KEY;
528 av_interleaved_write_frame(s, &pkt);
529
530 ost->sync_opts += enc->frame_size;
531 }
532 } else {
533 AVPacket pkt;
534 av_init_packet(&pkt);
535
536 ost->sync_opts += size_out / (2 * enc->channels);
537
538 /* output a pcm frame */
539 /* XXX: change encoding codec API to avoid this ? */
540 switch(enc->codec->id) {
541 case CODEC_ID_PCM_S16LE:
542 case CODEC_ID_PCM_S16BE:
543 case CODEC_ID_PCM_U16LE:
544 case CODEC_ID_PCM_U16BE:
545 break;
546 default:
547 size_out = size_out >> 1;
548 break;
549 }
550 ret = avcodec_encode_audio(enc, audio_out, size_out,
551 (short *)buftmp);
552 audio_size += ret;
553 pkt.stream_index= ost->index;
554 pkt.data= audio_out;
555 pkt.size= ret;
556 if(enc->coded_frame)
557 pkt.pts= enc->coded_frame->pts;
558 pkt.flags |= PKT_FLAG_KEY;
559 av_interleaved_write_frame(s, &pkt);
560 }
561 }
562
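/* Run a decoded picture through the optional preprocessing steps:
   deinterlacing and/or the vhook frame hooks.  A temporary buffer is
   allocated only when needed; if deinterlacing or the copy fails, the
   original picture is passed through untouched.  *bufp receives the
   temporary buffer so the caller can free it after use. */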
563 static void pre_process_video_frame(AVInputStream *ist, AVPicture *picture, void **bufp)
564 {
565 AVCodecContext *dec;
566 AVPicture *picture2;
567 AVPicture picture_tmp;
568 uint8_t *buf = 0;
569
570 dec = &ist->st->codec;
571
572 /* deinterlace: must be done before any resize */
573 if (do_deinterlace || using_vhook) {
574 int size;
575
576 /* create temporary picture */
577 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
578 buf = av_malloc(size);
579 if (!buf)
580 return;
581
582 picture2 = &picture_tmp;
583 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
584
585 if (do_deinterlace){
586 if(avpicture_deinterlace(picture2, picture,
587 dec->pix_fmt, dec->width, dec->height) < 0) {
588 /* if error, do not deinterlace */
589 av_free(buf);
590 buf = NULL;
591 picture2 = picture;
592 }
593 } else {
594 if (img_convert(picture2, dec->pix_fmt, picture,
595 dec->pix_fmt, dec->width, dec->height) < 0) {
596 /* if error, do not copy */
597 av_free(buf);
598 buf = NULL;
599 picture2 = picture;
600 }
601 }
602 } else {
603 picture2 = picture;
604 }
605
606 frame_hook_process(picture2, dec->pix_fmt, dec->width, dec->height);
607
608 if (picture != picture2)
609 *picture = *picture2;
610 *bufp = buf;
611 }
612
613 /* we begin to correct av delay at this threshold */
614 #define AV_DELAY_MAX 0.100
615
616
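/* Fill the padtop/padbottom/padleft/padright borders of a YUV420P picture
   with 'color'.  The chroma planes are subsampled by two in each direction,
   hence the >> shift applied to all sizes for planes 1 and 2. */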
617 /* Expects img to be yuv420 */
618 static void fill_pad_region(AVPicture* img, int height, int width,
619 int padtop, int padbottom, int padleft, int padright, int *color) {
620
621 int i, y, shift;
622 uint8_t *optr;
623
624 for (i = 0; i < 3; i++) {
625 shift = (i == 0) ? 0 : 1;
626
627 if (padtop || padleft) {
628 memset(img->data[i], color[i], (((img->linesize[i] * padtop) +
629 padleft) >> shift));
630 }
631
632 if (padleft || padright) {
633 optr = img->data[i] + (img->linesize[i] * (padtop >> shift)) +
634 (img->linesize[i] - (padright >> shift));
635
636 for (y = 0; y < ((height - (padtop + padbottom)) >> shift); y++) {
637 memset(optr, color[i], (padleft + padright) >> shift);
638 optr += img->linesize[i];
639 }
640 }
641
642 if (padbottom) {
643 optr = img->data[i] + (img->linesize[i] * ((height - padbottom) >> shift));
644 memset(optr, color[i], ((img->linesize[i] * padbottom) >> shift));
645 }
646 }
647 }
648
649 static uint8_t *bit_buffer= NULL;
650
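/* Encode one input picture into output stream 'ost'.  With video sync
   enabled, the number of frames to emit is derived from
   vdelta = sync_ipts * frame_rate / frame_rate_base - sync_opts: the picture
   is dropped when the output is more than ~1.1 frames ahead and duplicated
   when it lags behind.  The picture is then converted to the target pixel
   format and resampled, cropped or padded as configured before being handed
   to the encoder (or written directly for AVFMT_RAWPICTURE formats). */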
651 static void do_video_out(AVFormatContext *s,
652 AVOutputStream *ost,
653 AVInputStream *ist,
654 AVFrame *in_picture,
655 int *frame_size)
656 {
657 int nb_frames, i, ret;
658 AVFrame *final_picture, *formatted_picture;
659 AVFrame picture_format_temp, picture_crop_temp;
660 uint8_t *buf = NULL, *buf1 = NULL;
661 AVCodecContext *enc, *dec;
662 enum PixelFormat target_pixfmt;
663
664 #define VIDEO_BUFFER_SIZE (1024*1024)
665
666 avcodec_get_frame_defaults(&picture_format_temp);
667 avcodec_get_frame_defaults(&picture_crop_temp);
668
669 enc = &ost->st->codec;
670 dec = &ist->st->codec;
671
672 /* by default, we output a single frame */
673 nb_frames = 1;
674
675 *frame_size = 0;
676
677 if(video_sync_method){
678 double vdelta;
679 vdelta = ost->sync_ipts * enc->frame_rate / enc->frame_rate_base - ost->sync_opts;
680 //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
681 if (vdelta < -1.1)
682 nb_frames = 0;
683 else if (vdelta > 1.1)
684 nb_frames = lrintf(vdelta - 1.1 + 0.5);
685 //fprintf(stderr, "vdelta:%f, ost->sync_opts:%lld, ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, ost->sync_ipts, nb_frames);
686 if (nb_frames == 0){
687 ++nb_frames_drop;
688 if (verbose>2)
689 fprintf(stderr, "*** drop!\n");
690 }else if (nb_frames > 1) {
691 nb_frames_dup += nb_frames;
692 if (verbose>2)
693 fprintf(stderr, "*** %d dup!\n", nb_frames-1);
694 }
695 }else
696 ost->sync_opts= lrintf(ost->sync_ipts * enc->frame_rate / enc->frame_rate_base);
697
698 if (nb_frames <= 0)
699 return;
700
701 /* convert pixel format if needed */
702 target_pixfmt = ost->video_resample || ost->video_pad
703 ? PIX_FMT_YUV420P : enc->pix_fmt;
704 if (dec->pix_fmt != target_pixfmt) {
705 int size;
706
707 /* create temporary picture */
708 size = avpicture_get_size(target_pixfmt, dec->width, dec->height);
709 buf = av_malloc(size);
710 if (!buf)
711 return;
712 formatted_picture = &picture_format_temp;
713 avpicture_fill((AVPicture*)formatted_picture, buf, target_pixfmt, dec->width, dec->height);
714
715 if (img_convert((AVPicture*)formatted_picture, target_pixfmt,
716 (AVPicture *)in_picture, dec->pix_fmt,
717 dec->width, dec->height) < 0) {
718
719 if (verbose >= 0)
720 fprintf(stderr, "pixel format conversion not handled\n");
721
722 goto the_end;
723 }
724 } else {
725 formatted_picture = in_picture;
726 }
727
728 /* XXX: resampling could be done before raw format conversion in
729 some cases to go faster */
730 /* XXX: only works for YUV420P */
731 if (ost->video_resample) {
732 final_picture = &ost->pict_tmp;
733 img_resample(ost->img_resample_ctx, (AVPicture*)final_picture, (AVPicture*)formatted_picture);
734
735 if (ost->padtop || ost->padbottom || ost->padleft || ost->padright) {
736 fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
737 ost->padtop, ost->padbottom, ost->padleft, ost->padright,
738 padcolor);
739 }
740
741 if (enc->pix_fmt != PIX_FMT_YUV420P) {
742 int size;
743
744 av_free(buf);
745 /* create temporary picture */
746 size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
747 buf = av_malloc(size);
748 if (!buf)
749 return;
750 final_picture = &picture_format_temp;
751 avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
752
753 if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
754 (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
755 enc->width, enc->height) < 0) {
756
757 if (verbose >= 0)
758 fprintf(stderr, "pixel format conversion not handled\n");
759
760 goto the_end;
761 }
762 }
763 } else if (ost->video_crop) {
764 picture_crop_temp.data[0] = formatted_picture->data[0] +
765 (ost->topBand * formatted_picture->linesize[0]) + ost->leftBand;
766
767 picture_crop_temp.data[1] = formatted_picture->data[1] +
768 ((ost->topBand >> 1) * formatted_picture->linesize[1]) +
769 (ost->leftBand >> 1);
770
771 picture_crop_temp.data[2] = formatted_picture->data[2] +
772 ((ost->topBand >> 1) * formatted_picture->linesize[2]) +
773 (ost->leftBand >> 1);
774
775 picture_crop_temp.linesize[0] = formatted_picture->linesize[0];
776 picture_crop_temp.linesize[1] = formatted_picture->linesize[1];
777 picture_crop_temp.linesize[2] = formatted_picture->linesize[2];
778 final_picture = &picture_crop_temp;
779 } else if (ost->video_pad) {
780 final_picture = &ost->pict_tmp;
781
782 for (i = 0; i < 3; i++) {
783 uint8_t *optr, *iptr;
784 int shift = (i == 0) ? 0 : 1;
785 int y, yheight;
786
787 /* set offset to start writing image into */
788 optr = final_picture->data[i] + (((final_picture->linesize[i] *
789 ost->padtop) + ost->padleft) >> shift);
790 iptr = formatted_picture->data[i];
791
792 yheight = (enc->height - ost->padtop - ost->padbottom) >> shift;
793 for (y = 0; y < yheight; y++) {
794 /* copy unpadded image row into padded image row */
795 memcpy(optr, iptr, formatted_picture->linesize[i]);
796 optr += final_picture->linesize[i];
797 iptr += formatted_picture->linesize[i];
798 }
799 }
800
801 fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
802 ost->padtop, ost->padbottom, ost->padleft, ost->padright,
803 padcolor);
804
805 if (enc->pix_fmt != PIX_FMT_YUV420P) {
806 int size;
807
808 av_free(buf);
809 /* create temporary picture */
810 size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
811 buf = av_malloc(size);
812 if (!buf)
813 return;
814 final_picture = &picture_format_temp;
815 avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
816
817 if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
818 (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
819 enc->width, enc->height) < 0) {
820
821 if (verbose >= 0)
822 fprintf(stderr, "pixel format conversion not handled\n");
823
824 goto the_end;
825 }
826 }
827 } else {
828 final_picture = formatted_picture;
829 }
830 /* duplicates frame if needed */
831 for(i=0;i<nb_frames;i++) {
832 AVPacket pkt;
833 av_init_packet(&pkt);
834 pkt.stream_index= ost->index;
835
836 if (s->oformat->flags & AVFMT_RAWPICTURE) {
837 /* raw pictures are written as AVPicture structure to
838 avoid any copies. We temporarily support the older
839 method. */
840 AVFrame* old_frame = enc->coded_frame;
841 enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
842 pkt.data= (uint8_t *)final_picture;
843 pkt.size= sizeof(AVPicture);
844 if(dec->coded_frame)
845 pkt.pts= dec->coded_frame->pts;
846 if(dec->coded_frame && dec->coded_frame->key_frame)
847 pkt.flags |= PKT_FLAG_KEY;
848
849 av_interleaved_write_frame(s, &pkt);
850 enc->coded_frame = old_frame;
851 } else {
852 AVFrame big_picture;
853
854 big_picture= *final_picture;
855 /* better than nothing: use input picture interlaced
856 settings */
857 big_picture.interlaced_frame = in_picture->interlaced_frame;
858 if(do_interlace_me || do_interlace_dct){
859 if(top_field_first == -1)
860 big_picture.top_field_first = in_picture->top_field_first;
861 else
862 big_picture.top_field_first = top_field_first;
863 }
864
865 /* handles sameq here. This is not correct because it may
866 not be a global option */
867 if (same_quality) {
868 big_picture.quality = ist->st->quality;
869 }else
870 big_picture.quality = ost->st->quality;
871 if(!me_threshold)
872 big_picture.pict_type = 0;
873 // big_picture.pts = AV_NOPTS_VALUE;
874 big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->frame_rate_base, enc->frame_rate);
875 //av_log(NULL, AV_LOG_DEBUG, "%lld -> encoder\n", ost->sync_opts);
876 ret = avcodec_encode_video(enc,
877 bit_buffer, VIDEO_BUFFER_SIZE,
878 &big_picture);
879 //enc->frame_number = enc->real_pict_num;
880 if(ret){
881 pkt.data= bit_buffer;
882 pkt.size= ret;
883 if(enc->coded_frame)
884 pkt.pts= enc->coded_frame->pts;
885 /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %lld/%lld\n",
886 pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->frame_rate, AV_TIME_BASE*(int64_t)enc->frame_rate_base) : -1,
887 pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->frame_rate, AV_TIME_BASE*(int64_t)enc->frame_rate_base) : -1);*/
888
889 if(enc->coded_frame && enc->coded_frame->key_frame)
890 pkt.flags |= PKT_FLAG_KEY;
891 av_interleaved_write_frame(s, &pkt);
892 *frame_size = ret;
893 //fprintf(stderr,"\nFrame: %3d %3d size: %5d type: %d",
894 // enc->frame_number-1, enc->real_pict_num, ret,
895 // enc->pict_type);
896 /* if two pass, output log */
897 if (ost->logfile && enc->stats_out) {
898 fprintf(ost->logfile, "%s", enc->stats_out);
899 }
900 }
901 }
902 ost->sync_opts++;
903 ost->frame_number++;
904 }
905 the_end:
906 av_free(buf);
907 av_free(buf1);
908 }
909
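/* Convert a normalized mean squared error into PSNR in dB:
   psnr = -10 * log10(mse), where the callers pass
   mse = coded_frame->error[plane] / (width * height * 255 * 255). */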
910 static double psnr(double d){
911 if(d==0) return INFINITY;
912 return -10.0*log(d)/log(10.0);
913 }
914
915 static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
916 int frame_size)
917 {
918 static FILE *fvstats=NULL;
919 char filename[40];
920 time_t today2;
921 struct tm *today;
922 AVCodecContext *enc;
923 int frame_number;
924 int64_t ti;
925 double ti1, bitrate, avg_bitrate;
926
927 if (!fvstats) {
928 today2 = time(NULL);
929 today = localtime(&today2);
930 sprintf(filename, "vstats_%02d%02d%02d.log", today->tm_hour,
931 today->tm_min,
932 today->tm_sec);
933 fvstats = fopen(filename,"w");
934 if (!fvstats) {
935 perror("fopen");
936 exit(1);
937 }
938 }
939
940 ti = MAXINT64;
941 enc = &ost->st->codec;
942 if (enc->codec_type == CODEC_TYPE_VIDEO) {
943 frame_number = ost->frame_number;
944 fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
945 if (enc->flags&CODEC_FLAG_PSNR)
946 fprintf(fvstats, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
947
948 fprintf(fvstats,"f_size= %6d ", frame_size);
949 /* compute pts value */
950 ti1 = (double)ost->sync_opts *enc->frame_rate_base / enc->frame_rate;
951 if (ti1 < 0.01)
952 ti1 = 0.01;
953
954 bitrate = (double)(frame_size * 8) * enc->frame_rate / enc->frame_rate_base / 1000.0;
955 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
956 fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
957 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
958 fprintf(fvstats,"type= %c\n", av_get_pict_type_char(enc->coded_frame->pict_type));
959 }
960 }
961
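/* Print the progress line (at most every 0.5 seconds, plus a final summary):
   frame count and quantizer of the first video stream, optional per-plane
   PSNR, total output size, elapsed output time and average bitrate, and the
   dup/drop counters at higher verbosity levels. */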
962 static void print_report(AVFormatContext **output_files,
963 AVOutputStream **ost_table, int nb_ostreams,
964 int is_last_report)
965 {
966 char buf[1024];
967 AVOutputStream *ost;
968 AVFormatContext *oc, *os;
969 int64_t total_size;
970 AVCodecContext *enc;
971 int frame_number, vid, i;
972 double bitrate, ti1, pts;
973 static int64_t last_time = -1;
974
975 if (!is_last_report) {
976 int64_t cur_time;
977 /* display the report every 0.5 seconds */
978 cur_time = av_gettime();
979 if (last_time == -1) {
980 last_time = cur_time;
981 return;
982 }
983 if ((cur_time - last_time) < 500000)
984 return;
985 last_time = cur_time;
986 }
987
988
989 oc = output_files[0];
990
991 total_size = url_ftell(&oc->pb);
992
993 buf[0] = '\0';
994 ti1 = 1e10;
995 vid = 0;
996 for(i=0;i<nb_ostreams;i++) {
997 ost = ost_table[i];
998 os = output_files[ost->file_index];
999 enc = &ost->st->codec;
1000 if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1001 sprintf(buf + strlen(buf), "q=%2.1f ",
1002 enc->coded_frame->quality/(float)FF_QP2LAMBDA);
1003 }
1004 if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1005 frame_number = ost->frame_number;
1006 sprintf(buf + strlen(buf), "frame=%5d q=%2.1f ",
1007 frame_number, enc->coded_frame ? enc->coded_frame->quality/(float)FF_QP2LAMBDA : 0);
1008 if(is_last_report)
1009 sprintf(buf + strlen(buf), "L");
1010 if (enc->flags&CODEC_FLAG_PSNR){
1011 int j;
1012 double error, error_sum=0;
1013 double scale, scale_sum=0;
1014 char type[3]= {'Y','U','V'};
1015 sprintf(buf + strlen(buf), "PSNR=");
1016 for(j=0; j<3; j++){
1017 if(is_last_report){
1018 error= enc->error[j];
1019 scale= enc->width*enc->height*255.0*255.0*frame_number;
1020 }else{
1021 error= enc->coded_frame->error[j];
1022 scale= enc->width*enc->height*255.0*255.0;
1023 }
1024 if(j) scale/=4;
1025 error_sum += error;
1026 scale_sum += scale;
1027 sprintf(buf + strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
1028 }
1029 sprintf(buf + strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
1030 }
1031 vid = 1;
1032 }
1033 /* compute min output value */
1034 pts = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
1035 if ((pts < ti1) && (pts > 0))
1036 ti1 = pts;
1037 }
1038 if (ti1 < 0.01)
1039 ti1 = 0.01;
1040
1041 if (verbose || is_last_report) {
1042 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1043
1044 sprintf(buf + strlen(buf),
1045 "size=%8.0fkB time=%0.1f bitrate=%6.1fkbits/s",
1046 (double)total_size / 1024, ti1, bitrate);
1047
1048 if (verbose > 1)
1049 sprintf(buf + strlen(buf), " dup=%d drop=%d",
1050 nb_frames_dup, nb_frames_drop);
1051
1052 if (verbose >= 0)
1053 fprintf(stderr, "%s \r", buf);
1054
1055 fflush(stderr);
1056 }
1057
1058 if (is_last_report && verbose >= 0){
1059 int64_t raw= audio_size + video_size + extra_size;
1060 fprintf(stderr, "\n");
1061 fprintf(stderr, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1062 video_size/1024.0,
1063 audio_size/1024.0,
1064 extra_size/1024.0,
1065 100.0*(total_size - raw)/raw
1066 );
1067 }
1068 }
1069
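/* Process one demuxed packet from input stream 'ist': decode it if any mapped
   output needs re-encoding, maintain the synthetic next_pts used when packets
   carry no timestamps, and then either encode the decoded data or copy the
   packet unchanged into every output stream fed by this input. */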
1070 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1071 static int output_packet(AVInputStream *ist, int ist_index,
1072 AVOutputStream **ost_table, int nb_ostreams,
1073 const AVPacket *pkt)
1074 {
1075 AVFormatContext *os;
1076 AVOutputStream *ost;
1077 uint8_t *ptr;
1078 int len, ret, i;
1079 uint8_t *data_buf;
1080 int data_size, got_picture;
1081 AVFrame picture;
1082 void *buffer_to_free;
1083
1084 if (pkt && pkt->dts != AV_NOPTS_VALUE) { //FIXME seems redundant, as libavformat does this too
1085 ist->next_pts = ist->pts = pkt->dts;
1086 } else {
1087 assert(ist->pts == ist->next_pts);
1088 }
1089
1090 if (pkt == NULL) {
1091 /* EOF handling */
1092 ptr = NULL;
1093 len = 0;
1094 goto handle_eof;
1095 }
1096
1097 len = pkt->size;
1098 ptr = pkt->data;
1099 while (len > 0) {
1100 handle_eof:
1101 /* decode the packet if needed */
1102 data_buf = NULL; /* fail safe */
1103 data_size = 0;
1104 if (ist->decoding_needed) {
1105 switch(ist->st->codec.codec_type) {
1106 case CODEC_TYPE_AUDIO:{
1107 /* XXX: could avoid copy if PCM 16 bits with same
1108 endianness as CPU */
1109 short samples[pkt && pkt->size > AVCODEC_MAX_AUDIO_FRAME_SIZE/2 ? pkt->size : AVCODEC_MAX_AUDIO_FRAME_SIZE/2];
1110 ret = avcodec_decode_audio(&ist->st->codec, samples, &data_size,
1111 ptr, len);
1112 if (ret < 0)
1113 goto fail_decode;
1114 ptr += ret;
1115 len -= ret;
1116 /* some mpeg audio decoder bugs give data_size < 0; */
1117 /* these seem to be overflows */
1118 if (data_size <= 0) {
1119 /* no audio frame */
1120 continue;
1121 }
1122 data_buf = (uint8_t *)samples;
1123 ist->next_pts += ((int64_t)AV_TIME_BASE/2 * data_size) /
1124 (ist->st->codec.sample_rate * ist->st->codec.channels);
1125 break;}
1126 case CODEC_TYPE_VIDEO:
1127 data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2;
1128 /* XXX: allocate picture correctly */
1129 avcodec_get_frame_defaults(&picture);
1130
1131 ret = avcodec_decode_video(&ist->st->codec,
1132 &picture, &got_picture, ptr, len);
1133 ist->st->quality= picture.quality;
1134 if (ret < 0)
1135 goto fail_decode;
1136 if (!got_picture) {
1137 /* no picture yet */
1138 goto discard_packet;
1139 }
1140 if (ist->st->codec.frame_rate_base != 0) {
1141 ist->next_pts += ((int64_t)AV_TIME_BASE *
1142 ist->st->codec.frame_rate_base) /
1143 ist->st->codec.frame_rate;
1144 }
1145 len = 0;
1146 break;
1147 default:
1148 goto fail_decode;
1149 }
1150 } else {
1151 data_buf = ptr;
1152 data_size = len;
1153 ret = len;
1154 len = 0;
1155 }
1156
1157 buffer_to_free = NULL;
1158 if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO) {
1159 pre_process_video_frame(ist, (AVPicture *)&picture,
1160 &buffer_to_free);
1161 }
1162
1163 /* frame rate emulation */
1164 if (ist->st->codec.rate_emu) {
1165 int64_t pts = av_rescale((int64_t) ist->frame * ist->st->codec.frame_rate_base, 1000000, ist->st->codec.frame_rate);
1166 int64_t now = av_gettime() - ist->start;
1167 if (pts > now)
1168 usleep(pts - now);
1169
1170 ist->frame++;
1171 }
1172
1173 #if 0
1174 /* mpeg PTS deordering : if it is a P or I frame, the PTS
1175 is the one of the next displayed one */
1176 /* XXX: add mpeg4 too ? */
1177 if (ist->st->codec.codec_id == CODEC_ID_MPEG1VIDEO) {
1178 if (ist->st->codec.pict_type != B_TYPE) {
1179 int64_t tmp;
1180 tmp = ist->last_ip_pts;
1181 ist->last_ip_pts = ist->frac_pts.val;
1182 ist->frac_pts.val = tmp;
1183 }
1184 }
1185 #endif
1186 /* once the requested start time has been reached, transcode or copy
1187 the data and output the resulting packets */
1188 if (start_time == 0 || ist->pts >= start_time)
1189 for(i=0;i<nb_ostreams;i++) {
1190 int frame_size;
1191
1192 ost = ost_table[i];
1193 if (ost->source_index == ist_index) {
1194 os = output_files[ost->file_index];
1195
1196 #if 0
1197 printf("%d: got pts=%0.3f %0.3f\n", i,
1198 (double)pkt->pts / AV_TIME_BASE,
1199 ((double)ist->pts / AV_TIME_BASE) -
1200 ((double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den));
1201 #endif
1202 /* set the input output pts pairs */
1203 ost->sync_ipts = (double)(ist->pts + input_files_ts_offset[ist->file_index])/ AV_TIME_BASE;
1204
1205 if (ost->encoding_needed) {
1206 switch(ost->st->codec.codec_type) {
1207 case CODEC_TYPE_AUDIO:
1208 do_audio_out(os, ost, ist, data_buf, data_size);
1209 break;
1210 case CODEC_TYPE_VIDEO:
1211 /* find an audio stream for synchro */
1212 {
1213 int i;
1214 AVOutputStream *audio_sync, *ost1;
1215 audio_sync = NULL;
1216 for(i=0;i<nb_ostreams;i++) {
1217 ost1 = ost_table[i];
1218 if (ost1->file_index == ost->file_index &&
1219 ost1->st->codec.codec_type == CODEC_TYPE_AUDIO) {
1220 audio_sync = ost1;
1221 break;
1222 }
1223 }
1224
1225 do_video_out(os, ost, ist, &picture, &frame_size);
1226 video_size += frame_size;
1227 if (do_vstats && frame_size)
1228 do_video_stats(os, ost, frame_size);
1229 }
1230 break;
1231 default:
1232 av_abort();
1233 }
1234 } else {
1235 AVFrame avframe; //FIXME/XXX remove this
1236 AVPacket opkt;
1237 av_init_packet(&opkt);
1238
1239 /* no reencoding needed : output the packet directly */
1240 /* force the input stream PTS */
1241
1242 avcodec_get_frame_defaults(&avframe);
1243 ost->st->codec.coded_frame= &avframe;
1244 avframe.key_frame = pkt->flags & PKT_FLAG_KEY;
1245
1246 if(ost->st->codec.codec_type == CODEC_TYPE_AUDIO)
1247 audio_size += data_size;
1248 else if (ost->st->codec.codec_type == CODEC_TYPE_VIDEO)
1249 video_size += data_size;
1250
1251 opkt.stream_index= ost->index;
1252 opkt.data= data_buf;
1253 opkt.size= data_size;
1254 opkt.pts= pkt->pts + input_files_ts_offset[ist->file_index];
1255 opkt.dts= pkt->dts + input_files_ts_offset[ist->file_index];
1256 opkt.flags= pkt->flags;
1257
1258 av_interleaved_write_frame(os, &opkt);
1259 ost->st->codec.frame_number++;
1260 ost->frame_number++;
1261 }
1262 }
1263 }
1264 av_free(buffer_to_free);
1265 }
1266 discard_packet:
1267 if (pkt == NULL) {
1268 /* EOF handling */
1269
1270 for(i=0;i<nb_ostreams;i++) {
1271 ost = ost_table[i];
1272 if (ost->source_index == ist_index) {
1273 AVCodecContext *enc= &ost->st->codec;
1274 os = output_files[ost->file_index];
1275
1276 if(ost->st->codec.codec_type == CODEC_TYPE_AUDIO && enc->frame_size <=1)
1277 continue;
1278 if(ost->st->codec.codec_type == CODEC_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
1279 continue;
1280
1281 if (ost->encoding_needed) {
1282 for(;;) {
1283 AVPacket pkt;
1284 av_init_packet(&pkt);
1285 pkt.stream_index= ost->index;
1286
1287 switch(ost->st->codec.codec_type) {
1288 case CODEC_TYPE_AUDIO:
1289 ret = avcodec_encode_audio(enc, bit_buffer, VIDEO_BUFFER_SIZE, NULL);
1290 audio_size += ret;
1291 pkt.flags |= PKT_FLAG_KEY;
1292 break;
1293 case CODEC_TYPE_VIDEO:
1294 ret = avcodec_encode_video(enc, bit_buffer, VIDEO_BUFFER_SIZE, NULL);
1295 video_size += ret;
1296 if(enc->coded_frame && enc->coded_frame->key_frame)
1297 pkt.flags |= PKT_FLAG_KEY;
1298 if (ost->logfile && enc->stats_out) {
1299 fprintf(ost->logfile, "%s", enc->stats_out);
1300 }
1301 break;
1302 default:
1303 ret=-1;
1304 }
1305
1306 if(ret<=0)
1307 break;
1308 pkt.data= bit_buffer;
1309 pkt.size= ret;
1310 if(enc->coded_frame)
1311 pkt.pts= enc->coded_frame->pts;
1312 av_interleaved_write_frame(os, &pkt);
1313 }
1314 }
1315 }
1316 }
1317 }
1318
1319 return 0;
1320 fail_decode:
1321 return -1;
1322 }
1323
1324
1325 /*
1326 * The following code is the main loop of the file converter
1327 */
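/* av_encode() builds the input and output stream tables, resolves the
   explicit stream maps (or picks the first input stream of matching type),
   decides for every output stream whether it is a plain copy or needs
   decoding and re-encoding, opens the codecs and writes the headers.  The
   main loop then repeatedly picks the input file feeding the output stream
   that is furthest behind, reads a packet, hands it to output_packet() and
   prints progress; at EOF the codecs are flushed and the trailers written. */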
1328 static int av_encode(AVFormatContext **output_files,
1329 int nb_output_files,
1330 AVFormatContext **input_files,
1331 int nb_input_files,
1332 AVStreamMap *stream_maps, int nb_stream_maps)
1333 {
1334 int ret, i, j, k, n, nb_istreams = 0, nb_ostreams = 0;
1335 AVFormatContext *is, *os;
1336 AVCodecContext *codec, *icodec;
1337 AVOutputStream *ost, **ost_table = NULL;
1338 AVInputStream *ist, **ist_table = NULL;
1339 AVInputFile *file_table;
1340 AVFormatContext *stream_no_data;
1341 int key;
1342
1343 file_table= (AVInputFile*) av_mallocz(nb_input_files * sizeof(AVInputFile));
1344 if (!file_table)
1345 goto fail;
1346
1347 if (!bit_buffer)
1348 bit_buffer = av_malloc(VIDEO_BUFFER_SIZE);
1349 if (!bit_buffer)
1350 goto fail;
1351
1352 /* input stream init */
1353 j = 0;
1354 for(i=0;i<nb_input_files;i++) {
1355 is = input_files[i];
1356 file_table[i].ist_index = j;
1357 file_table[i].nb_streams = is->nb_streams;
1358 j += is->nb_streams;
1359 }
1360 nb_istreams = j;
1361
1362 ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *));
1363 if (!ist_table)
1364 goto fail;
1365
1366 for(i=0;i<nb_istreams;i++) {
1367 ist = av_mallocz(sizeof(AVInputStream));
1368 if (!ist)
1369 goto fail;
1370 ist_table[i] = ist;
1371 }
1372 j = 0;
1373 for(i=0;i<nb_input_files;i++) {
1374 is = input_files[i];
1375 for(k=0;k<is->nb_streams;k++) {
1376 ist = ist_table[j++];
1377 ist->st = is->streams[k];
1378 ist->file_index = i;
1379 ist->index = k;
1380 ist->discard = 1; /* the stream is discarded by default
1381 (changed later) */
1382
1383 if (ist->st->codec.rate_emu) {
1384 ist->start = av_gettime();
1385 ist->frame = 0;
1386 }
1387 }
1388 }
1389
1390 /* output stream init */
1391 nb_ostreams = 0;
1392 for(i=0;i<nb_output_files;i++) {
1393 os = output_files[i];
1394 nb_ostreams += os->nb_streams;
1395 }
1396 if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) {
1397 fprintf(stderr, "Number of stream maps must match number of output streams\n");
1398 exit(1);
1399 }
1400
1401 /* Sanity check the mapping args -- do the input files & streams exist? */
1402 for(i=0;i<nb_stream_maps;i++) {
1403 int fi = stream_maps[i].file_index;
1404 int si = stream_maps[i].stream_index;
1405
1406 if (fi < 0 || fi > nb_input_files - 1 ||
1407 si < 0 || si > file_table[fi].nb_streams - 1) {
1408 fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si);
1409 exit(1);
1410 }
1411 }
1412
1413 ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams);
1414 if (!ost_table)
1415 goto fail;
1416 for(i=0;i<nb_ostreams;i++) {
1417 ost = av_mallocz(sizeof(AVOutputStream));
1418 if (!ost)
1419 goto fail;
1420 ost_table[i] = ost;
1421 }
1422
1423 n = 0;
1424 for(k=0;k<nb_output_files;k++) {
1425 os = output_files[k];
1426 for(i=0;i<os->nb_streams;i++) {
1427 int found;
1428 ost = ost_table[n++];
1429 ost->file_index = k;
1430 ost->index = i;
1431 ost->st = os->streams[i];
1432 if (nb_stream_maps > 0) {
1433 ost->source_index = file_table[stream_maps[n-1].file_index].ist_index +
1434 stream_maps[n-1].stream_index;
1435
1436 /* Sanity check that the stream types match */
1437 if (ist_table[ost->source_index]->st->codec.codec_type != ost->st->codec.codec_type) {
1438 fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n",
1439 stream_maps[n-1].file_index, stream_maps[n-1].stream_index,
1440 ost->file_index, ost->index);
1441 exit(1);
1442 }
1443
1444 } else {
1445 /* get corresponding input stream index : we select the first one with the right type */
1446 found = 0;
1447 for(j=0;j<nb_istreams;j++) {
1448 ist = ist_table[j];
1449 if (ist->discard &&
1450 ist->st->codec.codec_type == ost->st->codec.codec_type) {
1451 ost->source_index = j;
1452 found = 1;
1453 }
1454 }
1455
1456 if (!found) {
1457 /* try again and reuse existing stream */
1458 for(j=0;j<nb_istreams;j++) {
1459 ist = ist_table[j];
1460 if (ist->st->codec.codec_type == ost->st->codec.codec_type) {
1461 ost->source_index = j;
1462 found = 1;
1463 }
1464 }
1465 if (!found) {
1466 fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n",
1467 ost->file_index, ost->index);
1468 exit(1);
1469 }
1470 }
1471 }
1472 ist = ist_table[ost->source_index];
1473 ist->discard = 0;
1474 }
1475 }
1476
1477 /* for each output stream, we compute the right encoding parameters */
1478 for(i=0;i<nb_ostreams;i++) {
1479 ost = ost_table[i];
1480 ist = ist_table[ost->source_index];
1481
1482 codec = &ost->st->codec;
1483 icodec = &ist->st->codec;
1484
1485 if (ost->st->stream_copy) {
1486 /* if stream_copy is selected, no need to decode or encode */
1487 codec->codec_id = icodec->codec_id;
1488 codec->codec_type = icodec->codec_type;
1489 codec->codec_tag = icodec->codec_tag;
1490 codec->bit_rate = icodec->bit_rate;
1491 switch(codec->codec_type) {
1492 case CODEC_TYPE_AUDIO:
1493 codec->sample_rate = icodec->sample_rate;
1494 codec->channels = icodec->channels;
1495 codec->frame_size = icodec->frame_size;
1496 break;
1497 case CODEC_TYPE_VIDEO:
1498 codec->frame_rate = icodec->frame_rate;
1499 codec->frame_rate_base = icodec->frame_rate_base;
1500 codec->width = icodec->width;
1501 codec->height = icodec->height;
1502 break;
1503 default:
1504 av_abort();
1505 }
1506 } else {
1507 switch(codec->codec_type) {
1508 case CODEC_TYPE_AUDIO:
1509 if (fifo_init(&ost->fifo, 2 * MAX_AUDIO_PACKET_SIZE))
1510 goto fail;
1511
1512 if (codec->channels == icodec->channels &&
1513 codec->sample_rate == icodec->sample_rate) {
1514 ost->audio_resample = 0;
1515 } else {
1516 if (codec->channels != icodec->channels &&
1517 (icodec->codec_id == CODEC_ID_AC3 ||
1518 icodec->codec_id == CODEC_ID_DTS)) {
1519 /* Special case for 5:1 AC3 and DTS input */
1520 /* and mono or stereo output */
1521 /* Request specific number of channels */
1522 icodec->channels = codec->channels;
1523 if (codec->sample_rate == icodec->sample_rate)
1524 ost->audio_resample = 0;
1525 else {
1526 ost->audio_resample = 1;
1527 }
1528 } else {
1529 ost->audio_resample = 1;
1530 }
1531 }
1532 if(audio_sync_method>1)
1533 ost->audio_resample = 1;
1534
1535 if(ost->audio_resample){
1536 ost->resample = audio_resample_init(codec->channels, icodec->channels,
1537 codec->sample_rate, icodec->sample_rate);
1538 if(!ost->resample){
1539 printf("Can't resample. Aborting.\n");
1540 av_abort();
1541 }
1542 }
1543 ist->decoding_needed = 1;
1544 ost->encoding_needed = 1;
1545 break;
1546 case CODEC_TYPE_VIDEO:
1547 if (codec->width == icodec->width &&
1548 codec->height == icodec->height &&
1549 frame_topBand == 0 &&
1550 frame_bottomBand == 0 &&
1551 frame_leftBand == 0 &&
1552 frame_rightBand == 0 &&
1553 frame_padtop == 0 &&
1554 frame_padbottom == 0 &&
1555 frame_padleft == 0 &&
1556 frame_padright == 0)
1557 {
1558 ost->video_resample = 0;
1559 ost->video_crop = 0;
1560 ost->video_pad = 0;
1561 } else if ((codec->width == icodec->width -
1562 (frame_leftBand + frame_rightBand)) &&
1563 (codec->height == icodec->height -
1564 (frame_topBand + frame_bottomBand)))
1565 {
1566 ost->video_resample = 0;
1567 ost->video_crop = 1;
1568 ost->topBand = frame_topBand;
1569 ost->leftBand = frame_leftBand;
1570 } else if ((codec->width == icodec->width +
1571 (frame_padleft + frame_padright)) &&
1572 (codec->height == icodec->height +
1573 (frame_padtop + frame_padbottom))) {
1574 ost->video_resample = 0;
1575 ost->video_crop = 0;
1576 ost->video_pad = 1;
1577 ost->padtop = frame_padtop;
1578 ost->padleft = frame_padleft;
1579 ost->padbottom = frame_padbottom;
1580 ost->padright = frame_padright;
1581 avcodec_get_frame_defaults(&ost->pict_tmp);
1582 if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
1583 codec->width, codec->height ) )
1584 goto fail;
1585 } else {
1586 ost->video_resample = 1;
1587 ost->video_crop = 0; // cropping is handled as part of resample
1588 avcodec_get_frame_defaults(&ost->pict_tmp);
1589 if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
1590 codec->width, codec->height ) )
1591 goto fail;
1592
1593 ost->img_resample_ctx = img_resample_full_init(
1594 ost->st->codec.width, ost->st->codec.height,
1595 ist->st->codec.width, ist->st->codec.height,
1596 frame_topBand, frame_bottomBand,
1597 frame_leftBand, frame_rightBand,
1598 frame_padtop, frame_padbottom,
1599 frame_padleft, frame_padright);
1600
1601 ost->padtop = frame_padtop;
1602 ost->padleft = frame_padleft;
1603 ost->padbottom = frame_padbottom;
1604 ost->padright = frame_padright;
1605
1606 }
1607 ost->encoding_needed = 1;
1608 ist->decoding_needed = 1;
1609 break;
1610 default:
1611 av_abort();
1612 }
1613 /* two pass mode */
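/* In pass 1 a per-stream log file is opened for writing and the encoder's
   stats_out string is appended to it after every frame; in pass 2 the whole
   log is read back into codec->stats_in before the encoder is opened. */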
1614 if (ost->encoding_needed &&
1615 (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1616 char logfilename[1024];
1617 FILE *f;
1618 int size;
1619 char *logbuffer;
1620
1621 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1622 pass_logfilename ?
1623 pass_logfilename : DEFAULT_PASS_LOGFILENAME, i);
1624 if (codec->flags & CODEC_FLAG_PASS1) {
1625 f = fopen(logfilename, "w");
1626 if (!f) {
1627 perror(logfilename);
1628 exit(1);
1629 }
1630 ost->logfile = f;
1631 } else {
1632 /* read the log file */
1633 f = fopen(logfilename, "r");
1634 if (!f) {
1635 perror(logfilename);
1636 exit(1);
1637 }
1638 fseek(f, 0, SEEK_END);
1639 size = ftell(f);
1640 fseek(f, 0, SEEK_SET);
1641 logbuffer = av_malloc(size + 1);
1642 if (!logbuffer) {
1643 fprintf(stderr, "Could not allocate log buffer\n");
1644 exit(1);
1645 }
1646 size = fread(logbuffer, 1, size, f);
1647 fclose(f);
1648 logbuffer[size] = '\0';
1649 codec->stats_in = logbuffer;
1650 }
1651 }
1652 }
1653 }
1654
1655 /* dump the file output parameters - cannot be done before in case
1656 of stream copy */
1657 for(i=0;i<nb_output_files;i++) {
1658 dump_format(output_files[i], i, output_files[i]->filename, 1);
1659 }
1660
1661 /* dump the stream mapping */
1662 if (verbose >= 0) {
1663 fprintf(stderr, "Stream mapping:\n");
1664 for(i=0;i<nb_ostreams;i++) {
1665 ost = ost_table[i];
1666 fprintf(stderr, " Stream #%d.%d -> #%d.%d\n",
1667 ist_table[ost->source_index]->file_index,
1668 ist_table[ost->source_index]->index,
1669 ost->file_index,
1670 ost->index);
1671 }
1672 }
1673
1674 /* open each encoder */
1675 for(i=0;i<nb_ostreams;i++) {
1676 ost = ost_table[i];
1677 if (ost->encoding_needed) {
1678 AVCodec *codec;
1679 codec = avcodec_find_encoder(ost->st->codec.codec_id);
1680 if (!codec) {
1681 fprintf(stderr, "Unsupported codec for output stream #%d.%d\n",
1682 ost->file_index, ost->index);
1683 exit(1);
1684 }
1685 if (avcodec_open(&ost->st->codec, codec) < 0) {
1686 fprintf(stderr, "Error while opening codec for stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height\n",
1687 ost->file_index, ost->index);
1688 exit(1);
1689 }
1690 extra_size += ost->st->codec.extradata_size;
1691 }
1692 }
1693
1694 /* open each decoder */
1695 for(i=0;i<nb_istreams;i++) {
1696 ist = ist_table[i];
1697 if (ist->decoding_needed) {
1698 AVCodec *codec;
1699 codec = avcodec_find_decoder(ist->st->codec.codec_id);
1700 if (!codec) {
1701 fprintf(stderr, "Unsupported codec (id=%d) for input stream #%d.%d\n",
1702 ist->st->codec.codec_id, ist->file_index, ist->index);
1703 exit(1);
1704 }
1705 if (avcodec_open(&ist->st->codec, codec) < 0) {
1706 fprintf(stderr, "Error while opening codec for input stream #%d.%d\n",
1707 ist->file_index, ist->index);
1708 exit(1);
1709 }
1710 //if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO)
1711 // ist->st->codec.flags |= CODEC_FLAG_REPEAT_FIELD;
1712 }
1713 }
1714
1715 /* init pts */
1716 for(i=0;i<nb_istreams;i++) {
1717 ist = ist_table[i];
1718 is = input_files[ist->file_index];
1719 ist->pts = 0;
1720 ist->next_pts = 0;
1721 ist->is_start = 1;
1722 }
1723
1724 /* compute buffer size max (should use a complete heuristic) */
1725 for(i=0;i<nb_input_files;i++) {
1726 file_table[i].buffer_size_max = 2048;
1727 }
1728
1729 /* set meta data information from input file if required */
1730 for (i=0;i<nb_meta_data_maps;i++) {
1731 AVFormatContext *out_file;
1732 AVFormatContext *in_file;
1733
1734 int out_file_index = meta_data_maps[i].out_file;
1735 int in_file_index = meta_data_maps[i].in_file;
1736 if ( out_file_index < 0 || out_file_index >= nb_output_files ) {
1737 fprintf(stderr, "Invalid output file index %d map_meta_data(%d,%d)\n", out_file_index, out_file_index, in_file_index);
1738 ret = -EINVAL;
1739 goto fail;
1740 }
1741 if ( in_file_index < 0 || in_file_index >= nb_input_files ) {
1742 fprintf(stderr, "Invalid input file index %d map_meta_data(%d,%d)\n", in_file_index, out_file_index, in_file_index);
1743 ret = -EINVAL;
1744 goto fail;
1745 }
1746
1747 out_file = output_files[out_file_index];
1748 in_file = input_files[in_file_index];
1749
1750 strcpy(out_file->title, in_file->title);
1751 strcpy(out_file->author, in_file->author);
1752 strcpy(out_file->copyright, in_file->copyright);
1753 strcpy(out_file->comment, in_file->comment);
1754 strcpy(out_file->album, in_file->album);
1755 out_file->year = in_file->year;
1756 out_file->track = in_file->track;
1757 strcpy(out_file->genre, in_file->genre);
1758 }
1759
1760 /* open files and write file headers */
1761 for(i=0;i<nb_output_files;i++) {
1762 os = output_files[i];
1763 if (av_write_header(os) < 0) {
1764 fprintf(stderr, "Could not write header for output file #%d (incorrect codec parameters ?)\n", i);
1765 ret = -EINVAL;
1766 goto fail;
1767 }
1768 }
1769
1770 #ifndef CONFIG_WIN32
1771 if ( !using_stdin && verbose >= 0) {
1772 fprintf(stderr, "Press [q] to stop encoding\n");
1773 url_set_interrupt_cb(decode_interrupt_cb);
1774 }
1775 #endif
1776 term_init();
1777
1778 stream_no_data = 0;
1779 key = -1;
1780
1781 for(; received_sigterm == 0;) {
1782 int file_index, ist_index;
1783 AVPacket pkt;
1784 double ipts_min;
1785 double opts_min;
1786
1787 redo:
1788 ipts_min= 1e100;
1789 opts_min= 1e100;
1790 /* if 'q' was pressed, exit */
1791 if (!using_stdin) {
1792 if (q_pressed)
1793 break;
1794 /* read_key() returns 0 on EOF */
1795 key = read_key();
1796 if (key == 'q')
1797 break;
1798 }
1799
1800 /* select the stream that we must read now by looking at the
1801 smallest output pts */
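/* For every output stream compute how much has been emitted so far in seconds
   (encoded frame count for video, stream pts otherwise) and read from the
   input file feeding the stream that is furthest behind; when input_sync is
   set the comparison uses input timestamps instead. */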
1802 file_index = -1;
1803 for(i=0;i<nb_ostreams;i++) {
1804 double ipts, opts;
1805 ost = ost_table[i];
1806 os = output_files[ost->file_index];
1807 ist = ist_table[ost->source_index];
1808 if(ost->st->codec.codec_type == CODEC_TYPE_VIDEO)
1809 opts = (double)ost->sync_opts * ost->st->codec.frame_rate_base / ost->st->codec.frame_rate;
1810 else
1811 opts = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
1812 ipts = (double)ist->pts;
1813 if (!file_table[ist->file_index].eof_reached){
1814 if(ipts < ipts_min) {
1815 ipts_min = ipts;
1816 if(input_sync ) file_index = ist->file_index;
1817 }
1818 if(opts < opts_min) {
1819 opts_min = opts;
1820 if(!input_sync) file_index = ist->file_index;
1821 }
1822 }
1823 }
1824 /* if none, it is finished */
1825 if (file_index < 0) {
1826 break;
1827 }
1828
1829 /* finish if recording time exhausted */
1830 if (recording_time > 0 && opts_min >= (recording_time / 1000000.0))
1831 break;
1832
1833 /* read a frame from it and output it in the fifo */
1834 is = input_files[file_index];
1835 if (av_read_frame(is, &pkt) < 0) {
1836 file_table[file_index].eof_reached = 1;
1837 continue;
1838 }
1839
1840 if (!pkt.size) {
1841 stream_no_data = is;
1842 } else {
1843 stream_no_data = 0;
1844 }
1845 if (do_pkt_dump) {
1846 av_pkt_dump(stdout, &pkt, do_hex_dump);
1847 }
1848 /* the following test is needed in case new streams appear
1849 dynamically in the stream: we ignore them */
1850 if (pkt.stream_index >= file_table[file_index].nb_streams)
1851 goto discard_packet;
1852 ist_index = file_table[file_index].ist_index + pkt.stream_index;
1853 ist = ist_table[ist_index];
1854 if (ist->discard)
1855 goto discard_packet;
1856
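/* Detect timestamp discontinuities: if the packet dts is more than 10 seconds
   away from the predicted next_pts and copy_ts is not set, fold the
   difference into this file's timestamp offset and resync every stream
   belonging to the file. */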
1857 // fprintf(stderr, "next:%lld dts:%lld off:%lld %d\n", ist->next_pts, pkt.dts, input_files_ts_offset[ist->file_index], ist->st->codec.codec_type);
1858 if (pkt.dts != AV_NOPTS_VALUE) {
1859 int64_t delta= pkt.dts - ist->next_pts;
1860 if(ABS(delta) > 10LL*AV_TIME_BASE && !copy_ts){
1861 input_files_ts_offset[ist->file_index]-= delta;
1862 if (verbose > 2)
1863 fprintf(stderr, "timestamp discontinuity %lld, new offset= %lld\n", delta, input_files_ts_offset[ist->file_index]);
1864 for(i=0; i<file_table[file_index].nb_streams; i++){
1865 int index= file_table[file_index].ist_index + i;
1866 ist_table[index]->next_pts += delta;
1867 ist_table[index]->is_start=1;
1868 }
1869 }
1870 }
1871
1872 //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->index, pkt.size);
1873 if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
1874
1875 if (verbose >= 0)
1876 fprintf(stderr, "Error while decoding stream #%d.%d\n",
1877 ist->file_index, ist->index);
1878
1879 av_free_packet(&pkt);
1880 goto redo;
1881 }
1882
1883 discard_packet:
1884 av_free_packet(&pkt);
1885
1886 /* dump report by using the first output video and audio streams */
1887 print_report(output_files, ost_table, nb_ostreams, 0);
1888 }
1889
1890 /* at the end of stream, we must flush the decoder buffers */
1891 for(i=0;i<nb_istreams;i++) {
1892 ist = ist_table[i];
1893 if (ist->decoding_needed) {
1894 output_packet(ist, i, ost_table, nb_ostreams, NULL);
1895 }
1896 }
1897
1898 term_exit();
1899
1900 /* write the trailer if needed and close file */
1901 for(i=0;i<nb_output_files;i++) {
1902 os = output_files[i];
1903 av_write_trailer(os);
1904 }
1905
1906 /* dump report by using the first video and audio streams */
1907 print_report(output_files, ost_table, nb_ostreams, 1);
1908
1909 /* close each encoder */
1910 for(i=0;i<nb_ostreams;i++) {
1911 ost = ost_table[i];
1912 if (ost->encoding_needed) {
1913 av_freep(&ost->st->codec.stats_in);
1914 avcodec_close(&ost->st->codec);
1915 }
1916 }
1917
1918 /* close each decoder */
1919 for(i=0;i<nb_istreams;i++) {
1920 ist = ist_table[i];
1921 if (ist->decoding_needed) {
1922 avcodec_close(&ist->st->codec);
1923 }
1924 }
1925
1926 /* finished! */
1927
1928 ret = 0;
1929 fail1:
1930 av_free(file_table);
1931
1932 if (ist_table) {
1933 for(i=0;i<nb_istreams;i++) {
1934 ist = ist_table[i];
1935 av_free(ist);
1936 }
1937 av_free(ist_table);
1938 }
1939 if (ost_table) {
1940 for(i=0;i<nb_ostreams;i++) {
1941 ost = ost_table[i];
1942 if (ost) {
1943 if (ost->logfile) {
1944 fclose(ost->logfile);
1945 ost->logfile = NULL;
1946 }
1947 fifo_free(&ost->fifo); /* works even if fifo is not
1948 initialized but set to zero */
1949 av_free(ost->pict_tmp.data[0]);
1950 if (ost->video_resample)
1951 img_resample_close(ost->img_resample_ctx);
1952 if (ost->audio_resample)
1953 audio_resample_close(ost->resample);
1954 av_free(ost);
1955 }
1956 }
1957 av_free(ost_table);
1958 }
1959 return ret;
1960 fail:
1961 ret = -ENOMEM;
1962 goto fail1;
1963 }
1964
1965 #if 0
1966 int file_read(const char *filename)
1967 {
1968 URLContext *h;
1969 unsigned char buffer[1024];
1970 int len, i;
1971
1972 if (url_open(&h, filename, O_RDONLY) < 0) {
1973 printf("could not open '%s'\n", filename);
1974 return -1;
1975 }
1976 for(;;) {
1977 len = url_read(h, buffer, sizeof(buffer));
1978 if (len <= 0)
1979 break;
1980 for(i=0;i<len;i++) putchar(buffer[i]);
1981 }
1982 url_close(h);
1983 return 0;
1984 }
1985 #endif
1986
1987 static void opt_image_format(const char *arg)
1988 {
1989 AVImageFormat *f;
1990
1991 for(f = first_image_format; f != NULL; f = f->next) {
1992 if (!strcmp(arg, f->name))
1993 break;
1994 }
1995 if (!f) {
1996 fprintf(stderr, "Unknown image format: '%s'\n", arg);
1997 exit(1);
1998 }
1999 image_format = f;
2000 }
2001
2002 static void opt_format(const char *arg)
2003 {
2004 /* compatibility stuff for pgmyuv */
2005 if (!strcmp(arg, "pgmyuv")) {
2006 opt_image_format(arg);
2007 arg = "image";
2008 }
2009
2010 file_iformat = av_find_input_format(arg);
2011 file_oformat = guess_format(arg, NULL, NULL);
2012 if (!file_iformat && !file_oformat) {
2013 fprintf(stderr, "Unknown input or output format: %s\n", arg);
2014 exit(1);
2015 }
2016 }
2017
2018 static void opt_video_bitrate(const char *arg)
2019 {
2020 video_bit_rate = atoi(arg) * 1000;
2021 }
2022
2023 static void opt_video_bitrate_tolerance(const char *arg)
2024 {
2025 video_bit_rate_tolerance = atoi(arg) * 1000;
2026 }
2027
2028 static void opt_video_bitrate_max(const char *arg)
2029 {
2030 video_rc_max_rate = atoi(arg) * 1000;
2031 }
2032
2033 static void opt_video_bitrate_min(const char *arg)
2034 {
2035 video_rc_min_rate = atoi(arg) * 1000;
2036 }
2037
2038 static void opt_video_buffer_size(const char *arg)
2039 {
2040 video_rc_buffer_size = atoi(arg) * 8*1024;
2041 }
2042
2043 static void opt_video_rc_eq(char *arg)
2044 {
2045 video_rc_eq = arg;
2046 }
2047
2048 static void opt_video_rc_override_string(char *arg)
2049 {
2050 video_rc_override_string = arg;
2051 }
2052
2053
2054 static void opt_workaround_bugs(const char *arg)
2055 {
2056 workaround_bugs = atoi(arg);
2057 }
2058
2059 static void opt_dct_algo(const char *arg)
2060 {
2061 dct_algo = atoi(arg);
2062 }
2063
2064 static void opt_idct_algo(const char *arg)
2065 {
2066 idct_algo = atoi(arg);
2067 }
2068
2069 static void opt_me_threshold(const char *arg)
2070 {
2071 me_threshold = atoi(arg);
2072 }
2073
2074 static void opt_mb_threshold(const char *arg)
2075 {
2076 mb_threshold = atoi(arg);
2077 }
2078
2079 static void opt_error_resilience(const char *arg)
2080 {
2081 error_resilience = atoi(arg);
2082 }
2083
2084 static void opt_error_concealment(const char *arg)
2085 {
2086 error_concealment = atoi(arg);
2087 }
2088
2089 static void opt_debug(const char *arg)
2090 {
2091 debug = atoi(arg);
2092 }
2093
2094 static void opt_vismv(const char *arg)
2095 {
2096 debug_mv = atoi(arg);
2097 }
2098
2099 static void opt_verbose(const char *arg)
2100 {
2101 verbose = atoi(arg);
2102 av_log_set_level(atoi(arg));
2103 }
2104
2105 static void opt_frame_rate(const char *arg)
2106 {
2107 if (parse_frame_rate(&frame_rate, &frame_rate_base, arg) < 0) {
2108 fprintf(stderr, "Incorrect frame rate\n");
2109 exit(1);
2110 }
2111 }
2112
2113 static void opt_frame_crop_top(const char *arg)
2114 {
2115 frame_topBand = atoi(arg);
2116 if (frame_topBand < 0) {
2117 fprintf(stderr, "Incorrect top crop size\n");
2118 exit(1);
2119 }
2120 if ((frame_topBand % 2) != 0) {
2121 fprintf(stderr, "Top crop size must be a multiple of 2\n");
2122 exit(1);
2123 }
2124 if ((frame_topBand) >= frame_height){
2125 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2126 exit(1);
2127 }
2128 frame_height -= frame_topBand;
2129 }
2130
2131 static void opt_frame_crop_bottom(const char *arg)
2132 {
2133 frame_bottomBand = atoi(arg);
2134 if (frame_bottomBand < 0) {
2135 fprintf(stderr, "Incorrect bottom crop size\n");
2136 exit(1);
2137 }
2138 if ((frame_bottomBand % 2) != 0) {
2139 fprintf(stderr, "Bottom crop size must be a multiple of 2\n");
2140 exit(1);
2141 }
2142 if ((frame_bottomBand) >= frame_height){
2143 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2144 exit(1);
2145 }
2146 frame_height -= frame_bottomBand;
2147 }
2148
2149 static void opt_frame_crop_left(const char *arg)
2150 {
2151 frame_leftBand = atoi(arg);
2152 if (frame_leftBand < 0) {
2153 fprintf(stderr, "Incorrect left crop size\n");
2154 exit(1);
2155 }
2156 if ((frame_leftBand % 2) != 0) {
2157 fprintf(stderr, "Left crop size must be a multiple of 2\n");
2158 exit(1);
2159 }
2160 if ((frame_leftBand) >= frame_width){
2161 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2162 exit(1);
2163 }
2164 frame_width -= frame_leftBand;
2165 }
2166
2167 static void opt_frame_crop_right(const char *arg)
2168 {
2169 frame_rightBand = atoi(arg);
2170 if (frame_rightBand < 0) {
2171 fprintf(stderr, "Incorrect right crop size\n");
2172 exit(1);
2173 }
2174 if ((frame_rightBand % 2) != 0) {
2175 fprintf(stderr, "Right crop size must be a multiple of 2\n");
2176 exit(1);
2177 }
2178 if ((frame_rightBand) >= frame_width){
2179 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2180 exit(1);
2181 }
2182 frame_width -= frame_rightBand;
2183 }
2184
2185 static void opt_frame_size(const char *arg)
2186 {
2187 if (parse_image_size(&frame_width, &frame_height, arg) < 0) {
2188 fprintf(stderr, "Incorrect frame size\n");
2189 exit(1);
2190 }
2191 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2192 fprintf(stderr, "Frame size must be a multiple of 2\n");
2193 exit(1);
2194 }
2195 }
2196
2197
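/* integer RGB -> YCbCr conversion (BT.601 coefficients) used by opt_pad_color();
   values are scaled by 2^SCALEBITS so the conversion stays in integer arithmetic */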
2198 #define SCALEBITS 10
2199 #define ONE_HALF (1 << (SCALEBITS - 1))
2200 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
2201
2202 #define RGB_TO_Y(r, g, b) \
2203 ((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
2204 FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
2205
2206 #define RGB_TO_U(r1, g1, b1, shift)\
2207 (((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
2208 FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
2209
2210 #define RGB_TO_V(r1, g1, b1, shift)\
2211 (((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
2212 FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
2213
2214 static void opt_pad_color(const char *arg) {
2215 /* Input is expected to be six hex digits, similar to
2216 how colors are expressed in HTML tags (but without the leading #) */
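/* for example, "808080" (mid gray) maps to Y=128, U=128, V=128 */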
2217 int rgb = strtol(arg, NULL, 16);
2218 int r,g,b;
2219
2220 r = (rgb >> 16);
2221 g = ((rgb >> 8) & 255);
2222 b = (rgb & 255);
2223
2224 padcolor[0] = RGB_TO_Y(r,g,b);
2225 padcolor[1] = RGB_TO_U(r,g,b,0);
2226 padcolor[2] = RGB_TO_V(r,g,b,0);
2227 }
2228
2229 static void opt_frame_pad_top(const char *arg)
2230 {
2231 frame_padtop = atoi(arg);
2232 if (frame_padtop < 0) {
2233 fprintf(stderr, "Incorrect top pad size\n");
2234 exit(1);
2235 }
2236 if ((frame_padtop % 2) != 0) {
2237 fprintf(stderr, "Top pad size must be a multiple of 2\n");
2238 exit(1);
2239 }
2240 }
2241
2242 static void opt_frame_pad_bottom(const char *arg)
2243 {
2244 frame_padbottom = atoi(arg);
2245 if (frame_padbottom < 0) {
2246 fprintf(stderr, "Incorrect bottom pad size\n");
2247 exit(1);
2248 }
2249 if ((frame_padbottom % 2) != 0) {
2250 fprintf(stderr, "Bottom pad size must be a multiple of 2\n");
2251 exit(1);
2252 }
2253 }
2254
2255
2256 static void opt_frame_pad_left(const char *arg)
2257 {
2258 frame_padleft = atoi(arg);
2259 if (frame_padleft < 0) {
2260 fprintf(stderr, "Incorrect left pad size\n");
2261 exit(1);
2262 }
2263 if ((frame_padleft % 2) != 0) {
2264 fprintf(stderr, "Left pad size must be a multiple of 2\n");
2265 exit(1);
2266 }
2267 }
2268
2269
2270 static void opt_frame_pad_right(const char *arg)
2271 {
2272 frame_padright = atoi(arg);
2273 if (frame_padright < 0) {
2274 fprintf(stderr, "Incorrect right pad size\n");
2275 exit(1);
2276 }
2277 if ((frame_padright % 2) != 0) {
2278 fprintf(stderr, "Right pad size must be a multiple of 2\n");
2279 exit(1);
2280 }
2281 }
2282
2283
2284 static void opt_frame_pix_fmt(const char *arg)
2285 {
2286 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2287 }
2288
2289 static void opt_frame_aspect_ratio(const char *arg)
2290 {
2291 int x = 0, y = 0;
2292 double ar = 0;
2293 const char *p;
2294
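/* the ratio may be given either as "x:y" (e.g. "4:3") or as a single
   floating point value (e.g. "1.3333") */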
2295 p = strchr(arg, ':');
2296 if (p) {
2297 x = strtol(arg, (char **)&arg, 10);
2298 if (arg == p)
2299 y = strtol(arg+1, (char **)&arg, 10);
2300 if (x > 0 && y > 0)
2301 ar = (double)x / (double)y;
2302 } else
2303 ar = strtod(arg, (char **)&arg);
2304
2305 if (!ar) {
2306 fprintf(stderr, "Incorrect aspect ratio specification.\n");
2307 exit(1);
2308 }
2309 frame_aspect_ratio = ar;
2310 }
2311
2312 static void opt_gop_size(const char *arg)
2313 {
2314 gop_size = atoi(arg);
2315 }
2316
2317 static void opt_b_frames(const char *arg)
2318 {
2319 b_frames = atoi(arg);
2320 if (b_frames > FF_MAX_B_FRAMES) {
2321 fprintf(stderr, "\nCannot have more than %d B frames, increase FF_MAX_B_FRAMES.\n", FF_MAX_B_FRAMES);
2322 exit(1);
2323 } else if (b_frames < 1) {
2324 fprintf(stderr, "\nNumber of B frames must be higher than 0\n");
2325 exit(1);
2326 }
2327 }
2328
2329 static void opt_mb_decision(const char *arg)
2330 {
2331 mb_decision = atoi(arg);
2332 }
2333
2334 static void opt_mb_cmp(const char *arg)
2335 {
2336 mb_cmp = atoi(arg);
2337 }
2338
2339 static void opt_ildct_cmp(const char *arg)
2340 {
2341 ildct_cmp = atoi(arg);
2342 }
2343
2344 static void opt_sub_cmp(const char *arg)
2345 {
2346 sub_cmp = atoi(arg);
2347 }
2348
2349 static void opt_cmp(const char *arg)
2350 {
2351 cmp = atoi(arg);
2352 }
2353
2354 static void opt_pre_cmp(const char *arg)
2355 {
2356 pre_cmp = atoi(arg);
2357 }
2358
2359 static void opt_pre_me(const char *arg)
2360 {
2361 pre_me = atoi(arg);
2362 }
2363
2364 static void opt_lumi_mask(const char *arg)
2365 {
2366 lumi_mask = atof(arg);
2367 }
2368
2369 static void opt_dark_mask(const char *arg)
2370 {
2371 dark_mask = atof(arg);
2372 }
2373
2374 static void opt_scplx_mask(const char *arg)
2375 {
2376 scplx_mask = atof(arg);
2377 }
2378
2379 static void opt_tcplx_mask(const char *arg)
2380 {
2381 tcplx_mask = atof(arg);
2382 }
2383
2384 static void opt_p_mask(const char *arg)
2385 {
2386 p_mask = atof(arg);
2387 }
2388
2389 static void opt_qscale(const char *arg)
2390 {
2391 video_qscale = atof(arg);
2392 if (video_qscale < 0.01 ||
2393 video_qscale > 255) {
2394 fprintf(stderr, "qscale must be >= 0.01 and <= 255\n");
2395 exit(1);
2396 }
2397 }
2398
2399 static void opt_lmax(const char *arg)
2400 {
2401 video_lmax = atof(arg)*FF_QP2LAMBDA;
2402 }
2403
2404 static void opt_lmin(const char *arg)
2405 {
2406 video_lmin = atof(arg)*FF_QP2LAMBDA;
2407 }
2408
2409 static void opt_qmin(const char *arg)
2410 {
2411 video_qmin = atoi(arg);
2412 if (video_qmin < 1 ||
2413 video_qmin > 31) {
2414 fprintf(stderr, "qmin must be >= 1 and <= 31\n");
2415 exit(1);
2416 }
2417 }
2418
2419 static void opt_qmax(const char *arg)
2420 {
2421 video_qmax = atoi(arg);
2422 if (video_qmax < 1 ||
2423 video_qmax > 31) {
2424 fprintf(stderr, "qmax must be >= 1 and <= 31\n");
2425 exit(1);
2426 }
2427 }
2428
2429 static void opt_mb_qmin(const char *arg)
2430 {
2431 video_mb_qmin = atoi(arg);
2432 if (video_mb_qmin < 1 ||
2433 video_mb_qmin > 31) {
2434 fprintf(stderr, "mb_qmin must be >= 1 and <= 31\n");
2435 exit(1);
2436 }
2437 }
2438
2439 static void opt_mb_qmax(const char *arg)
2440 {
2441 video_mb_qmax = atoi(arg);
2442 if (video_mb_qmax < 1 ||
2443 video_mb_qmax > 31) {
2444 fprintf(stderr, "mb_qmax must be >= 1 and <= 31\n");
2445 exit(1);
2446 }
2447 }
2448
2449 static void opt_qdiff(const char *arg)
2450 {
2451 video_qdiff = atoi(arg);
2452 if (video_qdiff < 1 ||
2453 video_qdiff > 31) {
2454 fprintf(stderr, "qdiff must be >= 1 and <= 31\n");
2455 exit(1);
2456 }
2457 }
2458
2459 static void opt_qblur(const char *arg)
2460 {
2461 video_qblur = atof(arg);
2462 }
2463
2464 static void opt_qcomp(const char *arg)
2465 {
2466 video_qcomp = atof(arg);
2467 }
2468
2469 static void opt_rc_initial_cplx(const char *arg)
2470 {
2471 video_rc_initial_cplx = atof(arg);
2472 }
2473 static void opt_b_qfactor(const char *arg)
2474 {
2475 video_b_qfactor = atof(arg);
2476 }
2477 static void opt_i_qfactor(const char *arg)
2478 {
2479 video_i_qfactor = atof(arg);
2480 }
2481 static void opt_b_qoffset(const char *arg)
2482 {
2483 video_b_qoffset = atof(arg);
2484 }
2485 static void opt_i_qoffset(const char *arg)
2486 {
2487 video_i_qoffset = atof(arg);
2488 }
2489
2490 static void opt_ibias(const char *arg)
2491 {
2492 video_intra_quant_bias = atoi(arg);
2493 }
2494 static void opt_pbias(const char *arg)
2495 {
2496 video_inter_quant_bias = atoi(arg);
2497 }
2498
2499 static void opt_packet_size(const char *arg)
2500 {
2501 packet_size= atoi(arg);
2502 }
2503
2504 static void opt_error_rate(const char *arg)
2505 {
2506 error_rate= atoi(arg);
2507 }
2508
2509 static void opt_strict(const char *arg)
2510 {
2511 strict= atoi(arg);
2512 }
2513
2514 static void opt_top_field_first(const char *arg)
2515 {
2516 top_field_first= atoi(arg);
2517 }
2518
2519 static void opt_noise_reduction(const char *arg)
2520 {
2521 noise_reduction= atoi(arg);
2522 }
2523
2524 static void opt_qns(const char *arg)
2525 {
2526 qns= atoi(arg);
2527 }
2528
2529 static void opt_sc_threshold(const char *arg)
2530 {
2531 sc_threshold= atoi(arg);
2532 }
2533
2534 static void opt_me_range(const char *arg)
2535 {
2536 me_range = atoi(arg);
2537 }
2538
2539 static void opt_thread_count(const char *arg)
2540 {
2541 thread_count= atoi(arg);
2542 #if !defined(HAVE_PTHREADS) && !defined(HAVE_W32THREADS)
2543 if (verbose >= 0)
2544 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2545 #endif
2546 }
2547
2548 static void opt_audio_bitrate(const char *arg)
2549 {
2550 audio_bit_rate = atoi(arg) * 1000;
2551 }
2552
2553 static void opt_audio_rate(const char *arg)
2554 {
2555 audio_sample_rate = atoi(arg);
2556 }
2557
2558 static void opt_audio_channels(const char *arg)
2559 {
2560 audio_channels = atoi(arg);
2561 }
2562
2563 static void opt_video_device(const char *arg)
2564 {
2565 video_device = av_strdup(arg);
2566 }
2567
2568 static void opt_video_channel(const char *arg)
2569 {
2570 video_channel = strtol(arg, NULL, 0);
2571 }
2572
2573 static void opt_video_standard(const char *arg)
2574 {
2575 video_standard = av_strdup(arg);
2576 }
2577
2578 static void opt_audio_device(const char *arg)
2579 {
2580 audio_device = av_strdup(arg);
2581 }
2582
2583 static void opt_dv1394(const char *arg)
2584 {
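/* grab DV over IEEE 1394: the DV stream already carries its own audio, so
   no separate audio grab device is used */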
2585 video_grab_format = "dv1394";
2586 audio_grab_format = NULL;
2587 }
2588
2589 static void opt_audio_codec(const char *arg)
2590 {
2591 AVCodec *p;
2592
2593 if (!strcmp(arg, "copy")) {
2594 audio_stream_copy = 1;
2595 } else {
2596 p = first_avcodec;
2597 while (p) {
2598 if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_AUDIO)
2599 break;
2600 p = p->next;
2601 }
2602 if (p == NULL) {
2603 fprintf(stderr, "Unknown audio codec '%s'\n", arg);
2604 exit(1);
2605 } else {
2606 audio_codec_id = p->id;
2607 }
2608 }
2609 }
2610
2611 static void add_frame_hooker(const char *arg)
2612 {
2613 int argc = 0;
2614 char *argv[64];
2615 int i;
2616 char *args = av_strdup(arg);
2617
2618 using_vhook = 1;
2619
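/* split the argument on spaces into an argv[] vector (at most 63 tokens)
   and hand it to the hook module loader */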
2620 argv[0] = strtok(args, " ");
2621 while (argc < 62 && (argv[++argc] = strtok(NULL, " "))) {
2622 }
2623
2624 i = frame_hook_add(argc, argv);
2625
2626 if (i != 0) {
2627 fprintf(stderr, "Failed to add video hook function: %s\n", arg);
2628 exit(1);
2629 }
2630 }
2631
2632 const char *motion_str[] = {
2633 "zero",
2634 "full",
2635 "log",
2636 "phods",
2637 "epzs",
2638 "x1",
2639 NULL,
2640 };
2641
2642 static void opt_motion_estimation(const char *arg)
2643 {
2644 const char **p;
2645 p = motion_str;
2646 for(;;) {
2647 if (!*p) {
2648 fprintf(stderr, "Unknown motion estimation method '%s'\n", arg);
2649 exit(1);
2650 }
2651 if (!strcmp(*p, arg))
2652 break;
2653 p++;
2654 }
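/* motion estimation method ids in libavcodec start at 1 (ME_ZERO), hence the +1 below */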
2655 me_method = (p - motion_str) + 1;
2656 }
2657
2658 static void opt_video_codec(const char *arg)
2659 {
2660 AVCodec *p;
2661
2662 if (!strcmp(arg, "copy")) {
2663 video_stream_copy = 1;
2664 } else {
2665 p = first_avcodec;
2666 while (p) {
2667 if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_VIDEO)
2668 break;
2669 p = p->next;
2670 }
2671 if (p == NULL) {
2672 fprintf(stderr, "Unknown video codec '%s'\n", arg);
2673 exit(1);
2674 } else {
2675 video_codec_id = p->id;
2676 }
2677 }
2678 }
2679
2680 static void opt_map(const char *arg)
2681 {
2682 AVStreamMap *m;
2683 const char *p;
2684
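/* a stream map is given as "file_index:stream_index"; strtol() stops at the
   first non-digit, so any single-character separator is accepted */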
2685 p = arg;
2686 m = &stream_maps[nb_stream_maps++];
2687
2688 m->file_index = strtol(arg, (char **)&p, 0);
2689 if (*p)
2690 p++;
2691
2692 m->stream_index = strtol(p, (char **)&p, 0);
2693 }
2694
2695 static void opt_map_meta_data(const char *arg)
2696 {
2697 AVMetaDataMap *m;
2698 const char *p;
2699
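/* given as "outfile_index:infile_index": copy the metadata of that input
   file into that output file */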
2700 p = arg;
2701 m = &meta_data_maps[nb_meta_data_maps++];
2702
2703 m->out_file = strtol(arg, (char **)&p, 0);
2704 if (*p)
2705 p++;
2706
2707 m->in_file = strtol(p, (char **)&p, 0);
2708 }
2709
2710 static void opt_recording_time(const char *arg)
2711 {
2712 recording_time = parse_date(arg, 1);
2713 }
2714
2715 static void opt_start_time(const char *arg)
2716 {
2717 start_time = parse_date(arg, 1);
2718 }
2719
2720 static void opt_rec_timestamp(const char *arg)
2721 {
2722 rec_timestamp = parse_date(arg, 0) / 1000000;
2723 }
2724
2725 static void opt_input_ts_offset(const char *arg)
2726 {
2727 input_ts_offset = parse_date(arg, 1);
2728 }
2729
2730 static void opt_input_file(const char *filename)
2731 {
2732 AVFormatContext *ic;
2733 AVFormatParameters params, *ap = &params;
2734 int err, i, ret, rfps, rfps_base;
2735
2736 if (!strcmp(filename, "-"))
2737 filename = "pipe:";
2738
2739 using_stdin |= !strcmp(filename, "pipe:" ) ||
2740 !strcmp( filename, "/dev/stdin" );
2741
2742 /* get default parameters from command line */
2743 memset(ap, 0, sizeof(*ap));
2744 ap->sample_rate = audio_sample_rate;
2745 ap->channels = audio_channels;
2746 ap->frame_rate = frame_rate;
2747 ap->frame_rate_base = frame_rate_base;
2748 ap->width = frame_width + frame_padleft + frame_padright;
2749 ap->height = frame_height + frame_padtop + frame_padbottom;
2750 ap->image_format = image_format;
2751 ap->pix_fmt = frame_pix_fmt;
2752
2753 /* open the input file with generic libav function */
2754 err = av_open_input_file(&ic, filename, file_iformat, 0, ap);
2755 if (err < 0) {
2756 print_error(filename, err);
2757 exit(1);
2758 }
2759
2760 /* If there is not enough info to get the stream parameters, decode the
2761 first frames to get them (used for MPEG streams, for example) */
2762 ret = av_find_stream_info(ic);
2763 if (ret < 0) {
2764 if (verbose >= 0) fprintf(stderr, "%s: could not find codec parameters\n", filename);
2765 exit(1);
2766 }
2767
2768 /* if seeking requested, we execute it */
2769 if (start_time != 0) {
2770 int64_t timestamp;
2771
2772 timestamp = start_time;
2773 /* add the stream start time */
2774 if (ic->start_time != AV_NOPTS_VALUE)
2775 timestamp += ic->start_time;
2776 ret = av_seek_frame(ic, -1, timestamp);
2777 if (ret < 0) {
2778 fprintf(stderr, "%s: could not seek to position %0.3f\n",
2779 filename, (double)timestamp / AV_TIME_BASE);
2780 }
2781 /* reset seek info */
2782 start_time = 0;
2783 }
2784
2785 /* update the current parameters so that they match those of the input stream */
2786 for(i=0;i<ic->nb_streams;i++) {
2787 AVCodecContext *enc = &ic->streams[i]->codec;
2788 #if defined(HAVE_PTHREADS) || defined(HAVE_W32THREADS)
2789 if(thread_count>1)
2790 avcodec_thread_init(enc, thread_count);
2791 #endif
2792 enc->thread_count= thread_count;
2793 switch(enc->codec_type) {
2794 case CODEC_TYPE_AUDIO:
2795 //fprintf(stderr, "\nInput Audio channels: %d", enc->channels);
2796 audio_channels = enc->channels;
2797 audio_sample_rate = enc->sample_rate;
2798 break;
2799 case CODEC_TYPE_VIDEO:
2800 frame_height = enc->height;
2801 frame_width = enc->width;
2802 frame_aspect_ratio = av_q2d(enc->sample_aspect_ratio) * enc->width / enc->height;
2803 frame_pix_fmt = enc->pix_fmt;
2804 rfps = ic->streams[i]->r_frame_rate;
2805 rfps_base = ic->streams[i]->r_frame_rate_base;
2806 enc->workaround_bugs = workaround_bugs;
2807 enc->error_resilience = error_resilience;
2808 enc->error_concealment = error_concealment;
2809 enc->idct_algo = idct_algo;
2810 enc->debug = debug;
2811 enc->debug_mv = debug_mv;
2812 if(bitexact)
2813 enc->flags|= CODEC_FLAG_BITEXACT;
2814 if(me_threshold)
2815 enc->debug |= FF_DEBUG_MV;
2816
2817 assert(enc->frame_rate_base == rfps_base); // should be true for now
2818 if (enc->frame_rate != rfps) {
2819
2820 if (verbose >= 0)
2821 fprintf(stderr, "\nStream %d seems to come from a film source: %2.2f -> %2.2f\n",
2822 i, (float)enc->frame_rate / enc->frame_rate_base,
2824 (float)rfps / rfps_base);
2825 }
2826 /* update the current frame rate to match the stream frame rate */
2827 frame_rate = rfps;
2828 frame_rate_base = rfps_base;
2829
2830 enc->rate_emu = rate_emu;
2831 break;
2832 case CODEC_TYPE_DATA:
2833 break;
2834 default:
2835 av_abort();
2836 }
2837 }
2838
2839 input_files[nb_input_files] = ic;
2840 input_files_ts_offset[nb_input_files] = input_ts_offset;
2841 /* dump the file content */
2842 if (verbose >= 0)
2843 dump_format(ic, nb_input_files, filename, 0);
2844
2845 nb_input_files++;
2846 file_iformat = NULL;
2847 file_oformat = NULL;
2848 image_format = NULL;
2849
2850 rate_emu = 0;
2851 }
2852
2853 static void check_audio_video_inputs(int *has_video_ptr, int *has_audio_ptr)
2854 {
2855 int has_video, has_audio, i, j;
2856 AVFormatContext *ic;
2857
2858 has_video = 0;
2859 has_audio = 0;
2860 for(j=0;j<nb_input_files;j++) {
2861 ic = input_files[j];
2862 for(i=0;i<ic->nb_streams;i++) {
2863 AVCodecContext *enc = &ic->streams[i]->codec;
2864 switch(enc->codec_type) {
2865 case CODEC_TYPE_AUDIO:
2866 has_audio = 1;
2867 break;
2868 case CODEC_TYPE_VIDEO:
2869 has_video = 1;
2870 break;
2871 case CODEC_TYPE_DATA:
2872 break;
2873 default:
2874 av_abort();
2875 }
2876 }
2877 }
2878 *has_video_ptr = has_video;
2879 *has_audio_ptr = has_audio;
2880 }
2881
2882 static void opt_output_file(const char *filename)
2883 {
2884 AVStream *st;
2885 AVFormatContext *oc;
2886 int use_video, use_audio, nb_streams, input_has_video, input_has_audio;
2887 int codec_id;
2888 AVFormatParameters params, *ap = &params;
2889
2890 if (!strcmp(filename, "-"))
2891 filename = "pipe:";
2892
2893 oc = av_alloc_format_context();
2894
2895 if (!file_oformat) {
2896 file_oformat = guess_format(NULL, filename, NULL);
2897 if (!file_oformat) {
2898 fprintf(stderr, "Unable for find a suitable output format for '%s'\n",
2899 filename);
2900 exit(1);
2901 }
2902 }
2903
2904 oc->oformat = file_oformat;
2905
2906 if (!strcmp(file_oformat->name, "ffm") &&
2907 strstart(filename, "http:", NULL)) {
2908 /* special case for files sent to ffserver: we get the stream
2909 parameters from ffserver */
2910 if (read_ffserver_streams(oc, filename) < 0) {
2911 fprintf(stderr, "Could not read stream parameters from '%s'\n", filename);
2912 exit(1);
2913 }
2914 } else {
2915 use_video = file_oformat->video_codec != CODEC_ID_NONE || video_stream_copy;
2916 use_audio = file_oformat->audio_codec != CODEC_ID_NONE || audio_stream_copy;
2917
2918 /* disable a stream type if there is at least one input file and
2919 none of the inputs provides that type */
2920 if (nb_input_files > 0) {
2921 check_audio_video_inputs(&input_has_video, &input_has_audio);
2922 if (!input_has_video)
2923 use_video = 0;
2924 if (!input_has_audio)
2925 use_audio = 0;
2926 }
2927
2928 /* manual disable */
2929 if (audio_disable) {
2930 use_audio = 0;
2931 }
2932 if (video_disable) {
2933 use_video = 0;
2934 }
2935
2936 nb_streams = 0;
2937 if (use_video) {
2938 AVCodecContext *video_enc;
2939
2940 st = av_new_stream(oc, nb_streams++);
2941 if (!st) {
2942 fprintf(stderr, "Could not alloc stream\n");
2943 exit(1);
2944 }
2945 #if defined(HAVE_PTHREADS) || defined(HAVE_W32THREADS)
2946 if(thread_count>1)
2947 avcodec_thread_init(&st->codec, thread_count);
2948 #endif
2949
2950 video_enc = &st->codec;
2951
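/* MOV-family muxers (mp4/mov/3gp) store codec setup data out of band, so
   request global headers (extradata) from the encoder */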
2952 if(!strcmp(file_oformat->name, "mp4") || !strcmp(file_oformat->name, "mov") || !strcmp(file_oformat->name, "3gp"))
2953 video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
2954 if (video_stream_copy) {
2955 st->stream_copy = 1;
2956 video_enc->codec_type = CODEC_TYPE_VIDEO;
2957 } else {
2958 char *p;
2959 int i;
2960 AVCodec *codec;
2961
2962 codec_id = file_oformat->video_codec;
2963 if (video_codec_id != CODEC_ID_NONE)
2964 codec_id = video_codec_id;
2965
2966 video_enc->codec_id = codec_id;
2967 codec = avcodec_find_encoder(codec_id);
2968
2969 video_enc->bit_rate = video_bit_rate;
2970 video_enc->bit_rate_tolerance = video_bit_rate_tolerance;
2971 video_enc->frame_rate = frame_rate;
2972 video_enc->frame_rate_base = frame_rate_base;
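/* if the encoder only supports a fixed set of frame rates, pick the
   supported rate closest to the requested one */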
2973 if(codec && codec->supported_framerates){
2974 const AVRational *p= codec->supported_framerates;
2975 AVRational req= (AVRational){frame_rate, frame_rate_base};
2976 const AVRational *best=NULL;
2977 AVRational best_error= (AVRational){INT_MAX, 1};
2978 for(; p->den!=0; p++){
2979 AVRational error= av_sub_q(req, *p);
2980 if(error.num <0) error.num *= -1;
2981 if(av_cmp_q(error, best_error) < 0){
2982 best_error= error;
2983 best= p;
2984 }
2985 }
2986 video_enc->frame_rate = best->num;
2987 video_enc->frame_rate_base= best->den;
2988 }
2989
2990 video_enc->width = frame_width + frame_padright + frame_padleft;
2991 video_enc->height = frame_height + frame_padtop + frame_padbottom;
2992 video_enc->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255);
2993 video_enc->pix_fmt = frame_pix_fmt;
2994
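/* likewise, if the requested pixel format is not supported by the
   encoder, fall back to its first supported format */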
2995 if(codec && codec->pix_fmts){
2996 const enum PixelFormat *p= codec->pix_fmts;
2997 for(; *p!=-1; p++){
2998 if(*p == video_enc->pix_fmt)
2999 break;
3000 }
3001 if(*p == -1)
3002 video_enc->pix_fmt = codec->pix_fmts[0];
3003 }
3004
3005 if (!intra_only)
3006 video_enc->gop_size = gop_size;
3007 else
3008 video_enc->gop_size = 0;
3009 if (video_qscale || same_quality) {
3010 video_enc->flags |= CODEC_FLAG_QSCALE;
3011 st->quality = FF_QP2LAMBDA * video_qscale;
3012 }
3013
3014 if(intra_matrix)
3015 video_enc->intra_matrix = intra_matrix;
3016 if(inter_matrix)
3017 video_enc->inter_matrix = inter_matrix;
3018
3019 if(bitexact)
3020 video_enc->flags |= CODEC_FLAG_BITEXACT;
3021
3022 video_enc->mb_decision = mb_decision;
3023 video_enc->mb_cmp = mb_cmp;
3024 video_enc->ildct_cmp = ildct_cmp;
3025 video_enc->me_sub_cmp = sub_cmp;
3026 video_enc->me_cmp = cmp;
3027 video_enc->me_pre_cmp = pre_cmp;
3028 video_enc->pre_me = pre_me;
3029 video_enc->lumi_masking = lumi_mask;
3030 video_enc->dark_masking = dark_mask;
3031 video_enc->spatial_cplx_masking = scplx_mask;
3032 video_enc->temporal_cplx_masking = tcplx_mask;
3033 video_enc->p_masking = p_mask;
3034 video_enc->quantizer_noise_shaping= qns;
3035
3036 if (use_umv) {
3037 video_enc->flags |= CODEC_FLAG_H263P_UMV;
3038 }
3039 if (use_ss) {
3040 video_enc->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;
3041 }
3042 if (use_aic) {
3043 video_enc->flags |= CODEC_FLAG_H263P_AIC;
3044 }
3045 if (use_aiv) {
3046 video_enc->flags |= CODEC_FLAG_H263P_AIV;
3047 }
3048 if (use_4mv) {
3049 video_enc->flags |= CODEC_FLAG_4MV;
3050 }
3051 if (use_obmc) {
3052 video_enc->flags |= CODEC_FLAG_OBMC;
3053 }
3054 if (use_loop) {
3055 video_enc->flags |= CODEC_FLAG_LOOP_FILTER;
3056 }
3057
3058 if(use_part) {
3059 video_enc->flags |= CODEC_FLAG_PART;
3060 }
3061 if (use_alt_scan) {
3062 video_enc->flags |= CODEC_FLAG_ALT_SCAN;
3063 }
3064 if (use_trell) {
3065 video_enc->flags |= CODEC_FLAG_TRELLIS_QUANT;
3066 }
3067 if (use_scan_offset) {
3068 video_enc->flags |= CODEC_FLAG_SVCD_SCAN_OFFSET;
3069 }
3070 if (closed_gop) {
3071 video_enc->flags |= CODEC_FLAG_CLOSED_GOP;
3072 }
3073 if (use_qpel) {
3074 video_enc->flags |= CODEC_FLAG_QPEL;
3075 }
3076 if (use_qprd) {
3077 video_enc->flags |= CODEC_FLAG_QP_RD;
3078 }
3079 if (use_cbprd) {
3080 video_enc->flags |= CODEC_FLAG_CBP_RD;
3081 }
3082 if (b_frames) {
3083 video_enc->max_b_frames = b_frames;
3084 video_enc->b_frame_strategy = 0;
3085 video_enc->b_quant_factor = 2.0;
3086 }
3087 if (do_interlace_dct) {
3088 video_enc->flags |= CODEC_FLAG_INTERLACED_DCT;
3089 }
3090 if (do_interlace_me) {
3091 video_enc->flags |= CODEC_FLAG_INTERLACED_ME;
3092 }
3093 video_enc->qmin = video_qmin;
3094 video_enc->qmax = video_qmax;
3095 video_enc->lmin = video_lmin;
3096 video_enc->lmax = video_lmax;
3097 video_enc->mb_qmin = video_mb_qmin;
3098 video_enc->mb_qmax = video_mb_qmax;
3099 video_enc->max_qdiff = video_qdiff;
3100 video_enc->qblur = video_qblur;
3101 video_enc->qcompress = video_qcomp;
3102 video_enc->rc_eq = video_rc_eq;
3103 video_enc->debug = debug;
3104 video_enc->debug_mv = debug_mv;
3105 video_enc->thread_count = thread_count;
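/* parse the rc_override string: "start_frame,end_frame,q" groups separated
   by '/'; q > 0 forces that qscale for the range, while a negative q selects
   a quality factor of -q/100 */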
3106 p= video_rc_override_string;
3107 for(i=0; p; i++){
3108 int start, end, q;
3109 int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
3110 if(e!=3){
3111 fprintf(stderr, "error parsing rc_override\n");
3112 exit(1);
3113 }
3114 video_enc->rc_override=
3115 av_realloc(video_enc->rc_override,
3116 sizeof(RcOverride)*(i+1));
3117 video_enc->rc_override[i].start_frame= start;
3118 video_enc->rc_override[i].end_frame = end;
3119 if(q>0){
3120 video_enc->rc_override[i].qscale= q;
3121 video_enc->rc_override[i].quality_factor= 1.0;
3122 }
3123 else{
3124 video_enc->rc_override[i].qscale= 0;
3125 video_enc->rc_override[i].quality_factor= -q/100.0;
3126 }
3127 p= strchr(p, '/');
3128 if(p) p++;
3129 }
3130 video_enc->rc_override_count=i;
3131
3132 video_enc->rc_max_rate = video_rc_max_rate;
3133 video_enc->rc_min_rate = video_rc_min_rate;
3134 video_enc->rc_buffer_size = video_rc_buffer_size;
3135 video_enc->rc_buffer_aggressivity= video_rc_buffer_aggressivity;
3136 video_enc->rc_initial_cplx= video_rc_initial_cplx;
3137 video_enc->i_quant_factor = video_i_qfactor;
3138 video_enc->b_quant_factor = video_b_qfactor;
3139 video_enc->i_quant_offset = video_i_qoffset;
3140 video_enc->b_quant_offset = video_b_qoffset;
3141 video_enc->intra_quant_bias = video_intra_quant_bias;
3142 video_enc->inter_quant_bias = video_inter_quant_bias;
3143 video_enc->dct_algo = dct_algo;
3144 video_enc->idct_algo = idct_algo;
3145 video_enc->me_threshold= me_threshold;
3146 video_enc->mb_threshold= mb_threshold;
3147 video_enc->intra_dc_precision= intra_dc_precision - 8;
3148 video_enc->strict_std_compliance = strict;
3149 video_enc->error_rate = error_rate;
3150 video_enc->noise_reduction= noise_reduction;
3151 video_enc->scenechange_threshold= sc_threshold;
3152 video_enc->me_range = me_range;
3153 video_enc->coder_type= coder;
3154 video_enc->context_model= context;
3155 video_enc->prediction_method= predictor;
3156 video_enc->profile= video_profile;
3157 video_enc->level= video_level;
3158 video_enc->nsse_weight= nsse_weight;
3159
3160 if(packet_size){
3161 video_enc->rtp_mode= 1;
3162 video_enc->rtp_payload_size= packet_size;
3163 }
3164
3165 if (do_psnr)
3166 video_enc->flags|= CODEC_FLAG_PSNR;
3167
3168 video_enc->me_method = me_method;
3169
3170 /* two pass mode */
3171 if (do_pass) {
3172 if (do_pass == 1) {
3173 video_enc->flags |= CODEC_FLAG_PASS1;
3174 } else {
3175 video_enc->flags |= CODEC_FLAG_PASS2;
3176 }
3177 }
3178 }
3179 }
3180
3181 if (use_audio) {
3182 AVCodecContext *audio_enc;
3183
3184 st = av_new_stream(oc, nb_streams++);
3185 if (!st) {
3186 fprintf(stderr, "Could not alloc stream\n");
3187 exit(1);
3188 }
3189 #if defined(HAVE_PTHREADS) || defined(HAVE_W32THREADS)
3190 if(thread_count>1)
3191 avcodec_thread_init(&st->codec, thread_count);
3192 #endif
3193
3194 audio_enc = &st->codec;
3195 audio_enc->codec_type = CODEC_TYPE_AUDIO;
3196
3197 if(!strcmp(file_oformat->name, "mp4") || !strcmp(file_oformat->name, "mov") || !strcmp(file_oformat->name, "3gp"))
3198 audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
3199 if (audio_stream_copy) {
3200 st->stream_copy = 1;
3201 audio_enc->channels = audio_channels;
3202 } else {
3203 codec_id = file_oformat->audio_codec;
3204 if (audio_codec_id != CODEC_ID_NONE)
3205 codec_id = audio_codec_id;
3206 audio_enc->codec_id = codec_id;
3207
3208 audio_enc->bit_rate = audio_bit_rate;
3209 audio_enc->strict_std_compliance = strict;
3210 audio_enc->thread_count = thread_count;
3211 /* For audio codecs other than AC3 or DTS we limit */
3212 /* the number of coded channels to stereo */
3213 if (audio_channels > 2 && codec_id != CODEC_ID_AC3
3214 && codec_id != CODEC_ID_DTS) {
3215 audio_enc->channels = 2;
3216 } else
3217 audio_enc->channels = audio_channels;
3218 }
3219 audio_enc->sample_rate = audio_sample_rate;
3220 }
3221
3222 oc->nb_streams = nb_streams;
3223
3224 if (!nb_streams) {
3225 fprintf(stderr, "No audio or video streams available\n");
3226 exit(1);
3227 }
3228
3229 oc->timestamp = rec_timestamp;
3230
3231 if (str_title)
3232 pstrcpy(oc->title, sizeof(oc->title), str_title);
3233 if (str_author)
3234 pstrcpy(oc->author, sizeof(oc->author), str_author);
3235 if (str_copyright)
3236 pstrcpy(oc->copyright, sizeof(oc->copyright), str_copyright);
3237 if (str_comment)
3238 pstrcpy(oc->comment, sizeof(oc->comment), str_comment);
3239 }
3240
3241 output_files[nb_output_files++] = oc;
3242
3243 strcpy(oc->filename, filename);
3244
3245 /* check the filename in case an image number is expected */
3246 if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
3247 if (filename_number_test(oc->filename) < 0) {
3248 print_error(oc->filename, AVERROR_NUMEXPECTED);
3249 exit(1);
3250 }
3251 }
3252
3253 if (!(oc->oformat->flags & AVFMT_NOFILE)) {
3254 /* test if it already exists, to avoid losing precious files */
3255 if (!file_overwrite &&
3256 (strchr(filename, ':') == NULL ||
3257 strstart(filename, "file:", NULL))) {
3258 if (url_exist(filename)) {
3259 int c;
3260
3261 if ( !using_stdin ) {
3262 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3263 fflush(stderr);
3264 c = getchar();
3265 if (toupper(c) != 'Y') {
3266 fprintf(stderr, "Not overwriting - exiting\n");
3267 exit(1);
3268 }
3269 }
3270 else {
3271 fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
3272 exit(1);
3273 }
3274 }
3275 }
3276
3277 /* open the file */
3278 if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
3279 fprintf(stderr, "Could not open '%s'\n", filename);
3280 exit(1);
3281 }
3282 }
3283
3284 memset(ap, 0, sizeof(*ap));
3285 ap->image_format = image_format;
3286 if (av_set_parameters(oc, ap) < 0) {
3287 fprintf(stderr, "%s: Invalid encoding parameters\n",
3288 oc->filename);
3289 exit(1);
3290 }
3291
3292 /* reset some options */
3293 file_oformat = NULL;
3294 file_iformat = NULL;
3295 image_format = NULL;
3296 audio_disable = 0;
3297 video_disable = 0;
3298 audio_codec_id = CODEC_ID_NONE;
3299 video_codec_id = CODEC_ID_NONE;
3300 audio_stream_copy = 0;
3301 video_stream_copy = 0;
3302 }
3303
3304 /* prepare dummy protocols for grab */
3305 static void prepare_grab(void)
3306 {
3307 int has_video, has_audio, i, j;
3308 AVFormatContext *oc;
3309 AVFormatContext *ic;
3310 AVFormatParameters vp1, *vp = &vp1;
3311 AVFormatParameters ap1, *ap = &ap1;
3312
3313 /* see if audio/video inputs are needed */
3314 has_video = 0;
3315 has_audio = 0;
3316 memset(ap, 0, sizeof(*ap));
3317 memset(vp, 0, sizeof(*vp