4909e50a06fc21ac4cc209ea572870acf3e94fb8
[libav.git] / ffmpeg.c
1 /*
2 * FFmpeg main
3 * Copyright (c) 2000-2003 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #define HAVE_AV_CONFIG_H
20 #include <limits.h>
21 #include "avformat.h"
22 #include "framehook.h"
23 #include "dsputil.h"
24
25 #ifndef CONFIG_WIN32
26 #include <unistd.h>
27 #include <fcntl.h>
28 #include <sys/ioctl.h>
29 #include <sys/time.h>
30 #include <termios.h>
31 #include <sys/resource.h>
32 #include <signal.h>
33 #endif
34 #ifdef CONFIG_OS2
35 #include <sys/types.h>
36 #include <sys/select.h>
37 #include <stdlib.h>
38 #endif
39 #undef time //needed because HAVE_AV_CONFIG_H is defined at the top
40 #include <time.h>
41
42 #include "cmdutils.h"
43
44 #undef NDEBUG
45 #include <assert.h>
46
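/* some C libraries do not define INFINITY; fall back to HUGE_VAL so that
   psnr() further down can return it for a zero error */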
47 #if !defined(INFINITY) && defined(HUGE_VAL)
48 #define INFINITY HUGE_VAL
49 #endif
50
51 /* select an input stream for an output stream */
52 typedef struct AVStreamMap {
53 int file_index;
54 int stream_index;
55 } AVStreamMap;
56
57 /** select an input file for an output file */
58 typedef struct AVMetaDataMap {
59 int out_file;
60 int in_file;
61 } AVMetaDataMap;
62
63 extern const OptionDef options[];
64
65 static void show_help(void);
66 static void show_license(void);
67
68 #define MAX_FILES 20
69
70 static AVFormatContext *input_files[MAX_FILES];
71 static int64_t input_files_ts_offset[MAX_FILES];
72 static int nb_input_files = 0;
73
74 static AVFormatContext *output_files[MAX_FILES];
75 static int nb_output_files = 0;
76
77 static AVStreamMap stream_maps[MAX_FILES];
78 static int nb_stream_maps;
79
80 static AVMetaDataMap meta_data_maps[MAX_FILES];
81 static int nb_meta_data_maps;
82
83 static AVInputFormat *file_iformat;
84 static AVOutputFormat *file_oformat;
85 static AVImageFormat *image_format;
86 static int frame_width = 0;
87 static int frame_height = 0;
88 static float frame_aspect_ratio = 0;
89 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
90 static int frame_padtop = 0;
91 static int frame_padbottom = 0;
92 static int frame_padleft = 0;
93 static int frame_padright = 0;
94 static int padcolor[3] = {16,128,128}; /* default to black */
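/* {16,128,128} is black in YUV terms: limited-range luma 16 with neutral
   chroma, matching the YUV420P padding code in fill_pad_region() below */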
95 static int frame_topBand = 0;
96 static int frame_bottomBand = 0;
97 static int frame_leftBand = 0;
98 static int frame_rightBand = 0;
99 static int max_frames[3] = {INT_MAX, INT_MAX, INT_MAX};
100 static int frame_rate = 25;
101 static int frame_rate_base = 1;
102 static int video_bit_rate = 200*1000;
103 static int video_bit_rate_tolerance = 4000*1000;
104 static float video_qscale = 0;
105 static int video_qmin = 2;
106 static int video_qmax = 31;
107 static int video_lmin = 2*FF_QP2LAMBDA;
108 static int video_lmax = 31*FF_QP2LAMBDA;
109 static int video_mb_lmin = 2*FF_QP2LAMBDA;
110 static int video_mb_lmax = 31*FF_QP2LAMBDA;
111 static int video_qdiff = 3;
112 static int video_lelim = 0;
113 static int video_celim = 0;
114 static float video_qblur = 0.5;
115 static float video_qsquish = 0.0;
116 static float video_qcomp = 0.5;
117 static uint16_t *intra_matrix = NULL;
118 static uint16_t *inter_matrix = NULL;
119 #if 0 //experimental, (can be removed)
120 static float video_rc_qsquish=1.0;
121 static float video_rc_qmod_amp=0;
122 static int video_rc_qmod_freq=0;
123 #endif
124 static char *video_rc_override_string=NULL;
125 static char *video_rc_eq="tex^qComp";
126 static int video_rc_buffer_size=0;
127 static float video_rc_buffer_aggressivity=1.0;
128 static int video_rc_max_rate=0;
129 static int video_rc_min_rate=0;
130 static float video_rc_initial_cplx=0;
131 static float video_b_qfactor = 1.25;
132 static float video_b_qoffset = 1.25;
133 static float video_i_qfactor = -0.8;
134 static float video_i_qoffset = 0.0;
135 static int video_intra_quant_bias= FF_DEFAULT_QUANT_BIAS;
136 static int video_inter_quant_bias= FF_DEFAULT_QUANT_BIAS;
137 static int me_method = ME_EPZS;
138 static int video_disable = 0;
139 static int video_discard = 0;
140 static int video_codec_id = CODEC_ID_NONE;
141 static int video_codec_tag = 0;
142 static int same_quality = 0;
143 static int b_frames = 0;
144 static int b_strategy = 0;
145 static int mb_decision = FF_MB_DECISION_SIMPLE;
146 static int ildct_cmp = FF_CMP_VSAD;
147 static int mb_cmp = FF_CMP_SAD;
148 static int sub_cmp = FF_CMP_SAD;
149 static int cmp = FF_CMP_SAD;
150 static int pre_cmp = FF_CMP_SAD;
151 static int pre_me = 0;
152 static float lumi_mask = 0;
153 static float dark_mask = 0;
154 static float scplx_mask = 0;
155 static float tcplx_mask = 0;
156 static float p_mask = 0;
157 static int use_4mv = 0;
158 static int use_obmc = 0;
159 static int use_loop = 0;
160 static int use_aic = 0;
161 static int use_aiv = 0;
162 static int use_umv = 0;
163 static int use_ss = 0;
164 static int use_alt_scan = 0;
165 static int use_trell = 0;
166 static int use_scan_offset = 0;
167 static int use_qpel = 0;
168 static int use_qprd = 0;
169 static int use_cbprd = 0;
170 static int use_mv0 = 0;
171 static int do_normalize_aqp = 0;
172 static int qns = 0;
173 static int closed_gop = 0;
174 static int strict_gop = 0;
175 static int no_output = 0;
176 static int do_deinterlace = 0;
177 static int do_interlace_dct = 0;
178 static int do_interlace_me = 0;
179 static int workaround_bugs = FF_BUG_AUTODETECT;
180 static int error_resilience = 2;
181 static int error_concealment = 3;
182 static int dct_algo = 0;
183 static int idct_algo = 0;
184 static int use_part = 0;
185 static int packet_size = 0;
186 static int error_rate = 0;
187 static int strict = 0;
188 static int top_field_first = -1;
189 static int noise_reduction = 0;
190 static int sc_threshold = 0;
191 static int debug = 0;
192 static int debug_mv = 0;
193 static int me_threshold = 0;
194 static int mb_threshold = 0;
195 static int intra_dc_precision = 8;
196 static int coder = 0;
197 static int context = 0;
198 static int predictor = 0;
199 static int video_profile = FF_PROFILE_UNKNOWN;
200 static int video_level = FF_LEVEL_UNKNOWN;
201 static int nsse_weight = 8;
202 static int subpel_quality= 8;
203 static int me_penalty_compensation= 256;
204 static int lowres= 0;
205 static int frame_skip_threshold= 0;
206 static int frame_skip_factor= 0;
207 static int frame_skip_exp= 0;
208 static int frame_skip_cmp= FF_CMP_DCTMAX;
209 extern int loop_input; /* currently a hack */
210
211 static int gop_size = 12;
212 static int intra_only = 0;
213 static int audio_sample_rate = 44100;
214 static int audio_bit_rate = 64000;
215 static int audio_disable = 0;
216 static int audio_channels = 1;
217 static int audio_codec_id = CODEC_ID_NONE;
218 static int audio_codec_tag = 0;
219
220 static int mux_rate= 0;
221 static int mux_packet_size= 0;
222 static float mux_preload= 0.5;
223 static float mux_max_delay= 0.7;
224
225 static int64_t recording_time = 0;
226 static int64_t start_time = 0;
227 static int64_t rec_timestamp = 0;
228 static int64_t input_ts_offset = 0;
229 static int file_overwrite = 0;
230 static char *str_title = NULL;
231 static char *str_author = NULL;
232 static char *str_copyright = NULL;
233 static char *str_comment = NULL;
234 static int do_benchmark = 0;
235 static int do_hex_dump = 0;
236 static int do_pkt_dump = 0;
237 static int do_psnr = 0;
238 static int do_vstats = 0;
239 static int do_pass = 0;
240 static int bitexact = 0;
241 static char *pass_logfilename = NULL;
242 static int audio_stream_copy = 0;
243 static int video_stream_copy = 0;
244 static int video_sync_method= 1;
245 static int audio_sync_method= 0;
246 static int copy_ts= 0;
247 static int opt_shortest = 0; //
248
249 static int rate_emu = 0;
250
251 static char *video_grab_format = "video4linux";
252 static char *video_device = NULL;
253 static char *grab_device = NULL;
254 static int video_channel = 0;
255 static char *video_standard = "ntsc";
256
257 static char *audio_grab_format = "audio_device";
258 static char *audio_device = NULL;
259 static int audio_volume = 256;
260
261 static int using_stdin = 0;
262 static int using_vhook = 0;
263 static int verbose = 1;
264 static int thread_count= 1;
265 static int q_pressed = 0;
266 static int me_range = 0;
267 static int64_t video_size = 0;
268 static int64_t audio_size = 0;
269 static int64_t extra_size = 0;
270 static int nb_frames_dup = 0;
271 static int nb_frames_drop = 0;
272 static int input_sync;
273 static int limit_filesize = 0; //
274
275 static int pgmyuv_compatibility_hack=0;
276
277
278 #define DEFAULT_PASS_LOGFILENAME "ffmpeg2pass"
279
280 typedef struct AVOutputStream {
281 int file_index; /* file index */
282 int index; /* stream index in the output file */
283 int source_index; /* AVInputStream index */
284 AVStream *st; /* stream in the output file */
285 int encoding_needed; /* true if encoding needed for this stream */
286 int frame_number;
287 /* input pts and corresponding output pts
288 for A/V sync */
289 double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
290 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
291 /* video only */
292 int video_resample; /* video_resample and video_crop are mutually exclusive */
293 AVFrame pict_tmp; /* temporary image for resampling */
294 ImgReSampleContext *img_resample_ctx; /* for image resampling */
295
296 int video_crop; /* video_resample and video_crop are mutually exclusive */
297 int topBand; /* cropping area sizes */
298 int leftBand;
299
300 int video_pad; /* video_resample and video_pad are mutually exclusive */
301 int padtop; /* padding area sizes */
302 int padbottom;
303 int padleft;
304 int padright;
305
306 /* audio only */
307 int audio_resample;
308 ReSampleContext *resample; /* for audio resampling */
309 FifoBuffer fifo; /* for compression: one audio fifo per codec */
310 FILE *logfile;
311 } AVOutputStream;
312
313 typedef struct AVInputStream {
314 int file_index;
315 int index;
316 AVStream *st;
317 int discard; /* true if stream data should be discarded */
318 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
319 int64_t sample_index; /* current sample */
320
321 int64_t start; /* time when read started */
322 unsigned long frame; /* current frame */
323 int64_t next_pts; /* synthetic pts for cases where pkt.pts
324 is not defined */
325 int64_t pts; /* current pts */
326 int is_start; /* is 1 at the start and after a discontinuity */
327 } AVInputStream;
328
329 typedef struct AVInputFile {
330 int eof_reached; /* true if eof reached */
331 int ist_index; /* index of first stream in ist_table */
332 int buffer_size; /* current total buffer size */
333 int buffer_size_max; /* buffer size at which we consider we can stop
334 buffering */
335 int nb_streams; /* nb streams we are aware of */
336 } AVInputFile;
337
338 #ifndef CONFIG_WIN32
339
340 /* init terminal so that we can grab keys */
341 static struct termios oldtty;
342
343 static void term_exit(void)
344 {
345 tcsetattr (0, TCSANOW, &oldtty);
346 }
347
348 static volatile sig_atomic_t received_sigterm = 0;
349
350 static void
351 sigterm_handler(int sig)
352 {
353 received_sigterm = sig;
354 term_exit();
355 }
356
357 static void term_init(void)
358 {
359 struct termios tty;
360
361 tcgetattr (0, &tty);
362 oldtty = tty;
363
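/* switch the terminal to a raw-ish mode: no canonical line buffering and
   no echo, so read_key() below sees single keypresses (e.g. 'q') immediately */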
364 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
365 |INLCR|IGNCR|ICRNL|IXON);
366 tty.c_oflag |= OPOST;
367 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
368 tty.c_cflag &= ~(CSIZE|PARENB);
369 tty.c_cflag |= CS8;
370 tty.c_cc[VMIN] = 1;
371 tty.c_cc[VTIME] = 0;
372
373 tcsetattr (0, TCSANOW, &tty);
374
375 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
376 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
377 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
378 /*
379 register a function to be called at normal program termination
380 */
381 atexit(term_exit);
382 #ifdef CONFIG_BEOS_NETSERVER
383 fcntl(0, F_SETFL, fcntl(0, F_GETFL) | O_NONBLOCK);
384 #endif
385 }
386
387 /* read a key without blocking */
388 static int read_key(void)
389 {
390 int n = 1;
391 unsigned char ch;
392 #ifndef CONFIG_BEOS_NETSERVER
393 struct timeval tv;
394 fd_set rfds;
395
396 FD_ZERO(&rfds);
397 FD_SET(0, &rfds);
398 tv.tv_sec = 0;
399 tv.tv_usec = 0;
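/* zero timeout: select() only reports whether stdin already has data, it never blocks */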
400 n = select(1, &rfds, NULL, NULL, &tv);
401 #endif
402 if (n > 0) {
403 n = read(0, &ch, 1);
404 if (n == 1)
405 return ch;
406
407 return n;
408 }
409 return -1;
410 }
411
412 static int decode_interrupt_cb(void)
413 {
414 return q_pressed || (q_pressed = read_key() == 'q');
415 }
416
417 #else
418
419 static volatile int received_sigterm = 0;
420
421 /* no interactive support */
422 static void term_exit(void)
423 {
424 }
425
426 static void term_init(void)
427 {
428 }
429
430 static int read_key(void)
431 {
432 return 0;
433 }
434
435 #endif
436
437 static int read_ffserver_streams(AVFormatContext *s, const char *filename)
438 {
439 int i, err;
440 AVFormatContext *ic;
441
442 err = av_open_input_file(&ic, filename, NULL, FFM_PACKET_SIZE, NULL);
443 if (err < 0)
444 return err;
445 /* copy stream format */
446 s->nb_streams = ic->nb_streams;
447 for(i=0;i<ic->nb_streams;i++) {
448 AVStream *st;
449
450 st = av_mallocz(sizeof(AVStream));
451 memcpy(st, ic->streams[i], sizeof(AVStream));
452 s->streams[i] = st;
453 }
454
455 av_close_input_file(ic);
456 return 0;
457 }
458
459 #define MAX_AUDIO_PACKET_SIZE (128 * 1024)
460
461 static void do_audio_out(AVFormatContext *s,
462 AVOutputStream *ost,
463 AVInputStream *ist,
464 unsigned char *buf, int size)
465 {
466 uint8_t *buftmp;
467 static uint8_t *audio_buf = NULL;
468 static uint8_t *audio_out = NULL;
469 const int audio_out_size= 4*MAX_AUDIO_PACKET_SIZE;
470
471 int size_out, frame_bytes, ret;
472 AVCodecContext *enc= &ost->st->codec;
473
474 /* SC: dynamic allocation of buffers */
475 if (!audio_buf)
476 audio_buf = av_malloc(2*MAX_AUDIO_PACKET_SIZE);
477 if (!audio_out)
478 audio_out = av_malloc(audio_out_size);
479 if (!audio_buf || !audio_out)
480 return; /* Should signal an error ! */
481
482 if(audio_sync_method){
483 double delta = ost->sync_ipts * enc->sample_rate - ost->sync_opts
484 - fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2);
485 double idelta= delta*ist->st->codec.sample_rate / enc->sample_rate;
486 int byte_delta= ((int)idelta)*2*ist->st->codec.channels;
487
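/* delta is the current A/V drift measured in output samples: where the output
   should be (sync_ipts converted to samples) minus what has actually been
   queued so far (sync_opts plus the samples still sitting in the fifo,
   2 bytes per sample per channel) */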
488 //FIXME resample delay
489 if(fabs(delta) > 50){
490 if(ist->is_start){
491 if(byte_delta < 0){
492 byte_delta= FFMAX(byte_delta, -size);
493 size += byte_delta;
494 buf -= byte_delta;
495 if(verbose > 2)
496 fprintf(stderr, "discarding %d audio samples\n", (int)-delta);
497 if(!size)
498 return;
499 ist->is_start=0;
500 }else{
501 static uint8_t *input_tmp= NULL;
502 input_tmp= av_realloc(input_tmp, byte_delta + size);
503
504 if(byte_delta + size <= MAX_AUDIO_PACKET_SIZE)
505 ist->is_start=0;
506 else
507 byte_delta= MAX_AUDIO_PACKET_SIZE - size;
508
509 memset(input_tmp, 0, byte_delta);
510 memcpy(input_tmp + byte_delta, buf, size);
511 buf= input_tmp;
512 size += byte_delta;
513 if(verbose > 2)
514 fprintf(stderr, "adding %d audio samples of silence\n", (int)delta);
515 }
516 }else if(audio_sync_method>1){
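/* small drift: instead of cutting or padding samples, ask the resampler to
   compensate by at most audio_sync_method samples spread over one second of output */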
517 int comp= clip(delta, -audio_sync_method, audio_sync_method);
518 assert(ost->audio_resample);
519 if(verbose > 2)
520 fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
521 // fprintf(stderr, "drift:%f len:%d opts:%lld ipts:%lld fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(ost->sync_ipts * enc->sample_rate), fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2));
522 av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
523 }
524 }
525 }else
526 ost->sync_opts= lrintf(ost->sync_ipts * enc->sample_rate)
527 - fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2); //FIXME wrong
528
529 if (ost->audio_resample) {
530 buftmp = audio_buf;
531 size_out = audio_resample(ost->resample,
532 (short *)buftmp, (short *)buf,
533 size / (ist->st->codec.channels * 2));
534 size_out = size_out * enc->channels * 2;
535 } else {
536 buftmp = buf;
537 size_out = size;
538 }
539
540 /* now encode as many frames as possible */
541 if (enc->frame_size > 1) {
542 /* output resampled raw samples */
543 fifo_write(&ost->fifo, buftmp, size_out,
544 &ost->fifo.wptr);
545
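/* frame_size is in samples per channel; 2 bytes per sample since this path assumes 16-bit PCM */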
546 frame_bytes = enc->frame_size * 2 * enc->channels;
547
548 while (fifo_read(&ost->fifo, audio_buf, frame_bytes,
549 &ost->fifo.rptr) == 0) {
550 AVPacket pkt;
551 av_init_packet(&pkt);
552
553 ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
554 (short *)audio_buf);
555 audio_size += ret;
556 pkt.stream_index= ost->index;
557 pkt.data= audio_out;
558 pkt.size= ret;
559 if(enc->coded_frame)
560 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
561 pkt.flags |= PKT_FLAG_KEY;
562 av_interleaved_write_frame(s, &pkt);
563
564 ost->sync_opts += enc->frame_size;
565 }
566 } else {
567 AVPacket pkt;
568 av_init_packet(&pkt);
569
570 ost->sync_opts += size_out / (2 * enc->channels);
571
572 /* output a pcm frame */
573 /* XXX: change encoding codec API to avoid this ? */
574 switch(enc->codec->id) {
575 case CODEC_ID_PCM_S16LE:
576 case CODEC_ID_PCM_S16BE:
577 case CODEC_ID_PCM_U16LE:
578 case CODEC_ID_PCM_U16BE:
579 break;
580 default:
581 size_out = size_out >> 1;
582 break;
583 }
584 ret = avcodec_encode_audio(enc, audio_out, size_out,
585 (short *)buftmp);
586 audio_size += ret;
587 pkt.stream_index= ost->index;
588 pkt.data= audio_out;
589 pkt.size= ret;
590 if(enc->coded_frame)
591 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
592 pkt.flags |= PKT_FLAG_KEY;
593 av_interleaved_write_frame(s, &pkt);
594 }
595 }
596
597 static void pre_process_video_frame(AVInputStream *ist, AVPicture *picture, void **bufp)
598 {
599 AVCodecContext *dec;
600 AVPicture *picture2;
601 AVPicture picture_tmp;
602 uint8_t *buf = 0;
603
604 dec = &ist->st->codec;
605
606 /* deinterlace : must be done before any resize */
607 if (do_deinterlace || using_vhook) {
608 int size;
609
610 /* create temporary picture */
611 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
612 buf = av_malloc(size);
613 if (!buf)
614 return;
615
616 picture2 = &picture_tmp;
617 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
618
619 if (do_deinterlace){
620 if(avpicture_deinterlace(picture2, picture,
621 dec->pix_fmt, dec->width, dec->height) < 0) {
622 /* if error, do not deinterlace */
623 av_free(buf);
624 buf = NULL;
625 picture2 = picture;
626 }
627 } else {
628 if (img_convert(picture2, dec->pix_fmt, picture,
629 dec->pix_fmt, dec->width, dec->height) < 0) {
630 /* if error, do not copy */
631 av_free(buf);
632 buf = NULL;
633 picture2 = picture;
634 }
635 }
636 } else {
637 picture2 = picture;
638 }
639
640 frame_hook_process(picture2, dec->pix_fmt, dec->width, dec->height);
641
642 if (picture != picture2)
643 *picture = *picture2;
644 *bufp = buf;
645 }
646
647 /* we begin to correct av delay at this threshold */
648 #define AV_DELAY_MAX 0.100
649
650
651 /* Expects img to be yuv420 */
652 static void fill_pad_region(AVPicture* img, int height, int width,
653 int padtop, int padbottom, int padleft, int padright, int *color) {
654
655 int i, y, shift;
656 uint8_t *optr;
657
658 for (i = 0; i < 3; i++) {
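/* plane 0 is full-resolution luma; planes 1 and 2 are chroma, subsampled by
   two in each direction for YUV420, hence the shift */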
659 shift = (i == 0) ? 0 : 1;
660
661 if (padtop || padleft) {
662 memset(img->data[i], color[i], (((img->linesize[i] * padtop) +
663 padleft) >> shift));
664 }
665
666 if (padleft || padright) {
667 optr = img->data[i] + (img->linesize[i] * (padtop >> shift)) +
668 (img->linesize[i] - (padright >> shift));
669
670 for (y = 0; y < ((height - (padtop + padbottom) - 1) >> shift); y++) {
671 memset(optr, color[i], (padleft + padright) >> shift);
672 optr += img->linesize[i];
673 }
674 }
675
676 if (padbottom || padright) {
677 optr = img->data[i] + (((img->linesize[i] * (height - padbottom)) - padright) >> shift);
678 memset(optr, color[i], (((img->linesize[i] * padbottom) + padright) >> shift));
679 }
680 }
681 }
682
683 static int bit_buffer_size= 1024*256;
684 static uint8_t *bit_buffer= NULL;
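/* encoder scratch buffer shared by all output streams; av_encode() grows it to
   4 * width * height of the largest output video stream before allocating it */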
685
686 static void do_video_out(AVFormatContext *s,
687 AVOutputStream *ost,
688 AVInputStream *ist,
689 AVFrame *in_picture,
690 int *frame_size)
691 {
692 int nb_frames, i, ret;
693 AVFrame *final_picture, *formatted_picture;
694 AVFrame picture_format_temp, picture_crop_temp;
695 uint8_t *buf = NULL, *buf1 = NULL;
696 AVCodecContext *enc, *dec;
697 enum PixelFormat target_pixfmt;
698
699 avcodec_get_frame_defaults(&picture_format_temp);
700 avcodec_get_frame_defaults(&picture_crop_temp);
701
702 enc = &ost->st->codec;
703 dec = &ist->st->codec;
704
705 /* by default, we output a single frame */
706 nb_frames = 1;
707
708 *frame_size = 0;
709
710 if(video_sync_method){
711 double vdelta;
712 vdelta = ost->sync_ipts / av_q2d(enc->time_base) - ost->sync_opts;
713 //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
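/* vdelta is the drift in output frames: when the input runs ahead we emit
   duplicates (nb_frames > 1), when it lags more than ~1 frame behind we drop
   this picture (nb_frames = 0) */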
714 if (vdelta < -1.1)
715 nb_frames = 0;
716 else if (vdelta > 1.1)
717 nb_frames = lrintf(vdelta);
718 //fprintf(stderr, "vdelta:%f, ost->sync_opts:%lld, ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, ost->sync_ipts, nb_frames);
719 if (nb_frames == 0){
720 ++nb_frames_drop;
721 if (verbose>2)
722 fprintf(stderr, "*** drop!\n");
723 }else if (nb_frames > 1) {
724 nb_frames_dup += nb_frames;
725 if (verbose>2)
726 fprintf(stderr, "*** %d dup!\n", nb_frames-1);
727 }
728 }else
729 ost->sync_opts= lrintf(ost->sync_ipts / av_q2d(enc->time_base));
730
731 nb_frames= FFMIN(nb_frames, max_frames[CODEC_TYPE_VIDEO] - ost->frame_number);
732 if (nb_frames <= 0)
733 return;
734
735 /* convert pixel format if needed */
736 target_pixfmt = ost->video_resample || ost->video_pad
737 ? PIX_FMT_YUV420P : enc->pix_fmt;
738 if (dec->pix_fmt != target_pixfmt) {
739 int size;
740
741 /* create temporary picture */
742 size = avpicture_get_size(target_pixfmt, dec->width, dec->height);
743 buf = av_malloc(size);
744 if (!buf)
745 return;
746 formatted_picture = &picture_format_temp;
747 avpicture_fill((AVPicture*)formatted_picture, buf, target_pixfmt, dec->width, dec->height);
748
749 if (img_convert((AVPicture*)formatted_picture, target_pixfmt,
750 (AVPicture *)in_picture, dec->pix_fmt,
751 dec->width, dec->height) < 0) {
752
753 if (verbose >= 0)
754 fprintf(stderr, "pixel format conversion not handled\n");
755
756 goto the_end;
757 }
758 } else {
759 formatted_picture = in_picture;
760 }
761
762 /* XXX: resampling could be done before raw format conversion in
763 some cases to go faster */
764 /* XXX: only works for YUV420P */
765 if (ost->video_resample) {
766 final_picture = &ost->pict_tmp;
767 img_resample(ost->img_resample_ctx, (AVPicture*)final_picture, (AVPicture*)formatted_picture);
768
769 if (ost->padtop || ost->padbottom || ost->padleft || ost->padright) {
770 fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
771 ost->padtop, ost->padbottom, ost->padleft, ost->padright,
772 padcolor);
773 }
774
775 if (enc->pix_fmt != PIX_FMT_YUV420P) {
776 int size;
777
778 av_free(buf);
779 /* create temporary picture */
780 size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
781 buf = av_malloc(size);
782 if (!buf)
783 return;
784 final_picture = &picture_format_temp;
785 avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
786
787 if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
788 (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
789 enc->width, enc->height) < 0) {
790
791 if (verbose >= 0)
792 fprintf(stderr, "pixel format conversion not handled\n");
793
794 goto the_end;
795 }
796 }
797 } else if (ost->video_crop) {
798 picture_crop_temp.data[0] = formatted_picture->data[0] +
799 (ost->topBand * formatted_picture->linesize[0]) + ost->leftBand;
800
801 picture_crop_temp.data[1] = formatted_picture->data[1] +
802 ((ost->topBand >> 1) * formatted_picture->linesize[1]) +
803 (ost->leftBand >> 1);
804
805 picture_crop_temp.data[2] = formatted_picture->data[2] +
806 ((ost->topBand >> 1) * formatted_picture->linesize[2]) +
807 (ost->leftBand >> 1);
808
809 picture_crop_temp.linesize[0] = formatted_picture->linesize[0];
810 picture_crop_temp.linesize[1] = formatted_picture->linesize[1];
811 picture_crop_temp.linesize[2] = formatted_picture->linesize[2];
812 final_picture = &picture_crop_temp;
813 } else if (ost->video_pad) {
814 final_picture = &ost->pict_tmp;
815
816 for (i = 0; i < 3; i++) {
817 uint8_t *optr, *iptr;
818 int shift = (i == 0) ? 0 : 1;
819 int y, yheight;
820
821 /* set offset to start writing image into */
822 optr = final_picture->data[i] + (((final_picture->linesize[i] *
823 ost->padtop) + ost->padleft) >> shift);
824 iptr = formatted_picture->data[i];
825
826 yheight = (enc->height - ost->padtop - ost->padbottom) >> shift;
827 for (y = 0; y < yheight; y++) {
828 /* copy unpadded image row into padded image row */
829 memcpy(optr, iptr, formatted_picture->linesize[i]);
830 optr += final_picture->linesize[i];
831 iptr += formatted_picture->linesize[i];
832 }
833 }
834
835 fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
836 ost->padtop, ost->padbottom, ost->padleft, ost->padright,
837 padcolor);
838
839 if (enc->pix_fmt != PIX_FMT_YUV420P) {
840 int size;
841
842 av_free(buf);
843 /* create temporary picture */
844 size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
845 buf = av_malloc(size);
846 if (!buf)
847 return;
848 final_picture = &picture_format_temp;
849 avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
850
851 if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
852 (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
853 enc->width, enc->height) < 0) {
854
855 if (verbose >= 0)
856 fprintf(stderr, "pixel format conversion not handled\n");
857
858 goto the_end;
859 }
860 }
861 } else {
862 final_picture = formatted_picture;
863 }
864 /* duplicates frame if needed */
865 for(i=0;i<nb_frames;i++) {
866 AVPacket pkt;
867 av_init_packet(&pkt);
868 pkt.stream_index= ost->index;
869
870 if (s->oformat->flags & AVFMT_RAWPICTURE) {
871 /* raw pictures are written as AVPicture structure to
872 avoid any copies. We temporarily support the older
873 method. */
874 AVFrame* old_frame = enc->coded_frame;
875 enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
876 pkt.data= (uint8_t *)final_picture;
877 pkt.size= sizeof(AVPicture);
878 if(dec->coded_frame)
879 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
880 if(dec->coded_frame && dec->coded_frame->key_frame)
881 pkt.flags |= PKT_FLAG_KEY;
882
883 av_interleaved_write_frame(s, &pkt);
884 enc->coded_frame = old_frame;
885 } else {
886 AVFrame big_picture;
887
888 big_picture= *final_picture;
889 /* better than nothing: use input picture interlaced
890 settings */
891 big_picture.interlaced_frame = in_picture->interlaced_frame;
892 if(do_interlace_me || do_interlace_dct){
893 if(top_field_first == -1)
894 big_picture.top_field_first = in_picture->top_field_first;
895 else
896 big_picture.top_field_first = top_field_first;
897 }
898
899 /* handle sameq here; this is not correct because it may
900 not be a global option */
901 if (same_quality) {
902 big_picture.quality = ist->st->quality;
903 }else
904 big_picture.quality = ost->st->quality;
905 if(!me_threshold)
906 big_picture.pict_type = 0;
907 // big_picture.pts = AV_NOPTS_VALUE;
908 big_picture.pts= ost->sync_opts;
909 // big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
910 //av_log(NULL, AV_LOG_DEBUG, "%lld -> encoder\n", ost->sync_opts);
911 ret = avcodec_encode_video(enc,
912 bit_buffer, bit_buffer_size,
913 &big_picture);
914 //enc->frame_number = enc->real_pict_num;
915 if(ret>0){
916 pkt.data= bit_buffer;
917 pkt.size= ret;
918 if(enc->coded_frame)
919 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
920 /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %lld/%lld\n",
921 pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
922 pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
923
924 if(enc->coded_frame && enc->coded_frame->key_frame)
925 pkt.flags |= PKT_FLAG_KEY;
926 av_interleaved_write_frame(s, &pkt);
927 *frame_size = ret;
928 //fprintf(stderr,"\nFrame: %3d %3d size: %5d type: %d",
929 // enc->frame_number-1, enc->real_pict_num, ret,
930 // enc->pict_type);
931 /* if two pass, output log */
932 if (ost->logfile && enc->stats_out) {
933 fprintf(ost->logfile, "%s", enc->stats_out);
934 }
935 }
936 }
937 ost->sync_opts++;
938 ost->frame_number++;
939 }
940 the_end:
941 av_free(buf);
942 av_free(buf1);
943 }
944
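/* convert a mean squared error (already normalised by width*height*255^2 at
   the call sites) to dB; a zero error maps to INFINITY */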
945 static double psnr(double d){
946 if(d==0) return INFINITY;
947 return -10.0*log(d)/log(10.0);
948 }
949
950 static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
951 int frame_size)
952 {
953 static FILE *fvstats=NULL;
954 char filename[40];
955 time_t today2;
956 struct tm *today;
957 AVCodecContext *enc;
958 int frame_number;
959 int64_t ti;
960 double ti1, bitrate, avg_bitrate;
961
962 if (!fvstats) {
963 today2 = time(NULL);
964 today = localtime(&today2);
965 snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour,
966 today->tm_min,
967 today->tm_sec);
968 fvstats = fopen(filename,"w");
969 if (!fvstats) {
970 perror("fopen");
971 exit(1);
972 }
973 }
974
975 ti = MAXINT64;
976 enc = &ost->st->codec;
977 if (enc->codec_type == CODEC_TYPE_VIDEO) {
978 frame_number = ost->frame_number;
979 fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
980 if (enc->flags&CODEC_FLAG_PSNR)
981 fprintf(fvstats, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
982
983 fprintf(fvstats,"f_size= %6d ", frame_size);
984 /* compute pts value */
985 ti1 = ost->sync_opts * av_q2d(enc->time_base);
986 if (ti1 < 0.01)
987 ti1 = 0.01;
988
989 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
990 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
991 fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
992 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
993 fprintf(fvstats,"type= %c\n", av_get_pict_type_char(enc->coded_frame->pict_type));
994 }
995 }
996
997 static void print_report(AVFormatContext **output_files,
998 AVOutputStream **ost_table, int nb_ostreams,
999 int is_last_report)
1000 {
1001 char buf[1024];
1002 AVOutputStream *ost;
1003 AVFormatContext *oc, *os;
1004 int64_t total_size;
1005 AVCodecContext *enc;
1006 int frame_number, vid, i;
1007 double bitrate, ti1, pts;
1008 static int64_t last_time = -1;
1009
1010 if (!is_last_report) {
1011 int64_t cur_time;
1012 /* display the report every 0.5 seconds */
1013 cur_time = av_gettime();
1014 if (last_time == -1) {
1015 last_time = cur_time;
1016 return;
1017 }
1018 if ((cur_time - last_time) < 500000)
1019 return;
1020 last_time = cur_time;
1021 }
1022
1023
1024 oc = output_files[0];
1025
1026 total_size = url_ftell(&oc->pb);
1027
1028 buf[0] = '\0';
1029 ti1 = 1e10;
1030 vid = 0;
1031 for(i=0;i<nb_ostreams;i++) {
1032 ost = ost_table[i];
1033 os = output_files[ost->file_index];
1034 enc = &ost->st->codec;
1035 if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1036 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ",
1037 enc->coded_frame->quality/(float)FF_QP2LAMBDA);
1038 }
1039 if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1040 frame_number = ost->frame_number;
1041 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d q=%2.1f ",
1042 frame_number, enc->coded_frame ? enc->coded_frame->quality/(float)FF_QP2LAMBDA : 0);
1043 if(is_last_report)
1044 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1045 if (enc->flags&CODEC_FLAG_PSNR){
1046 int j;
1047 double error, error_sum=0;
1048 double scale, scale_sum=0;
1049 char type[3]= {'Y','U','V'};
1050 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1051 for(j=0; j<3; j++){
1052 if(is_last_report){
1053 error= enc->error[j];
1054 scale= enc->width*enc->height*255.0*255.0*frame_number;
1055 }else{
1056 error= enc->coded_frame->error[j];
1057 scale= enc->width*enc->height*255.0*255.0;
1058 }
1059 if(j) scale/=4;
1060 error_sum += error;
1061 scale_sum += scale;
1062 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
1063 }
1064 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
1065 }
1066 vid = 1;
1067 }
1068 /* compute min output value */
1069 pts = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
1070 if ((pts < ti1) && (pts > 0))
1071 ti1 = pts;
1072 }
1073 if (ti1 < 0.01)
1074 ti1 = 0.01;
1075
1076 if (verbose || is_last_report) {
1077 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1078
1079 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1080 "size=%8.0fkB time=%0.1f bitrate=%6.1fkbits/s",
1081 (double)total_size / 1024, ti1, bitrate);
1082
1083 if (verbose > 1)
1084 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1085 nb_frames_dup, nb_frames_drop);
1086
1087 if (verbose >= 0)
1088 fprintf(stderr, "%s \r", buf);
1089
1090 fflush(stderr);
1091 }
1092
1093 if (is_last_report && verbose >= 0){
1094 int64_t raw= audio_size + video_size + extra_size;
1095 fprintf(stderr, "\n");
1096 fprintf(stderr, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1097 video_size/1024.0,
1098 audio_size/1024.0,
1099 extra_size/1024.0,
1100 100.0*(total_size - raw)/raw
1101 );
1102 }
1103 }
1104
1105 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1106 static int output_packet(AVInputStream *ist, int ist_index,
1107 AVOutputStream **ost_table, int nb_ostreams,
1108 const AVPacket *pkt)
1109 {
1110 AVFormatContext *os;
1111 AVOutputStream *ost;
1112 uint8_t *ptr;
1113 int len, ret, i;
1114 uint8_t *data_buf;
1115 int data_size, got_picture;
1116 AVFrame picture;
1117 void *buffer_to_free;
1118 static int samples_size= 0;
1119 static short *samples= NULL;
1120
1121 if(!pkt){
1122 ist->pts= ist->next_pts; // needed for last packet if vsync=0
1123 } else if (pkt->dts != AV_NOPTS_VALUE) { //FIXME seems redundant, as libavformat does this too
1124 ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1125 } else {
1126 // assert(ist->pts == ist->next_pts);
1127 }
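/* ist->pts tracks the current decode position in AV_TIME_BASE units;
   ist->next_pts predicts the following timestamp and is used when a packet
   carries none */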
1128
1129 if (pkt == NULL) {
1130 /* EOF handling */
1131 ptr = NULL;
1132 len = 0;
1133 goto handle_eof;
1134 }
1135
1136 len = pkt->size;
1137 ptr = pkt->data;
1138 while (len > 0) {
1139 handle_eof:
1140 /* decode the packet if needed */
1141 data_buf = NULL; /* fail safe */
1142 data_size = 0;
1143 if (ist->decoding_needed) {
1144 switch(ist->st->codec.codec_type) {
1145 case CODEC_TYPE_AUDIO:{
1146 if(pkt)
1147 samples= av_fast_realloc(samples, &samples_size, FFMAX(pkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE));
1148 /* XXX: could avoid copy if PCM 16 bits with same
1149 endianness as CPU */
1150 ret = avcodec_decode_audio(&ist->st->codec, samples, &data_size,
1151 ptr, len);
1152 if (ret < 0)
1153 goto fail_decode;
1154 ptr += ret;
1155 len -= ret;
1156 /* a bug in the mpeg audio decoder can give */
1157 /* data_size < 0; it seems to be an overflow */
1158 if (data_size <= 0) {
1159 /* no audio frame */
1160 continue;
1161 }
1162 data_buf = (uint8_t *)samples;
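/* advance next_pts by the decoded duration: data_size bytes of 16-bit samples
   across all channels (the division by 2 bytes per sample is folded into
   AV_TIME_BASE/2) */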
1163 ist->next_pts += ((int64_t)AV_TIME_BASE/2 * data_size) /
1164 (ist->st->codec.sample_rate * ist->st->codec.channels);
1165 break;}
1166 case CODEC_TYPE_VIDEO:
1167 data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2;
1168 /* XXX: allocate picture correctly */
1169 avcodec_get_frame_defaults(&picture);
1170
1171 ret = avcodec_decode_video(&ist->st->codec,
1172 &picture, &got_picture, ptr, len);
1173 ist->st->quality= picture.quality;
1174 if (ret < 0)
1175 goto fail_decode;
1176 if (!got_picture) {
1177 /* no picture yet */
1178 goto discard_packet;
1179 }
1180 if (ist->st->codec.time_base.num != 0) {
1181 ist->next_pts += ((int64_t)AV_TIME_BASE *
1182 ist->st->codec.time_base.num) /
1183 ist->st->codec.time_base.den;
1184 }
1185 len = 0;
1186 break;
1187 default:
1188 goto fail_decode;
1189 }
1190 } else {
1191 switch(ist->st->codec.codec_type) {
1192 case CODEC_TYPE_AUDIO:
1193 ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec.frame_size) /
1194 (ist->st->codec.sample_rate * ist->st->codec.channels);
1195 break;
1196 case CODEC_TYPE_VIDEO:
1197 if (ist->st->codec.time_base.num != 0) {
1198 ist->next_pts += ((int64_t)AV_TIME_BASE *
1199 ist->st->codec.time_base.num) /
1200 ist->st->codec.time_base.den;
1201 }
1202 break;
1203 }
1204 data_buf = ptr;
1205 data_size = len;
1206 ret = len;
1207 len = 0;
1208 }
1209
1210 buffer_to_free = NULL;
1211 if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO) {
1212 pre_process_video_frame(ist, (AVPicture *)&picture,
1213 &buffer_to_free);
1214 }
1215
1216 // preprocess audio (volume)
1217 if (ist->st->codec.codec_type == CODEC_TYPE_AUDIO) {
1218 if (audio_volume != 256) {
1219 short *volp;
1220 volp = samples;
1221 for(i=0;i<(data_size / sizeof(short));i++) {
1222 int v = ((*volp) * audio_volume + 128) >> 8;
1223 if (v < -32768) v = -32768;
1224 if (v > 32767) v = 32767;
1225 *volp++ = v;
1226 }
1227 }
1228 }
1229
1230 /* frame rate emulation */
1231 if (ist->st->codec.rate_emu) {
1232 int64_t pts = av_rescale((int64_t) ist->frame * ist->st->codec.time_base.num, 1000000, ist->st->codec.time_base.den);
1233 int64_t now = av_gettime() - ist->start;
1234 if (pts > now)
1235 usleep(pts - now);
1236
1237 ist->frame++;
1238 }
1239
1240 #if 0
1241 /* mpeg PTS reordering: if it is a P or I frame, the PTS
1242 is that of the next displayed frame */
1243 /* XXX: add mpeg4 too ? */
1244 if (ist->st->codec.codec_id == CODEC_ID_MPEG1VIDEO) {
1245 if (ist->st->codec.pict_type != B_TYPE) {
1246 int64_t tmp;
1247 tmp = ist->last_ip_pts;
1248 ist->last_ip_pts = ist->frac_pts.val;
1249 ist->frac_pts.val = tmp;
1250 }
1251 }
1252 #endif
1253 /* once the requested start time is reached, transcode the raw data,
1254 encode packets and output them */
1255 if (start_time == 0 || ist->pts >= start_time)
1256 for(i=0;i<nb_ostreams;i++) {
1257 int frame_size;
1258
1259 ost = ost_table[i];
1260 if (ost->source_index == ist_index) {
1261 os = output_files[ost->file_index];
1262
1263 #if 0
1264 printf("%d: got pts=%0.3f %0.3f\n", i,
1265 (double)pkt->pts / AV_TIME_BASE,
1266 ((double)ist->pts / AV_TIME_BASE) -
1267 ((double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den));
1268 #endif
1269 /* set the input output pts pairs */
1270 ost->sync_ipts = (double)(ist->pts + input_files_ts_offset[ist->file_index] - start_time)/ AV_TIME_BASE;
1271
1272 if (ost->encoding_needed) {
1273 switch(ost->st->codec.codec_type) {
1274 case CODEC_TYPE_AUDIO:
1275 do_audio_out(os, ost, ist, data_buf, data_size);
1276 break;
1277 case CODEC_TYPE_VIDEO:
1278 do_video_out(os, ost, ist, &picture, &frame_size);
1279 video_size += frame_size;
1280 if (do_vstats && frame_size)
1281 do_video_stats(os, ost, frame_size);
1282 break;
1283 default:
1284 av_abort();
1285 }
1286 } else {
1287 AVFrame avframe; //FIXME/XXX remove this
1288 AVPacket opkt;
1289 av_init_packet(&opkt);
1290
1291 /* no reencoding needed : output the packet directly */
1292 /* force the input stream PTS */
1293
1294 avcodec_get_frame_defaults(&avframe);
1295 ost->st->codec.coded_frame= &avframe;
1296 avframe.key_frame = pkt->flags & PKT_FLAG_KEY;
1297
1298 if(ost->st->codec.codec_type == CODEC_TYPE_AUDIO)
1299 audio_size += data_size;
1300 else if (ost->st->codec.codec_type == CODEC_TYPE_VIDEO) {
1301 video_size += data_size;
1302 ost->sync_opts++;
1303 }
1304
1305 opkt.stream_index= ost->index;
1306 opkt.data= data_buf;
1307 opkt.size= data_size;
1308 if(pkt->pts != AV_NOPTS_VALUE)
1309 opkt.pts= av_rescale_q(av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q) + input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ost->st->time_base);
1310 else
1311 opkt.pts= AV_NOPTS_VALUE;
1312 opkt.dts= av_rescale_q(av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q) + input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ost->st->time_base);
1313 opkt.flags= pkt->flags;
1314
1315 av_interleaved_write_frame(os, &opkt);
1316 ost->st->codec.frame_number++;
1317 ost->frame_number++;
1318 }
1319 }
1320 }
1321 av_free(buffer_to_free);
1322 }
1323 discard_packet:
1324 if (pkt == NULL) {
1325 /* EOF handling */
1326
1327 for(i=0;i<nb_ostreams;i++) {
1328 ost = ost_table[i];
1329 if (ost->source_index == ist_index) {
1330 AVCodecContext *enc= &ost->st->codec;
1331 os = output_files[ost->file_index];
1332
1333 if(ost->st->codec.codec_type == CODEC_TYPE_AUDIO && enc->frame_size <=1)
1334 continue;
1335 if(ost->st->codec.codec_type == CODEC_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
1336 continue;
1337
1338 if (ost->encoding_needed) {
1339 for(;;) {
1340 AVPacket pkt;
1341 av_init_packet(&pkt);
1342 pkt.stream_index= ost->index;
1343
1344 switch(ost->st->codec.codec_type) {
1345 case CODEC_TYPE_AUDIO:
1346 ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
1347 audio_size += ret;
1348 pkt.flags |= PKT_FLAG_KEY;
1349 break;
1350 case CODEC_TYPE_VIDEO:
1351 ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
1352 video_size += ret;
1353 if(enc->coded_frame && enc->coded_frame->key_frame)
1354 pkt.flags |= PKT_FLAG_KEY;
1355 if (ost->logfile && enc->stats_out) {
1356 fprintf(ost->logfile, "%s", enc->stats_out);
1357 }
1358 break;
1359 default:
1360 ret=-1;
1361 }
1362
1363 if(ret<=0)
1364 break;
1365 pkt.data= bit_buffer;
1366 pkt.size= ret;
1367 if(enc->coded_frame)
1368 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1369 av_interleaved_write_frame(os, &pkt);
1370 }
1371 }
1372 }
1373 }
1374 }
1375
1376 return 0;
1377 fail_decode:
1378 return -1;
1379 }
1380
1381
1382 /*
1383 * The following code is the main loop of the file converter
1384 */
1385 static int av_encode(AVFormatContext **output_files,
1386 int nb_output_files,
1387 AVFormatContext **input_files,
1388 int nb_input_files,
1389 AVStreamMap *stream_maps, int nb_stream_maps)
1390 {
1391 int ret, i, j, k, n, nb_istreams = 0, nb_ostreams = 0;
1392 AVFormatContext *is, *os;
1393 AVCodecContext *codec, *icodec;
1394 AVOutputStream *ost, **ost_table = NULL;
1395 AVInputStream *ist, **ist_table = NULL;
1396 AVInputFile *file_table;
1397 AVFormatContext *stream_no_data;
1398 int key;
1399
1400 file_table= (AVInputFile*) av_mallocz(nb_input_files * sizeof(AVInputFile));
1401 if (!file_table)
1402 goto fail;
1403
1404 /* input stream init */
1405 j = 0;
1406 for(i=0;i<nb_input_files;i++) {
1407 is = input_files[i];
1408 file_table[i].ist_index = j;
1409 file_table[i].nb_streams = is->nb_streams;
1410 j += is->nb_streams;
1411 }
1412 nb_istreams = j;
1413
1414 ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *));
1415 if (!ist_table)
1416 goto fail;
1417
1418 for(i=0;i<nb_istreams;i++) {
1419 ist = av_mallocz(sizeof(AVInputStream));
1420 if (!ist)
1421 goto fail;
1422 ist_table[i] = ist;
1423 }
1424 j = 0;
1425 for(i=0;i<nb_input_files;i++) {
1426 is = input_files[i];
1427 for(k=0;k<is->nb_streams;k++) {
1428 ist = ist_table[j++];
1429 ist->st = is->streams[k];
1430 ist->file_index = i;
1431 ist->index = k;
1432 ist->discard = 1; /* the stream is discarded by default
1433 (changed later) */
1434
1435 if (ist->st->codec.rate_emu) {
1436 ist->start = av_gettime();
1437 ist->frame = 0;
1438 }
1439 }
1440 }
1441
1442 /* output stream init */
1443 nb_ostreams = 0;
1444 for(i=0;i<nb_output_files;i++) {
1445 os = output_files[i];
1446 nb_ostreams += os->nb_streams;
1447 }
1448 if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) {
1449 fprintf(stderr, "Number of stream maps must match number of output streams\n");
1450 exit(1);
1451 }
1452
1453 /* Sanity check the mapping args -- do the input files & streams exist? */
1454 for(i=0;i<nb_stream_maps;i++) {
1455 int fi = stream_maps[i].file_index;
1456 int si = stream_maps[i].stream_index;
1457
1458 if (fi < 0 || fi > nb_input_files - 1 ||
1459 si < 0 || si > file_table[fi].nb_streams - 1) {
1460 fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si);
1461 exit(1);
1462 }
1463 }
1464
1465 ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams);
1466 if (!ost_table)
1467 goto fail;
1468 for(i=0;i<nb_ostreams;i++) {
1469 ost = av_mallocz(sizeof(AVOutputStream));
1470 if (!ost)
1471 goto fail;
1472 ost_table[i] = ost;
1473 }
1474
1475 n = 0;
1476 for(k=0;k<nb_output_files;k++) {
1477 os = output_files[k];
1478 for(i=0;i<os->nb_streams;i++) {
1479 int found;
1480 ost = ost_table[n++];
1481 ost->file_index = k;
1482 ost->index = i;
1483 ost->st = os->streams[i];
1484 if (nb_stream_maps > 0) {
1485 ost->source_index = file_table[stream_maps[n-1].file_index].ist_index +
1486 stream_maps[n-1].stream_index;
1487
1488 /* Sanity check that the stream types match */
1489 if (ist_table[ost->source_index]->st->codec.codec_type != ost->st->codec.codec_type) {
1490 fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n",
1491 stream_maps[n-1].file_index, stream_maps[n-1].stream_index,
1492 ost->file_index, ost->index);
1493 exit(1);
1494 }
1495
1496 } else {
1497 /* get corresponding input stream index : we select the first one with the right type */
1498 found = 0;
1499 for(j=0;j<nb_istreams;j++) {
1500 ist = ist_table[j];
1501 if (ist->discard &&
1502 ist->st->codec.codec_type == ost->st->codec.codec_type) {
1503 ost->source_index = j;
1504 found = 1;
1505 break;
1506 }
1507 }
1508
1509 if (!found) {
1510 /* try again and reuse existing stream */
1511 for(j=0;j<nb_istreams;j++) {
1512 ist = ist_table[j];
1513 if (ist->st->codec.codec_type == ost->st->codec.codec_type) {
1514 ost->source_index = j;
1515 found = 1;
1516 }
1517 }
1518 if (!found) {
1519 fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n",
1520 ost->file_index, ost->index);
1521 exit(1);
1522 }
1523 }
1524 }
1525 ist = ist_table[ost->source_index];
1526 ist->discard = 0;
1527 }
1528 }
1529
1530 /* for each output stream, we compute the right encoding parameters */
1531 for(i=0;i<nb_ostreams;i++) {
1532 ost = ost_table[i];
1533 ist = ist_table[ost->source_index];
1534
1535 codec = &ost->st->codec;
1536 icodec = &ist->st->codec;
1537
1538 if (ost->st->stream_copy) {
1539 /* if stream_copy is selected, no need to decode or encode */
1540 codec->codec_id = icodec->codec_id;
1541 codec->codec_type = icodec->codec_type;
1542 if(!codec->codec_tag) codec->codec_tag = icodec->codec_tag;
1543 codec->bit_rate = icodec->bit_rate;
1544 codec->extradata= icodec->extradata;
1545 codec->extradata_size= icodec->extradata_size;
1546 switch(codec->codec_type) {
1547 case CODEC_TYPE_AUDIO:
1548 codec->sample_rate = icodec->sample_rate;
1549 codec->channels = icodec->channels;
1550 codec->frame_size = icodec->frame_size;
1551 codec->block_align= icodec->block_align;
1552 break;
1553 case CODEC_TYPE_VIDEO:
1554 codec->time_base = icodec->time_base;
1555 codec->width = icodec->width;
1556 codec->height = icodec->height;
1557 codec->has_b_frames = icodec->has_b_frames;
1558 break;
1559 default:
1560 av_abort();
1561 }
1562 } else {
1563 switch(codec->codec_type) {
1564 case CODEC_TYPE_AUDIO:
1565 if (fifo_init(&ost->fifo, 2 * MAX_AUDIO_PACKET_SIZE))
1566 goto fail;
1567
1568 if (codec->channels == icodec->channels &&
1569 codec->sample_rate == icodec->sample_rate) {
1570 ost->audio_resample = 0;
1571 } else {
1572 if (codec->channels != icodec->channels &&
1573 (icodec->codec_id == CODEC_ID_AC3 ||
1574 icodec->codec_id == CODEC_ID_DTS)) {
1575 /* Special case for 5.1 AC3 and DTS input */
1576 /* and mono or stereo output */
1577 /* Request specific number of channels */
1578 icodec->channels = codec->channels;
1579 if (codec->sample_rate == icodec->sample_rate)
1580 ost->audio_resample = 0;
1581 else {
1582 ost->audio_resample = 1;
1583 }
1584 } else {
1585 ost->audio_resample = 1;
1586 }
1587 }
1588 if(audio_sync_method>1)
1589 ost->audio_resample = 1;
1590
1591 if(ost->audio_resample){
1592 ost->resample = audio_resample_init(codec->channels, icodec->channels,
1593 codec->sample_rate, icodec->sample_rate);
1594 if(!ost->resample){
1595 printf("Can't resample. Aborting.\n");
1596 av_abort();
1597 }
1598 }
1599 ist->decoding_needed = 1;
1600 ost->encoding_needed = 1;
1601 break;
1602 case CODEC_TYPE_VIDEO:
1603 if (codec->width == icodec->width &&
1604 codec->height == icodec->height &&
1605 frame_topBand == 0 &&
1606 frame_bottomBand == 0 &&
1607 frame_leftBand == 0 &&
1608 frame_rightBand == 0 &&
1609 frame_padtop == 0 &&
1610 frame_padbottom == 0 &&
1611 frame_padleft == 0 &&
1612 frame_padright == 0)
1613 {
1614 ost->video_resample = 0;
1615 ost->video_crop = 0;
1616 ost->video_pad = 0;
1617 } else if ((codec->width == icodec->width -
1618 (frame_leftBand + frame_rightBand)) &&
1619 (codec->height == icodec->height -
1620 (frame_topBand + frame_bottomBand)))
1621 {
1622 ost->video_resample = 0;
1623 ost->video_crop = 1;
1624 ost->topBand = frame_topBand;
1625 ost->leftBand = frame_leftBand;
1626 } else if ((codec->width == icodec->width +
1627 (frame_padleft + frame_padright)) &&
1628 (codec->height == icodec->height +
1629 (frame_padtop + frame_padbottom))) {
1630 ost->video_resample = 0;
1631 ost->video_crop = 0;
1632 ost->video_pad = 1;
1633 ost->padtop = frame_padtop;
1634 ost->padleft = frame_padleft;
1635 ost->padbottom = frame_padbottom;
1636 ost->padright = frame_padright;
1637 avcodec_get_frame_defaults(&ost->pict_tmp);
1638 if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
1639 codec->width, codec->height ) )
1640 goto fail;
1641 } else {
1642 ost->video_resample = 1;
1643 ost->video_crop = 0; // cropping is handled as part of resample
1644 avcodec_get_frame_defaults(&ost->pict_tmp);
1645 if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
1646 codec->width, codec->height ) )
1647 goto fail;
1648
1649 ost->img_resample_ctx = img_resample_full_init(
1650 ost->st->codec.width, ost->st->codec.height,
1651 ist->st->codec.width, ist->st->codec.height,
1652 frame_topBand, frame_bottomBand,
1653 frame_leftBand, frame_rightBand,
1654 frame_padtop, frame_padbottom,
1655 frame_padleft, frame_padright);
1656
1657 ost->padtop = frame_padtop;
1658 ost->padleft = frame_padleft;
1659 ost->padbottom = frame_padbottom;
1660 ost->padright = frame_padright;
1661
1662 }
1663 ost->encoding_needed = 1;
1664 ist->decoding_needed = 1;
1665 break;
1666 default:
1667 av_abort();
1668 }
1669 /* two pass mode */
1670 if (ost->encoding_needed &&
1671 (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1672 char logfilename[1024];
1673 FILE *f;
1674 int size;
1675 char *logbuffer;
1676
1677 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1678 pass_logfilename ?
1679 pass_logfilename : DEFAULT_PASS_LOGFILENAME, i);
1680 if (codec->flags & CODEC_FLAG_PASS1) {
1681 f = fopen(logfilename, "w");
1682 if (!f) {
1683 perror(logfilename);
1684 exit(1);
1685 }
1686 ost->logfile = f;
1687 } else {
1688 /* read the log file */
1689 f = fopen(logfilename, "r");
1690 if (!f) {
1691 perror(logfilename);
1692 exit(1);
1693 }
1694 fseek(f, 0, SEEK_END);
1695 size = ftell(f);
1696 fseek(f, 0, SEEK_SET);
1697 logbuffer = av_malloc(size + 1);
1698 if (!logbuffer) {
1699 fprintf(stderr, "Could not allocate log buffer\n");
1700 exit(1);
1701 }
1702 size = fread(logbuffer, 1, size, f);
1703 fclose(f);
1704 logbuffer[size] = '\0';
1705 codec->stats_in = logbuffer;
1706 }
1707 }
1708 }
1709 if(codec->codec_type == CODEC_TYPE_VIDEO){
1710 int size= codec->width * codec->height;
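/* presumably a generous upper bound: reserve 4 bytes per pixel for one
   encoded frame of the largest output video stream */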
1711 bit_buffer_size= FFMAX(bit_buffer_size, 4*size);
1712 }
1713 }
1714
1715 if (!bit_buffer)
1716 bit_buffer = av_malloc(bit_buffer_size);
1717 if (!bit_buffer)
1718 goto fail;
1719
1720 /* dump the file output parameters - cannot be done earlier in case
1721 of stream copy */
1722 for(i=0;i<nb_output_files;i++) {
1723 dump_format(output_files[i], i, output_files[i]->filename, 1);
1724 }
1725
1726 /* dump the stream mapping */
1727 if (verbose >= 0) {
1728 fprintf(stderr, "Stream mapping:\n");
1729 for(i=0;i<nb_ostreams;i++) {
1730 ost = ost_table[i];
1731 fprintf(stderr, " Stream #%d.%d -> #%d.%d\n",
1732 ist_table[ost->source_index]->file_index,
1733 ist_table[ost->source_index]->index,
1734 ost->file_index,
1735 ost->index);
1736 }
1737 }
1738
1739 /* open each encoder */
1740 for(i=0;i<nb_ostreams;i++) {
1741 ost = ost_table[i];
1742 if (ost->encoding_needed) {
1743 AVCodec *codec;
1744 codec = avcodec_find_encoder(ost->st->codec.codec_id);
1745 if (!codec) {
1746 fprintf(stderr, "Unsupported codec for output stream #%d.%d\n",
1747 ost->file_index, ost->index);
1748 exit(1);
1749 }
1750 if (avcodec_open(&ost->st->codec, codec) < 0) {
1751 fprintf(stderr, "Error while opening codec for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height\n",
1752 ost->file_index, ost->index);
1753 exit(1);
1754 }
1755 extra_size += ost->st->codec.extradata_size;
1756 }
1757 }
1758
1759 /* open each decoder */
1760 for(i=0;i<nb_istreams;i++) {
1761 ist = ist_table[i];
1762 if (ist->decoding_needed) {
1763 AVCodec *codec;
1764 codec = avcodec_find_decoder(ist->st->codec.codec_id);
1765 if (!codec) {
1766 fprintf(stderr, "Unsupported codec (id=%d) for input stream #%d.%d\n",
1767 ist->st->codec.codec_id, ist->file_index, ist->index);
1768 exit(1);
1769 }
1770 if (avcodec_open(&ist->st->codec, codec) < 0) {
1771 fprintf(stderr, "Error while opening codec for input stream #%d.%d\n",
1772 ist->file_index, ist->index);
1773 exit(1);
1774 }
1775 //if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO)
1776 // ist->st->codec.flags |= CODEC_FLAG_REPEAT_FIELD;
1777 }
1778 }
1779
1780 /* init pts */
1781 for(i=0;i<nb_istreams;i++) {
1782 ist = ist_table[i];
1783 is = input_files[ist->file_index];
1784 ist->pts = 0;
1785 ist->next_pts = av_rescale_q(ist->st->start_time, ist->st->time_base, AV_TIME_BASE_Q);
1786 if(ist->st->start_time == AV_NOPTS_VALUE)
1787 ist->next_pts=0;
1788 if(input_files_ts_offset[ist->file_index])
1789 ist->next_pts= AV_NOPTS_VALUE;
1790 ist->is_start = 1;
1791 }
1792
1793 /* compute buffer size max (should use a complete heuristic) */
1794 for(i=0;i<nb_input_files;i++) {
1795 file_table[i].buffer_size_max = 2048;
1796 }
1797
1798 /* set meta data information from input file if required */
1799 for (i=0;i<nb_meta_data_maps;i++) {
1800 AVFormatContext *out_file;
1801 AVFormatContext *in_file;
1802
1803 int out_file_index = meta_data_maps[i].out_file;
1804 int in_file_index = meta_data_maps[i].in_file;
1805 if ( out_file_index < 0 || out_file_index >= nb_output_files ) {
1806 fprintf(stderr, "Invalid output file index %d map_meta_data(%d,%d)\n", out_file_index, out_file_index, in_file_index);
1807 ret = -EINVAL;
1808 goto fail;
1809 }
1810 if ( in_file_index < 0 || in_file_index >= nb_input_files ) {
1811 fprintf(stderr, "Invalid input file index %d map_meta_data(%d,%d)\n", in_file_index, out_file_index, in_file_index);
1812 ret = -EINVAL;
1813 goto fail;
1814 }
1815
1816 out_file = output_files[out_file_index];
1817 in_file = input_files[in_file_index];
1818
1819 strcpy(out_file->title, in_file->title);
1820 strcpy(out_file->author, in_file->author);
1821 strcpy(out_file->copyright, in_file->copyright);
1822 strcpy(out_file->comment, in_file->comment);
1823 strcpy(out_file->album, in_file->album);
1824 out_file->year = in_file->year;
1825 out_file->track = in_file->track;
1826 strcpy(out_file->genre, in_file->genre);
1827 }
1828
1829 /* open files and write file headers */
1830 for(i=0;i<nb_output_files;i++) {
1831 os = output_files[i];
1832 if (av_write_header(os) < 0) {
1833 fprintf(stderr, "Could not write header for output file #%d (incorrect codec parameters ?)\n", i);
1834 ret = -EINVAL;
1835 goto fail;
1836 }
1837 }
1838
1839 #ifndef CONFIG_WIN32
1840 if ( !using_stdin && verbose >= 0) {
1841 fprintf(stderr, "Press [q] to stop encoding\n");
1842 url_set_interrupt_cb(decode_interrupt_cb);
1843 }
1844 #endif
1845 term_init();
1846
1847 stream_no_data = 0;
1848 key = -1;
1849
1850 for(; received_sigterm == 0;) {
1851 int file_index, ist_index;
1852 AVPacket pkt;
1853 double ipts_min;
1854 double opts_min;
1855
1856 redo:
1857 ipts_min= 1e100;
1858 opts_min= 1e100;
1859 /* if 'q' was pressed, exit */
1860 if (!using_stdin) {
1861 if (q_pressed)
1862 break;
1863 /* read_key() returns 0 on EOF */
1864 key = read_key();
1865 if (key == 'q')
1866 break;
1867 }
1868
1869 /* select the stream that we must read now by looking at the
1870 smallest output pts */
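/* For each output stream, compute how far it has progressed in seconds and
   remember the input file feeding the stream that is furthest behind; reading
   from that file next keeps the outputs interleaved. When input_sync is set,
   the choice is based on the input pts instead of the output pts. */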
1871 file_index = -1;
1872 for(i=0;i<nb_ostreams;i++) {
1873 double ipts, opts;
1874 ost = ost_table[i];
1875 os = output_files[ost->file_index];
1876 ist = ist_table[ost->source_index];
1877 if(ost->st->codec.codec_type == CODEC_TYPE_VIDEO)
1878 opts = ost->sync_opts * av_q2d(ost->st->codec.time_base);
1879 else
1880 opts = ost->st->pts.val * av_q2d(ost->st->time_base);
1881 ipts = (double)ist->pts;
1882 if (!file_table[ist->file_index].eof_reached){
1883 if(ipts < ipts_min) {
1884 ipts_min = ipts;
1885 if(input_sync ) file_index = ist->file_index;
1886 }
1887 if(opts < opts_min) {
1888 opts_min = opts;
1889 if(!input_sync) file_index = ist->file_index;
1890 }
1891 }
1892 if(ost->frame_number >= max_frames[ost->st->codec.codec_type]){
1893 file_index= -1;
1894 break;
1895 }
1896 }
1897 /* if none, we are finished */
1898 if (file_index < 0) {
1899 break;
1900 }
1901
1902 /* finish if recording time exhausted */
1903 if (recording_time > 0 && opts_min >= (recording_time / 1000000.0))
1904 break;
1905
1906 /* finish if the file size limit (given in kB) has been reached */
1907 if (limit_filesize != 0 && (limit_filesize * 1024) < url_ftell(&output_files[0]->pb))
1908 break;
1909
1910 /* read a frame from it and output it into the fifo */
1911 is = input_files[file_index];
1912 if (av_read_frame(is, &pkt) < 0) {
1913 file_table[file_index].eof_reached = 1;
1914 if (opt_shortest) break; else continue;
1915 }
1916
1917 if (!pkt.size) {
1918 stream_no_data = is;
1919 } else {
1920 stream_no_data = 0;
1921 }
1922 if (do_pkt_dump) {
1923 av_pkt_dump(stdout, &pkt, do_hex_dump);
1924 }
1925 /* the following test is needed in case new streams appear
1926 dynamically in the stream: we ignore them */
1927 if (pkt.stream_index >= file_table[file_index].nb_streams)
1928 goto discard_packet;
1929 ist_index = file_table[file_index].ist_index + pkt.stream_index;
1930 ist = ist_table[ist_index];
1931 if (ist->discard)
1932 goto discard_packet;
1933
1934 // fprintf(stderr, "next:%lld dts:%lld off:%lld %d\n", ist->next_pts, pkt.dts, input_files_ts_offset[ist->file_index], ist->st->codec.codec_type);
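/* Detect timestamp discontinuities: if the demuxed dts differs from the
   predicted next_pts by more than 10 seconds (and -copyts is not set), fold
   the difference into this file's timestamp offset and re-synchronise every
   stream of the file. */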
1935 if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE) {
1936 int64_t delta= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q) - ist->next_pts;
1937 if(ABS(delta) > 10LL*AV_TIME_BASE && !copy_ts){
1938 input_files_ts_offset[ist->file_index]-= delta;
1939 if (verbose > 2)
1940 fprintf(stderr, "timestamp discontinuity %lld, new offset= %lld\n", delta, input_files_ts_offset[ist->file_index]);
1941 for(i=0; i<file_table[file_index].nb_streams; i++){
1942 int index= file_table[file_index].ist_index + i;
1943 ist_table[index]->next_pts += delta;
1944 ist_table[index]->is_start=1;
1945 }
1946 }
1947 }
1948
1949 //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->index, pkt.size);
1950 if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
1951
1952 if (verbose >= 0)
1953 fprintf(stderr, "Error while decoding stream #%d.%d\n",
1954 ist->file_index, ist->index);
1955
1956 av_free_packet(&pkt);
1957 goto redo;
1958 }
1959
1960 discard_packet:
1961 av_free_packet(&pkt);
1962
1963 /* dump report by using the first output video and audio streams */
1964 print_report(output_files, ost_table, nb_ostreams, 0);
1965 }
1966
1967 /* at the end of stream, we must flush the decoder buffers */
1968 for(i=0;i<nb_istreams;i++) {
1969 ist = ist_table[i];
1970 if (ist->decoding_needed) {
1971 output_packet(ist, i, ost_table, nb_ostreams, NULL);
1972 }
1973 }
1974
1975 term_exit();
1976
1977 /* write the trailer if needed and close file */
1978 for(i=0;i<nb_output_files;i++) {
1979 os = output_files[i];
1980 av_write_trailer(os);
1981 }
1982
1983 /* dump report by using the first video and audio streams */
1984 print_report(output_files, ost_table, nb_ostreams, 1);
1985
1986 /* close each encoder */
1987 for(i=0;i<nb_ostreams;i++) {
1988 ost = ost_table[i];
1989 if (ost->encoding_needed) {
1990 av_freep(&ost->st->codec.stats_in);
1991 avcodec_close(&ost->st->codec);
1992 }
1993 }
1994
1995 /* close each decoder */
1996 for(i=0;i<nb_istreams;i++) {
1997 ist = ist_table[i];
1998 if (ist->decoding_needed) {
1999 avcodec_close(&ist->st->codec);
2000 }
2001 }
2002
2003 /* finished ! */
2004
2005 ret = 0;
2006 fail1:
2007 av_freep(&bit_buffer);
2008 av_free(file_table);
2009
2010 if (ist_table) {
2011 for(i=0;i<nb_istreams;i++) {
2012 ist = ist_table[i];
2013 av_free(ist);
2014 }
2015 av_free(ist_table);
2016 }
2017 if (ost_table) {
2018 for(i=0;i<nb_ostreams;i++) {
2019 ost = ost_table[i];
2020 if (ost) {
2021 if (ost->logfile) {
2022 fclose(ost->logfile);
2023 ost->logfile = NULL;
2024 }
2025 fifo_free(&ost->fifo); /* works even if fifo is not
2026 initialized but set to zero */
2027 av_free(ost->pict_tmp.data[0]);
2028 if (ost->video_resample)
2029 img_resample_close(ost->img_resample_ctx);
2030 if (ost->audio_resample)
2031 audio_resample_close(ost->resample);
2032 av_free(ost);
2033 }
2034 }
2035 av_free(ost_table);
2036 }
2037 return ret;
2038 fail:
2039 ret = -ENOMEM;
2040 goto fail1;
2041 }
2042
2043 #if 0
2044 int file_read(const char *filename)
2045 {
2046 URLContext *h;
2047 unsigned char buffer[1024];
2048 int len, i;
2049
2050 if (url_open(&h, filename, O_RDONLY) < 0) {
2051 printf("could not open '%s'\n", filename);
2052 return -1;
2053 }
2054 for(;;) {
2055 len = url_read(h, buffer, sizeof(buffer));
2056 if (len <= 0)
2057 break;
2058 for(i=0;i<len;i++) putchar(buffer[i]);
2059 }
2060 url_close(h);
2061 return 0;
2062 }
2063 #endif
2064
2065 static void opt_image_format(const char *arg)
2066 {
2067 AVImageFormat *f;
2068
2069 for(f = first_image_format; f != NULL; f = f->next) {
2070 if (!strcmp(arg, f->name))
2071 break;
2072 }
2073 if (!f) {
2074 fprintf(stderr, "Unknown image format: '%s'\n", arg);
2075 exit(1);
2076 }
2077 image_format = f;
2078 }
2079
2080 static void opt_format(const char *arg)
2081 {
2082 /* compatibility stuff for pgmyuv */
2083 if (!strcmp(arg, "pgmyuv")) {
2084 pgmyuv_compatibility_hack=1;
2085 // opt_image_format(arg);
2086 arg = "image2";
2087 }
2088
2089 file_iformat = av_find_input_format(arg);
2090 file_oformat = guess_format(arg, NULL, NULL);
2091 if (!file_iformat && !file_oformat) {
2092 fprintf(stderr, "Unknown input or output format: %s\n", arg);
2093 exit(1);
2094 }
2095 }
2096
2097 static void opt_video_bitrate(const char *arg)
2098 {
2099 video_bit_rate = atoi(arg) * 1000;
2100 }
2101
2102 static void opt_video_bitrate_tolerance(const char *arg)
2103 {
2104 video_bit_rate_tolerance = atoi(arg) * 1000;
2105 }
2106
2107 static void opt_video_bitrate_max(const char *arg)
2108 {
2109 video_rc_max_rate = atoi(arg) * 1000;
2110 }
2111
2112 static void opt_video_bitrate_min(const char *arg)
2113 {
2114 video_rc_min_rate = atoi(arg) * 1000;
2115 }
2116
2117 static void opt_video_buffer_size(const char *arg)
2118 {
2119 video_rc_buffer_size = atoi(arg) * 8*1024;
2120 }
2121
2122 static void opt_video_rc_eq(char *arg)
2123 {
2124 video_rc_eq = arg;
2125 }
2126
2127 static void opt_video_rc_override_string(char *arg)
2128 {
2129 video_rc_override_string = arg;
2130 }
2131
2132
2133 static void opt_workaround_bugs(const char *arg)
2134 {
2135 workaround_bugs = atoi(arg);
2136 }
2137
2138 static void opt_dct_algo(const char *arg)
2139 {
2140 dct_algo = atoi(arg);
2141 }
2142
2143 static void opt_idct_algo(const char *arg)
2144 {
2145 idct_algo = atoi(arg);
2146 }
2147
2148 static void opt_me_threshold(const char *arg)
2149 {
2150 me_threshold = atoi(arg);
2151 }
2152
2153 static void opt_mb_threshold(const char *arg)
2154 {
2155 mb_threshold = atoi(arg);
2156 }
2157
2158 static void opt_error_resilience(const char *arg)
2159 {
2160 error_resilience = atoi(arg);
2161 }
2162
2163 static void opt_error_concealment(const char *arg)
2164 {
2165 error_concealment = atoi(arg);
2166 }
2167
2168 static void opt_debug(const char *arg)
2169 {
2170 debug = atoi(arg);
2171 }
2172
2173 static void opt_vismv(const char *arg)
2174 {
2175 debug_mv = atoi(arg);
2176 }
2177
2178 static void opt_verbose(const char *arg)
2179 {
2180 verbose = atoi(arg);
2181 av_log_set_level(atoi(arg));
2182 }
2183
2184 static void opt_frame_rate(const char *arg)
2185 {
2186 if (parse_frame_rate(&frame_rate, &frame_rate_base, arg) < 0) {
2187 fprintf(stderr, "Incorrect frame rate\n");
2188 exit(1);
2189 }
2190 }
2191
2192 static void opt_frame_crop_top(const char *arg)
2193 {
2194 frame_topBand = atoi(arg);
2195 if (frame_topBand < 0) {
2196 fprintf(stderr, "Incorrect top crop size\n");
2197 exit(1);
2198 }
2199 if ((frame_topBand % 2) != 0) {
2200 fprintf(stderr, "Top crop size must be a multiple of 2\n");
2201 exit(1);
2202 }
2203 if ((frame_topBand) >= frame_height){
2204 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2205 exit(1);
2206 }
2207 frame_height -= frame_topBand;
2208 }
2209
2210 static void opt_frame_crop_bottom(const char *arg)
2211 {
2212 frame_bottomBand = atoi(arg);
2213 if (frame_bottomBand < 0) {
2214 fprintf(stderr, "Incorrect bottom crop size\n");
2215 exit(1);
2216 }
2217 if ((frame_bottomBand % 2) != 0) {
2218 fprintf(stderr, "Bottom crop size must be a multiple of 2\n");
2219 exit(1);
2220 }
2221 if ((frame_bottomBand) >= frame_height){
2222 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2223 exit(1);
2224 }
2225 frame_height -= frame_bottomBand;
2226 }
2227
2228 static void opt_frame_crop_left(const char *arg)
2229 {
2230 frame_leftBand = atoi(arg);
2231 if (frame_leftBand < 0) {
2232 fprintf(stderr, "Incorrect left crop size\n");
2233 exit(1);
2234 }
2235 if ((frame_leftBand % 2) != 0) {
2236 fprintf(stderr, "Left crop size must be a multiple of 2\n");
2237 exit(1);
2238 }
2239 if ((frame_leftBand) >= frame_width){
2240 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2241 exit(1);
2242 }
2243 frame_width -= frame_leftBand;
2244 }
2245
2246 static void opt_frame_crop_right(const char *arg)
2247 {
2248 frame_rightBand = atoi(arg);
2249 if (frame_rightBand < 0) {
2250 fprintf(stderr, "Incorrect right crop size\n");
2251 exit(1);
2252 }
2253 if ((frame_rightBand % 2) != 0) {
2254 fprintf(stderr, "Right crop size must be a multiple of 2\n");
2255 exit(1);
2256 }
2257 if ((frame_rightBand) >= frame_width){
2258 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2259 exit(1);
2260 }
2261 frame_width -= frame_rightBand;
2262 }
2263
2264 static void opt_frame_size(const char *arg)
2265 {
2266 if (parse_image_size(&frame_width, &frame_height, arg) < 0) {
2267 fprintf(stderr, "Incorrect frame size\n");
2268 exit(1);
2269 }
2270 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2271 fprintf(stderr, "Frame size must be a multiple of 2\n");
2272 exit(1);
2273 }
2274 }
2275
2276
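/* Fixed point RGB -> YCbCr conversion (ITU-R BT.601 coefficients): the
   floating point factors are scaled by 2^SCALEBITS and rounded, so the
   per-component math below needs only integer multiplies, adds and shifts. */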
2277 #define SCALEBITS 10
2278 #define ONE_HALF (1 << (SCALEBITS - 1))
2279 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
2280
2281 #define RGB_TO_Y(r, g, b) \
2282 ((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
2283 FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
2284
2285 #define RGB_TO_U(r1, g1, b1, shift)\
2286 (((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
2287 FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
2288
2289 #define RGB_TO_V(r1, g1, b1, shift)\
2290 (((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
2291 FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
2292
2293 static void opt_pad_color(const char *arg) {
2294 /* The input is expected to be six hex digits, similar to how
2295 colors are expressed in HTML tags (but without the leading #) */
2296 int rgb = strtol(arg, NULL, 16);
2297 int r,g,b;
2298
2299 r = (rgb >> 16);
2300 g = ((rgb >> 8) & 255);
2301 b = (rgb & 255);
2302
2303 padcolor[0] = RGB_TO_Y(r,g,b);
2304 padcolor[1] = RGB_TO_U(r,g,b,0);
2305 padcolor[2] = RGB_TO_V(r,g,b,0);
2306 }
2307
2308 static void opt_frame_pad_top(const char *arg)
2309 {
2310 frame_padtop = atoi(arg);
2311 if (frame_padtop < 0) {
2312 fprintf(stderr, "Incorrect top pad size\n");
2313 exit(1);
2314 }
2315 if ((frame_padtop % 2) != 0) {
2316 fprintf(stderr, "Top pad size must be a multiple of 2\n");
2317 exit(1);
2318 }
2319 }
2320
2321 static void opt_frame_pad_bottom(const char *arg)
2322 {
2323 frame_padbottom = atoi(arg);
2324 if (frame_padbottom < 0) {
2325 fprintf(stderr, "Incorrect bottom pad size\n");
2326 exit(1);
2327 }
2328 if ((frame_padbottom % 2) != 0) {
2329 fprintf(stderr, "Bottom pad size must be a multiple of 2\n");
2330 exit(1);
2331 }
2332 }
2333
2334
2335 static void opt_frame_pad_left(const char *arg)
2336 {
2337 frame_padleft = atoi(arg);
2338 if (frame_padleft < 0) {
2339 fprintf(stderr, "Incorrect left pad size\n");
2340 exit(1);
2341 }
2342 if ((frame_padleft % 2) != 0) {
2343 fprintf(stderr, "Left pad size must be a multiple of 2\n");
2344 exit(1);
2345 }
2346 }
2347
2348
2349 static void opt_frame_pad_right(const char *arg)
2350 {
2351 frame_padright = atoi(arg);
2352 if (frame_padright < 0) {
2353 fprintf(stderr, "Incorrect right pad size\n");
2354 exit(1);
2355 }
2356 if ((frame_padright % 2) != 0) {
2357 fprintf(stderr, "Right pad size must be a multiple of 2\n");
2358 exit(1);
2359 }
2360 }
2361
2362
2363 static void opt_frame_pix_fmt(const char *arg)
2364 {
2365 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2366 }
2367
2368 static void opt_frame_aspect_ratio(const char *arg)
2369 {
2370 int x = 0, y = 0;
2371 double ar = 0;
2372 const char *p;
2373
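/* The aspect ratio may be given either as "num:den" (e.g. "16:9") or as a
   decimal value (e.g. "1.7777"); arg == p after strtol() means the number
   stopped exactly at the ':' separator. */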
2374 p = strchr(arg, ':');
2375 if (p) {
2376 x = strtol(arg, (char **)&arg, 10);
2377 if (arg == p)
2378 y = strtol(arg+1, (char **)&arg, 10);
2379 if (x > 0 && y > 0)
2380 ar = (double)x / (double)y;
2381 } else
2382 ar = strtod(arg, (char **)&arg);
2383
2384 if (!ar) {
2385 fprintf(stderr, "Incorrect aspect ratio specification.\n");
2386 exit(1);
2387 }
2388 frame_aspect_ratio = ar;
2389 }
2390
2391 static void opt_gop_size(const char *arg)
2392 {
2393 gop_size = atoi(arg);
2394 }
2395
2396 static void opt_b_frames(const char *arg)
2397 {
2398 b_frames = atoi(arg);
2399 if (b_frames > FF_MAX_B_FRAMES) {
2400 fprintf(stderr, "\nCannot have more than %d B frames, increase FF_MAX_B_FRAMES.\n", FF_MAX_B_FRAMES);
2401 exit(1);
2402 } else if (b_frames < 1) {
2403 fprintf(stderr, "\nNumber of B frames must be higher than 0\n");
2404 exit(1);
2405 }
2406 }
2407
2408 static void opt_mb_decision(const char *arg)
2409 {
2410 mb_decision = atoi(arg);
2411 }
2412
2413 static void opt_mb_cmp(const char *arg)
2414 {
2415 mb_cmp = atoi(arg);
2416 }
2417
2418 static void opt_ildct_cmp(const char *arg)
2419 {
2420 ildct_cmp = atoi(arg);
2421 }
2422
2423 static void opt_sub_cmp(const char *arg)
2424 {
2425 sub_cmp = atoi(arg);
2426 }
2427
2428 static void opt_cmp(const char *arg)
2429 {
2430 cmp = atoi(arg);
2431 }
2432
2433 static void opt_pre_cmp(const char *arg)
2434 {
2435 pre_cmp = atoi(arg);
2436 }
2437
2438 static void opt_pre_me(const char *arg)
2439 {
2440 pre_me = atoi(arg);
2441 }
2442
2443 static void opt_lumi_mask(const char *arg)
2444 {
2445 lumi_mask = atof(arg);
2446 }
2447
2448 static void opt_dark_mask(const char *arg)
2449 {
2450 dark_mask = atof(arg);
2451 }
2452
2453 static void opt_scplx_mask(const char *arg)
2454 {
2455 scplx_mask = atof(arg);
2456 }
2457
2458 static void opt_tcplx_mask(const char *arg)
2459 {
2460 tcplx_mask = atof(arg);
2461 }
2462
2463 static void opt_p_mask(const char *arg)
2464 {
2465 p_mask = atof(arg);
2466 }
2467
2468 static void opt_qscale(const char *arg)
2469 {
2470 video_qscale = atof(arg);
2471 if (video_qscale < 0.01 ||
2472 video_qscale > 255) {
2473 fprintf(stderr, "qscale must be >= 0.01 and <= 255\n");
2474 exit(1);
2475 }
2476 }
2477
2478 static void opt_qsquish(const char *arg)
2479 {
2480 video_qsquish = atof(arg);
2481 if (video_qsquish < 0.0 ||
2482 video_qsquish > 99.0) {
2483 fprintf(stderr, "qsquish must be >= 0.0 and <= 99.0\n");
2484 exit(1);
2485 }
2486 }
2487
2488 static void opt_lelim(const char *arg)
2489 {
2490 video_lelim = atoi(arg);
2491 if (video_lelim < -99 ||
2492 video_lelim > 99) {
2493 fprintf(stderr, "lelim must be >= -99 and <= 99\n");
2494 exit(1);
2495 }
2496 }
2497
2498 static void opt_celim(const char *arg)
2499 {
2500 video_celim = atoi(arg);
2501 if (video_celim < -99 ||
2502 video_celim > 99) {
2503 fprintf(stderr, "celim must be >= -99 and <= 99\n");
2504 exit(1);
2505 }
2506 }
2507
2508 static void opt_lmax(const char *arg)
2509 {
2510 video_lmax = atof(arg)*FF_QP2LAMBDA;
2511 }
2512
2513 static void opt_lmin(const char *arg)
2514 {
2515 video_lmin = atof(arg)*FF_QP2LAMBDA;
2516 }
2517
2518 static void opt_qmin(const char *arg)
2519 {
2520 video_qmin = atoi(arg);
2521 if (video_qmin < 1 ||
2522 video_qmin > 31) {
2523 fprintf(stderr, "qmin must be >= 1 and <= 31\n");
2524 exit(1);
2525 }
2526 }
2527
2528 static void opt_qmax(const char *arg)
2529 {
2530 video_qmax = atoi(arg);
2531 if (video_qmax < 1 ||
2532 video_qmax > 31) {
2533 fprintf(stderr, "qmax must be >= 1 and <= 31\n");
2534 exit(1);
2535 }
2536 }
2537
2538 static void opt_mb_lmin(const char *arg)
2539 {
2540 video_mb_lmin = atof(arg)*FF_QP2LAMBDA;
2541 if (video_mb_lmin < 1 ||
2542 video_mb_lmin > FF_LAMBDA_MAX) {
2543 fprintf(stderr, "mblmin must be >= 1 and <= %d\n", FF_LAMBDA_MAX / FF_QP2LAMBDA);
2544 exit(1);
2545 }
2546 }
2547
2548 static void opt_mb_lmax(const char *arg)
2549 {
2550 video_mb_lmax = atof(arg)*FF_QP2LAMBDA;
2551 if (video_mb_lmax < 1 ||
2552 video_mb_lmax > FF_LAMBDA_MAX) {
2553 fprintf(stderr, "mblmax must be >= 1 and <= %d\n", FF_LAMBDA_MAX / FF_QP2LAMBDA);
2554 exit(1);
2555 }
2556 }
2557
2558 static void opt_qdiff(const char *arg)
2559 {
2560 video_qdiff = atoi(arg);
2561 if (video_qdiff < 0 ||
2562 video_qdiff > 31) {
2563 fprintf(stderr, "qdiff must be >= 0 and <= 31\n");
2564 exit(1);
2565 }
2566 }
2567
2568 static void opt_qblur(const char *arg)
2569 {
2570 video_qblur = atof(arg);
2571 }
2572
2573 static void opt_qcomp(const char *arg)
2574 {
2575 video_qcomp = atof(arg);
2576 }
2577
2578 static void opt_rc_initial_cplx(const char *arg)
2579 {
2580 video_rc_initial_cplx = atof(arg);
2581 }
2582 static void opt_b_qfactor(const char *arg)
2583 {
2584 video_b_qfactor = atof(arg);
2585 }
2586 static void opt_i_qfactor(const char *arg)
2587 {
2588 video_i_qfactor = atof(arg);
2589 }
2590 static void opt_b_qoffset(const char *arg)
2591 {
2592 video_b_qoffset = atof(arg);
2593 }
2594 static void opt_i_qoffset(const char *arg)
2595 {
2596 video_i_qoffset = atof(arg);
2597 }
2598
2599 static void opt_ibias(const char *arg)
2600 {
2601 video_intra_quant_bias = atoi(arg);
2602 }
2603 static void opt_pbias(const char *arg)
2604 {
2605 video_inter_quant_bias = atoi(arg);
2606 }
2607
2608 static void opt_packet_size(const char *arg)
2609 {
2610 packet_size= atoi(arg);
2611 }
2612
2613 static void opt_error_rate(const char *arg)
2614 {
2615 error_rate= atoi(arg);
2616 }
2617
2618 static void opt_strict(const char *arg)
2619 {
2620 strict= atoi(arg);
2621 }
2622
2623 static void opt_top_field_first(const char *arg)
2624 {
2625 top_field_first= atoi(arg);
2626 }
2627
2628 static void opt_noise_reduction(const char *arg)
2629 {
2630 noise_reduction= atoi(arg);
2631 }
2632
2633 static void opt_qns(const char *arg)
2634 {
2635 qns= atoi(arg);
2636 }
2637
2638 static void opt_sc_threshold(const char *arg)
2639 {
2640 sc_threshold= atoi(arg);
2641 }
2642
2643 static void opt_me_range(const char *arg)
2644 {
2645 me_range = atoi(arg);
2646 }
2647
2648 static void opt_thread_count(const char *arg)
2649 {
2650 thread_count= atoi(arg);
2651 #if !defined(HAVE_THREADS)
2652 if (verbose >= 0)
2653 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2654 #endif
2655 }
2656
2657 static void opt_audio_bitrate(const char *arg)
2658 {
2659 audio_bit_rate = atoi(arg) * 1000;
2660 }
2661
2662 static void opt_audio_rate(const char *arg)
2663 {
2664 audio_sample_rate = atoi(arg);
2665 }
2666
2667 static void opt_audio_channels(const char *arg)
2668 {
2669 audio_channels = atoi(arg);
2670 }
2671
2672 static void opt_video_device(const char *arg)
2673 {
2674 video_device = av_strdup(arg);
2675 }
2676
2677 static void opt_grab_device(const char *arg)
2678 {
2679 grab_device = av_strdup(arg);
2680 }
2681
2682 static void opt_video_channel(const char *arg)
2683 {
2684 video_channel = strtol(arg, NULL, 0);
2685 }
2686
2687 static void opt_video_standard(const char *arg)
2688 {
2689 video_standard = av_strdup(arg);
2690 }
2691
2692 static void opt_audio_device(const char *arg)
2693 {
2694 audio_device = av_strdup(arg);
2695 }
2696
2697 static void opt_audio_codec(const char *arg)
2698 {
2699 AVCodec *p;
2700
2701 if (!strcmp(arg, "copy")) {
2702 audio_stream_copy = 1;
2703 } else {
2704 p = first_avcodec;
2705 while (p) {
2706 if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_AUDIO)
2707 break;
2708 p = p->next;
2709 }
2710 if (p == NULL) {
2711 fprintf(stderr, "Unknown audio codec '%s'\n", arg);
2712 exit(1);
2713 } else {
2714 audio_codec_id = p->id;
2715 }
2716 }
2717 }
2718
2719 static void opt_audio_tag(const char *arg)
2720 {
2721 char *tail;
2722 audio_codec_tag= strtol(arg, &tail, 0);
2723
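/* if the argument is not a plain number, treat it as a four character code
   and pack it in little-endian order (opt_video_tag below does the same) */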
2724 if(!tail || *tail)
2725 audio_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
2726 }
2727
2728 static void opt_video_tag(const char *arg)
2729 {
2730 char *tail;
2731 video_codec_tag= strtol(arg, &tail, 0);
2732
2733 if(!tail || *tail)
2734 video_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
2735 }
2736
2737 static void add_frame_hooker(const char *arg)
2738 {
2739 int argc = 0;
2740 char *argv[64];
2741 int i;
2742 char *args = av_strdup(arg);
2743
2744 using_vhook = 1;
2745
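/* split the argument on spaces into an argv[] style array and hand it to
   frame_hook_add() */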
2746 argv[0] = strtok(args, " ");
2747 while (argc < 62 && (argv[++argc] = strtok(NULL, " "))) {
2748 }
2749
2750 i = frame_hook_add(argc, argv);
2751
2752 if (i != 0) {
2753 fprintf(stderr, "Failed to add video hook function: %s\n", arg);
2754 exit(1);
2755 }
2756 }
2757
2758 const char *motion_str[] = {
2759 "zero",
2760 "full",
2761 "log",
2762 "phods",
2763 "epzs",
2764 "x1",
2765 NULL,
2766 };
2767
2768 static void opt_motion_estimation(const char *arg)
2769 {
2770 const char **p;
2771 p = motion_str;
2772 for(;;) {
2773 if (!*p) {
2774 fprintf(stderr, "Unknown motion estimation method '%s'\n", arg);
2775 exit(1);
2776 }
2777 if (!strcmp(*p, arg))
2778 break;
2779 p++;
2780 }
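/* the position in motion_str[] plus one matches the ME_* method enumeration */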
2781 me_method = (p - motion_str) + 1;
2782 }
2783
2784 static void opt_video_codec(const char *arg)
2785 {
2786 AVCodec *p;
2787
2788 if (!strcmp(arg, "copy")) {
2789 video_stream_copy = 1;
2790 } else {
2791 p = first_avcodec;
2792 while (p) {
2793 if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_VIDEO)
2794 break;
2795 p = p->next;
2796 }
2797 if (p == NULL) {
2798 fprintf(stderr, "Unknown video codec '%s'\n", arg);
2799 exit(1);
2800 } else {
2801 video_codec_id = p->id;
2802 }
2803 }
2804 }
2805
2806 static void opt_map(const char *arg)
2807 {
2808 AVStreamMap *m;
2809 const char *p;
2810
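/* a stream map has the form "file_index.stream_index"; any single character
   is accepted as the separator */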
2811 p = arg;
2812 m = &stream_maps[nb_stream_maps++];
2813
2814 m->file_index = strtol(arg, (char **)&p, 0);
2815 if (*p)
2816 p++;
2817
2818 m->stream_index = strtol(p, (char **)&p, 0);
2819 }
2820
2821 static void opt_map_meta_data(const char *arg)
2822 {
2823 AVMetaDataMap *m;
2824 const char *p;
2825
2826 p = arg;
2827 m = &meta_data_maps[nb_meta_data_maps++];
2828
2829 m->out_file = strtol(arg, (char **)&p, 0);
2830 if (*p)
2831 p++;
2832
2833 m->in_file = strtol(p, (char **)&p, 0);
2834 }
2835
2836 static void opt_recording_time(const char *arg)
2837 {
2838 recording_time = parse_date(arg, 1);
2839 }
2840
2841 static void opt_start_time(const char *arg)
2842 {
2843 start_time = parse_date(arg, 1);
2844 }
2845
2846 static void opt_rec_timestamp(const char *arg)
2847 {
2848 rec_timestamp = parse_date(arg, 0) / 1000000;
2849 }
2850
2851 static void opt_input_ts_offset(const char *arg)
2852 {
2853 input_ts_offset = parse_date(arg, 1);
2854 }
2855
2856 static void opt_input_file(const char *filename)
2857 {
2858 AVFormatContext *ic;
2859 AVFormatParameters params, *ap = &params;
2860 int err, i, ret, rfps, rfps_base;
2861 int64_t timestamp;
2862
2863 if (!strcmp(filename, "-"))
2864 filename = "pipe:";
2865
2866 using_stdin |= !strncmp(filename, "pipe:", 5) ||
2867 !strcmp( filename, "/dev/stdin" );
2868
2869 /* get default parameters from command line */
2870 memset(ap, 0, sizeof(*ap));
2871 ap->sample_rate = audio_sample_rate;
2872 ap->channels = audio_channels;
2873 ap->time_base.den = frame_rate;
2874 ap->time_base.num = frame_rate_base;
2875 ap->width = frame_width + frame_padleft + frame_padright;
2876 ap->height = frame_height + frame_padtop + frame_padbottom;
2877 ap->image_format = image_format;
2878 ap->pix_fmt = frame_pix_fmt;
2879 ap->device = grab_device;
2880 ap->channel = video_channel;
2881 ap->standard = video_standard;
2882 ap->video_codec_id = video_codec_id;
2883 ap->audio_codec_id = audio_codec_id;
2884 if(pgmyuv_compatibility_hack)
2885 ap->video_codec_id= CODEC_ID_PGMYUV;
2886
2887 /* open the input file with generic libav function */
2888 err = av_open_input_file(&ic, filename, file_iformat, 0, ap);
2889 if (err < 0) {
2890 print_error(filename, err);
2891 exit(1);
2892 }
2893
2894 /* If there is not enough info to get the stream parameters, we decode
2895 the first frames to get them (used in the MPEG case, for example). */
2896 ret = av_find_stream_info(ic);
2897 if (ret < 0 && verbose >= 0) {
2898 fprintf(stderr, "%s: could not find codec parameters\n", filename);
2899 exit(1);
2900 }
2901
2902 timestamp = start_time;
2903 /* add the stream start time */
2904 if (ic->start_time != AV_NOPTS_VALUE)
2905 timestamp += ic->start_time;
2906
2907 /* if seeking requested, we execute it */
2908 if (start_time != 0) {
2909 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
2910 if (ret < 0) {
2911 fprintf(stderr, "%s: could not seek to position %0.3f\n",
2912 filename, (double)timestamp / AV_TIME_BASE);
2913 }
2914 /* reset seek info */
2915 start_time = 0;
2916 }
2917
2918 /* update the current parameters so that they match those of the input stream */
2919 for(i=0;i<ic->nb_streams;i++) {
2920 AVCodecContext *enc = &ic->streams[i]->codec;
2921 #if defined(HAVE_THREADS)
2922 if(thread_count>1)
2923 avcodec_thread_init(enc, thread_count);
2924 #endif
2925 enc->thread_count= thread_count;
2926 switch(enc->codec_type) {
2927 case CODEC_TYPE_AUDIO:
2928 //fprintf(stderr, "\nInput Audio channels: %d", enc->channels);
2929 audio_channels = enc->channels;
2930 audio_sample_rate = enc->sample_rate;
2931 if(audio_disable)
2932 ic->streams[i]->discard= AVDISCARD_ALL;
2933 break;
2934 case CODEC_TYPE_VIDEO:
2935 frame_height = enc->height;
2936 frame_width = enc->width;
2937 frame_aspect_ratio = av_q2d(enc->sample_aspect_ratio) * enc->width / enc->height;
2938 frame_pix_fmt = enc->pix_fmt;
2939 rfps = ic->streams[i]->r_frame_rate.num;
2940 rfps_base = ic->streams[i]->r_frame_rate.den;
2941 enc->workaround_bugs = workaround_bugs;
2942 enc->error_resilience = error_resilience;
2943 enc->error_concealment = error_concealment;
2944 enc->idct_algo = idct_algo;
2945 enc->debug = debug;
2946 enc->debug_mv = debug_mv;
2947 enc->lowres= lowres;
2948 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
2949 if(bitexact)
2950 enc->flags|= CODEC_FLAG_BITEXACT;
2951 if(me_threshold)
2952 enc->debug |= FF_DEBUG_MV;
2953
2954 if (enc->time_base.den != rfps || enc->time_base.num != rfps_base) {
2955
2956 if (verbose >= 0)
2957 fprintf(stderr,"\nSeems that stream %d comes from film source: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
2958 i, (float)enc->time_base.den / enc->time_base.num, enc->time_base.den, enc->time_base.num,
2959
2960 (float)rfps / rfps_base, rfps, rfps_base);
2961 }
2962 /* update the current frame rate to match the stream frame rate */
2963 frame_rate = rfps;
2964 frame_rate_base = rfps_base;
2965
2966 enc->rate_emu = rate_emu;
2967 if(video_disable)
2968 ic->streams[i]->discard= AVDISCARD_ALL;
2969 else if(video_discard)
2970 ic->streams[i]->discard= video_discard;
2971 break;
2972 case CODEC_TYPE_DATA:
2973 break;
2974 default:
2975 av_abort();
2976 }
2977 }
2978
2979 input_files[nb_input_files] = ic;
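/* unless -copyts is in effect, shift this file's timestamps so that the
   (possibly seeked-to) start position maps to zero, plus any user supplied
   input ts offset */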
2980 input_files_ts_offset[nb_input_files] = input_ts_offset - (copy_ts ? 0 : timestamp);
2981 /* dump the file content */
2982 if (verbose >= 0)
2983 dump_format(ic, nb_input_files, filename, 0);
2984
2985 nb_input_files++;
2986 file_iformat = NULL;
2987 file_oformat = NULL;
2988 image_format = NULL;
2989
2990 grab_device = NULL;
2991 video_channel = 0;
2992
2993 rate_emu = 0;
2994 }
2995
2996 static void opt_grab(const char *arg)
2997 {
2998 file_iformat = av_find_input_format(arg);
2999 opt_input_file("");
3000 }
3001
3002 static void check_audio_video_inputs(int *has_video_ptr, int *has_audio_ptr)
3003 {
3004 int has_video, has_audio, i, j;
3005 AVFormatContext *ic;
3006
3007 has_video = 0;
3008 has_audio = 0;
3009 for(j=0;j<nb_input_files;j++) {
3010 ic = input_files[j];
3011 for(i=0;i<ic->nb_streams;i++) {
3012 AVCodecContext *enc = &ic->streams[i]->codec;
3013 switch(enc->codec_type) {
3014 case CODEC_TYPE_AUDIO:
3015 has_audio = 1;
3016 break;
3017 case CODEC_TYPE_VIDEO:
3018 has_video = 1;
3019 break;
3020 case CODEC_TYPE_DATA:
3021 break;
3022 default:
3023 av_abort();
3024 }
3025 }
3026 }
3027 *has_video_ptr = has_video;
3028 *has_audio_ptr = has_audio;
3029 }
3030
3031 static void opt_output_file(const char *filename)
3032 {
3033 AVStream *st;
3034 AVFormatContext *oc;
3035 int use_video, use_audio, nb_streams, input_has_video, input_has_audio;
3036 int codec_id;
3037 AVFormatParameters params, *ap = &params;
3038
3039 if (!strcmp(filename, "-"))
3040 filename = "pipe:";
3041
3042 oc = av_alloc_format_context();
3043
3044 if (!file_oformat) {
3045 file_oformat = guess_format(NULL, filename, NULL);
3046 if (!file_oformat) {
3047 fprintf(stderr, "Unable to find a suitable output format for '%s'\n",
3048 filename);
3049 exit(1);
3050 }
3051 }
3052
3053 oc->oformat = file_oformat;
3054
3055 if (!strcmp(file_oformat->name, "ffm") &&
3056 strstart(filename, "http:", NULL)) {
3057 /* special case for files sent to ffserver: we get the stream
3058 parameters from ffserver */
3059 if (read_ffserver_streams(oc, filename) < 0) {
3060 fprintf(stderr, "Could not read stream parameters from '%s'\n", filename);
3061 exit(1);
3062 }
3063 } else {
3064 use_video = file_oformat->video_codec != CODEC_ID_NONE || video_stream_copy;
3065 use_audio = file_oformat->audio_codec != CODEC_ID_NONE || audio_stream_copy;
3066
3067 /* disable if no corresponding type found and at least one
3068 input file */
3069 if (nb_input_files > 0) {
3070 check_audio_video_inputs(&input_has_video, &input_has_audio);
3071 if (!input_has_video)
3072 use_video = 0;
3073 if (!input_has_audio)
3074 use_audio = 0;
3075 }
3076
3077 /* manual disable */
3078 if (audio_disable) {
3079 use_audio = 0;
3080 }
3081 if (video_disable) {
3082 use_video = 0;
3083 }
3084
3085 nb_streams = 0;
3086 if (use_video) {
3087 AVCodecContext *video_enc;
3088
3089 st = av_new_stream(oc, nb_streams++);
3090 if (!st) {
3091 fprintf(stderr, "Could not alloc stream\n");
3092 exit(1);
3093 }
3094 #if defined(HAVE_THREADS)
3095 if(thread_count>1)
3096 avcodec_thread_init(&st->codec, thread_count);
3097 #endif
3098
3099 video_enc = &st->codec;
3100
3101 if(video_codec_tag)
3102 video_enc->codec_tag= video_codec_tag;
3103
3104 if (file_oformat->flags & AVFMT_GLOBALHEADER)
3105 video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
3106 if (video_stream_copy) {
3107 st->stream_copy = 1;
3108 video_enc->codec_type = CODEC_TYPE_VIDEO;
3109 } else {
3110 char *p;
3111 int i;
3112 AVCodec *codec;
3113
3114 codec_id = av_guess_codec(file_oformat, NULL, filename, NULL, CODEC_TYPE_VIDEO);
3115 if (video_codec_id != CODEC_ID_NONE)
3116 codec_id = video_codec_id;
3117
3118 video_enc->codec_id = codec_id;
3119 codec = avcodec_find_encoder(codec_id);
3120
3121 video_enc->bit_rate = video_bit_rate;
3122 video_enc->bit_rate_tolerance = video_bit_rate_tolerance;
3123 video_enc->time_base.den = frame_rate;
3124 video_enc->time_base.num = frame_rate_base;
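/* if the encoder only supports a fixed set of frame rates, pick the
   supported rate closest to the requested one (smallest |req - p|) */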
3125 if(codec && codec->supported_framerates){
3126 const AVRational *p= codec->supported_framerates;
3127 AVRational req= (AVRational){frame_rate, frame_rate_base};
3128 const AVRational *best=NULL;
3129 AVRational best_error= (AVRational){INT_MAX, 1};
3130 for(; p->den!=0; p++){
3131 AVRational error= av_sub_q(req, *p);
3132 if(error.num <0) error.num *= -1;
3133 if(av_cmp_q(error, best_error) < 0){
3134 best_error= error;
3135 best= p;
3136 }
3137 }
3138 video_enc->time_base.den= best->num;
3139 video_enc->time_base.num= best->den;
3140 }
3141
3142 video_enc->width = frame_width + frame_padright + frame_padleft;
3143 video_enc->height = frame_height + frame_padtop + frame_padbottom;
3144 video_enc->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255);
3145 video_enc->pix_fmt = frame_pix_fmt;
3146
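/* if the requested pixel format is not supported by the encoder, fall back
   to the first entry in its pix_fmts list */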
3147 if(codec && codec->pix_fmts){
3148 const enum PixelFormat *p= codec->pix_fmts;
3149 for(; *p!=-1; p++){
3150 if(*p == video_enc->pix_fmt)
3151 break;
3152 }
3153 if(*p == -1)
3154 video_enc->pix_fmt = codec->pix_fmts[0];
3155 }
3156
3157 if (!intra_only)
3158 video_enc->gop_size = gop_size;
3159 else
3160 video_enc->gop_size = 0;
3161 if (video_qscale || same_quality) {
3162 video_enc->flags |= CODEC_FLAG_QSCALE;
3163 video_enc->global_quality=
3164 st->quality = FF_QP2LAMBDA * video_qscale;
3165 }
3166
3167 if(intra_matrix)
3168 video_enc->intra_matrix = intra_matrix;
3169 if(inter_matrix)
3170 video_enc->inter_matrix = inter_matrix;
3171
3172 if(bitexact)
3173 video_enc->flags |= CODEC_FLAG_BITEXACT;
3174
3175 video_enc->mb_decision = mb_decision;
3176 video_enc->mb_cmp = mb_cmp;
3177 video_enc->ildct_cmp = ildct_cmp;
3178 video_enc->me_sub_cmp = sub_cmp;
3179 video_enc->me_cmp = cmp;
3180 video_enc->me_pre_cmp = pre_cmp;
3181 video_enc->pre_me = pre_me;
3182 video_enc->lumi_masking = lumi_mask;
3183 video_enc->dark_masking = dark_mask;
3184 video_enc->spatial_cplx_masking = scplx_mask;
3185 video_enc->temporal_cplx_masking = tcplx_mask;
3186 video_enc->p_masking = p_mask;
3187 video_enc->quantizer_noise_shaping= qns;
3188
3189 if (use_umv) {
3190 video_enc->flags |= CODEC_FLAG_H263P_UMV;
3191 }
3192 if (use_ss) {
3193 video_enc->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;
3194 }
3195 if (use_aic) {
3196 video_enc->flags |= CODEC_FLAG_H263P_AIC;
3197 }
3198 if (use_aiv) {
3199 video_enc->flags |= CODEC_FLAG_H263P_AIV;
3200 }
3201 if (use_4mv) {
3202 video_enc->flags |= CODEC_FLAG_4MV;
3203 }
3204 if (use_obmc) {
3205 video_enc->flags |= CODEC_FLAG_OBMC;
3206 }
3207 if (use_loop) {
3208 video_enc->flags |= CODEC_FLAG_LOOP_FILTER;
3209 }
3210
3211 if(use_part) {
3212 video_enc->flags |= CODEC_FLAG_PART;
3213 }
3214 if (use_alt_scan) {
3215 video_enc->flags |= CODEC_FLAG_ALT_SCAN;
3216 }
3217 if (use_trell) {
3218 video_enc->flags |= CODEC_FLAG_TRELLIS_QUANT;
3219 }
3220 if (use_mv0) {
3221 video_enc->flags |= CODEC_FLAG_MV0;
3222 }
3223 if (do_normalize_aqp) {
3224 video_enc->flags |= CODEC_FLAG_NORMALIZE_AQP;
3225 }
3226 if (use_scan_offset) {
3227 video_enc->flags |= CODEC_FLAG_SVCD_SCAN_OFFSET;
3228 }
3229 if (closed_gop) {
3230 video_enc->flags |= CODEC_FLAG_CLOSED_GOP;
3231 }
3232 if (strict_gop) {
3233 video_enc->flags2 |= CODEC_FLAG2_STRICT_GOP;
3234 }
3235 if (use_qpel) {
3236 video_enc->flags |= CODEC_FLAG_QPEL;
3237 }
3238 if (use_qprd) {
3239 video_enc->flags |= CODEC_FLAG_QP_RD;
3240 }
3241 if (use_cbprd) {
3242 video_enc->flags |= CODEC_FLAG_CBP_RD;
3243 }
3244 if (b_frames) {
3245 video_enc->max_b_frames = b_frames;
3246 video_enc->b_frame_strategy = b_strategy;
3247 video_enc->b_quant_factor = 2.0;
3248 }
3249 if (do_interlace_dct) {
3250 video_enc->flags |= CODEC_FLAG_INTERLACED_DCT;
3251 }
3252 if (do_interlace_me) {
3253 video_enc->flags |= CODEC_FLAG_INTERLACED_ME;
3254 }
3255 if (no_output) {
3256 video_enc->flags2 |= CODEC_FLAG2_NO_OUTPUT;
3257 }
3258 video_enc->qmin = video_qmin;
3259 video_enc->qmax = video_qmax;
3260 video_enc->lmin = video_lmin;
3261 video_enc->lmax = video_lmax;
3262 video_enc->rc_qsquish = video_qsquish;
3263 video_enc->luma_elim_threshold = video_lelim;
3264 video_enc->chroma_elim_threshold = video_celim;
3265 video_enc->mb_lmin = video_mb_lmin;
3266 video_enc->mb_lmax = video_mb_lmax;
3267 video_enc->max_qdiff = video_qdiff;
3268 video_enc->qblur = video_qblur;
3269 video_enc->qcompress = video_qcomp;
3270 video_enc->rc_eq = video_rc_eq;
3271 video_enc->debug = debug;
3272 video_enc->debug_mv = debug_mv;
3273 video_enc->workaround_bugs = workaround_bugs;
3274 video_enc->thread_count = thread_count;
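/* parse the rc override string: a '/' separated list of
   "start_frame,end_frame,q" triplets, where a positive q is a fixed
   quantizer and a negative q is a quality factor in percent */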
3275 p= video_rc_override_string;
3276 for(i=0; p; i++){
3277 int start, end, q;
3278 int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
3279 if(e!=3){
3280 fprintf(stderr, "error parsing rc_override\n");
3281 exit(1);
3282 }
3283 video_enc->rc_override=
3284 av_realloc(video_enc->rc_override,
3285 sizeof(RcOverride)*(i+1));
3286 video_enc->rc_override[i].start_frame= start;
3287 video_enc->rc_override[i].end_frame = end;
3288 if(q>0){
3289 video_enc->rc_override[i].qscale= q;
3290 video_enc->rc_override[i].quality_factor= 1.0;
3291 }
3292 else{
3293 video_enc->rc_override[i].qscale= 0;
3294 video_enc->rc_override[i].quality_factor= -q/100.0;
3295 }
3296 p= strchr(p, '/');
3297 if(p) p++;
3298 }
3299 video_enc->rc_override_count=i;
3300
3301 video_enc->rc_max_rate = video_rc_max_rate;
3302 video_enc->rc_min_rate = video_rc_min_rate;
3303 video_enc->rc_buffer_size = video_rc_buffer_size;
3304 video_enc->rc_initial_buffer_occupancy = video_rc_buffer_size*3/4;
3305 video_enc->rc_buffer_aggressivity= video_rc_buffer_aggressivity;
3306 video_enc->rc_initial_cplx= video_rc_initial_cplx;
3307 video_enc->i_quant_factor = video_i_qfactor;
3308 video_enc->b_quant_factor = video_b_qfactor;
3309 video_enc->i_quant_offset = video_i_qoffset;
3310 video_enc->b_quant_offset = video_b_qoffset;
3311 video_enc->intra_quant_bias = video_intra_quant_bias;
3312 video_enc->inter_quant_bias = video_inter_quant_bias;
3313 video_enc->dct_algo = dct_algo;
3314 video_enc->idct_algo = idct_algo;
3315 video_enc->me_threshold= me_threshold;
3316 video_enc->mb_threshold= mb_threshold;
3317 video_enc->intra_dc_precision= intra_dc_precision - 8;
3318 video_enc->strict_std_compliance = strict;
3319 video_enc->error_rate = error_rate;
3320 video_enc->noise_reduction= noise_reduction;
3321 video_enc->scenechange_threshold= sc_threshold;
3322 video_enc->me_range = me_range;
3323 video_enc->coder_type= coder;
3324 video_enc->context_model= context;
3325 video_enc->prediction_method= predictor;
3326 video_enc->profile= video_profile;
3327 video_enc->level= video_level;
3328 video_enc->nsse_weight= nsse_weight;
3329 video_enc->me_subpel_quality= subpel_quality;
3330 video_enc->me_penalty_compensation= me_penalty_compensation;
3331 video_enc->frame_skip_threshold= frame_skip_threshold;
3332 video_enc->frame_skip_factor= frame_skip_factor;
3333 video_enc->frame_skip_exp= frame_skip_exp;
3334 video_enc->frame_skip_cmp= frame_skip_cmp;
3335
3336 if(packet_size){
3337 video_enc->rtp_mode= 1;
3338 video_enc->rtp_payload_size= packet_size;
3339 }
3340
3341 if (do_psnr)
3342 video_enc->flags|= CODEC_FLAG_PSNR;
3343
3344 video_enc->me_method = me_method;
3345
3346 /* two pass mode */
3347 if (do_pass) {
3348 if (do_pass == 1) {
3349 video_enc->flags |= CODEC_FLAG_PASS1;
3350 } else {
3351 video_enc->flags |= CODEC_FLAG_PASS2;
3352 }
3353 }
3354 }
3355 }
3356
3357 if (use_audio) {
3358 AVCodecContext *audio_enc;
3359
3360 st = av_new_stream(oc, nb_streams++);
3361 if (!st) {
3362 fprintf(stderr, "Could not alloc stream\n");
3363 exit(1);
3364 }
3365 #if defined(HAVE_THREADS)
3366 if(thread_count>1)
3367 avcodec_thread_init(&st->codec, thread_count);
3368 #endif
3369
3370 audio_enc = &st->codec;
3371 audio_enc->codec_type = CODEC_TYPE_AUDIO;
3372
3373 if(audio_codec_tag)
3374 audio_enc->codec_tag= audio_codec_tag;
3375
3376 if (file_oformat->flags & AVFMT_GLOBALHEADER)
3377 audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
3378 if (audio_stream_copy) {
3379 st->stream_copy = 1;
3380 audio_enc->channels = audio_channels;
3381 } else {
3382 codec_id = av_guess_codec(file_oformat, NULL, filename, NULL, CODEC_TYPE_AUDIO);
3383 if (audio_codec_id != CODEC_ID_NONE)
3384 codec_id = audio_codec_id;
3385 audio_enc->codec_id = codec_id;
3386
3387 audio_enc->bit_rate = audio_bit_rate;
3388 audio_enc->strict_std_compliance = strict;
3389 audio_enc->thread_count = thread_count;
3390 /* For audio codecs other than AC3 or DTS we limit */
3391 /* the number of coded channels to stereo */
3392 if (audio_channels > 2 && codec_id != CODEC_ID_AC3
3393 && codec_id != CODEC_ID_DTS) {
3394 audio_enc->channels = 2;
3395 } else
3396 audio_enc->channels = audio_channels;
3397 }
3398 audio_enc->sample_rate = audio_sample_rate;
3399 }
3400
3401 oc->nb_streams = nb_streams;
3402
3403 if (!nb_streams) {
3404 fprintf(stderr, "No audio or video streams available\n");
3405 exit(1);
3406 }
3407
3408 oc->timestamp = rec_timestamp;
3409
3410 if (str_title)
3411 pstrcpy(oc->title, sizeof(oc->title), str_title);
3412 if (str_author)
3413 pstrcpy(oc->author, sizeof(oc->author), str_author);
3414 if (str_copyright)
3415 pstrcpy(oc->copyright, sizeof(oc->copyright), str_copyright);
3416 if (str_comment)
3417 pstrcpy(oc->comment, sizeof(oc->comment), str_comment);
3418 }
3419
3420 output_files[nb_output_files++] = oc;
3421
3422 pstrcpy(oc->filename, sizeof(oc->filename), filename);
3423
3424 /* check the filename in case an image number is expected */
3425 if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
3426 if (filename_number_test(oc->filename) < 0) {
3427 print_error(oc->filename, AVERROR_NUMEXPECTED);
3428 exit(1);
3429 }
3430 }
3431
3432 if (!(oc->oformat->flags & AVFMT_NOFILE)) {
3433 /* test if it already exists to avoid losing precious files */
3434 if (!file_overwrite &&
3435 (strchr(filename, ':') == NULL ||
3436 strstart(filename, "file:", NULL))) {
3437 if (url_exist(filename)) {
3438 int c;
3439
3440 if ( !using_stdin ) {
3441 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3442 fflush(stderr);
3443 c = getchar();
3444 if (toupper(c) != 'Y') {
3445 fprintf(stderr, "Not overwriting - exiting\n");
3446 exit(1);
3447 }
3448 }
3449 else {
3450 fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
3451 exit(1);
3452 }
3453 }
3454 }
3455
3456 /* open the file */
3457 if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
3458 fprintf(stderr, "Could not open '%s'\n", filename);
3459 exit(1);
3460 }
3461 }
3462
3463 memset(ap, 0, sizeof(*ap));
3464 ap->image_format = image_format;
3465 if (av_set_parameters(oc, ap) < 0) {
3466 fprintf(stderr, "%s: Invalid encoding parameters\n",
3467 oc->filename);
3468 exit(1);
3469 }
3470
3471 oc->packet_size= mux_packet_size;
3472 oc->mux_rate= mux_rate;
3473 oc->preload= (int)(mux_preload*AV_TIME_BASE);
3474 oc->max_delay= (int)(mux_max_delay*AV_TIME_BASE);
3475
3476 /* reset some options */
3477 file_oformat = NULL;
3478 file_iformat = NULL;
3479 image_format = NULL;
3480 audio_disable = 0;
3481 video_disable = 0;
3482 audio_codec_id = CODEC_ID_NONE;
3483 video_codec_id = CODEC_ID_NONE;
3484 audio_stream_copy = 0;
3485 video_stream_copy = 0;
3486 }
3487
3488 /* prepare dummy grab inputs (audio/video capture devices) */
3489 static void prepare_grab(void)
3490 {
3491 int has_video, has_audio, i, j;
3492 AVFormatContext *oc;
3493 AVFormatContext *ic;
3494 AVFormatParameters vp1, *vp = &vp1;
3495 AVFormatParameters ap1, *ap = &ap1;
3496
3497 /* see if audio/video inputs are needed */
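/* also determine the maximum sample rate, channel count, picture size and
   frame rate requested by the outputs, so that a single grab device can
   feed all of them */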
3498 has_video = 0;
3499 has_audio = 0;
3500 memset(ap, 0, sizeof(*ap));
3501 memset(vp, 0, sizeof(*vp));
3502 vp->time_base.num= 1;
3503 for(j=0;j<nb_output_files;j++) {
3504 oc = output_files[j];
3505 for(i=0;i<oc->nb_streams;i++) {
3506 AVCodecContext *enc = &oc->streams[i]->codec;
3507 switch(enc->codec_type) {
3508 case CODEC_TYPE_AUDIO:
3509 if (enc->sample_rate > ap->sample_rate)
3510 ap->sample_rate = enc->sample_rate;
3511 if (enc->channels > ap->channels)
3512 ap->channels = enc->channels;
3513 has_audio = 1;
3514 break;
3515 case CODEC_TYPE_VIDEO:
3516 if (enc->width > vp->width)
3517 vp->width = enc->width;
3518 if (enc->height > vp->height)
3519 vp->height = enc->height;
3520
3521 if (vp->time_base.num*(int64_t)enc->time_base.den > enc->time_base.num*(int64_t)vp->time_base.den){
3522 vp->time_base = enc->time_base;
3523 }
3524 has_video = 1;
3525 break;
3526 default:
3527 av_abort();
3528 }
3529 }
3530 }
3531
3532 if (has_video == 0 && has_audio == 0) {
3533 fprintf(stderr, "Output file must have at least one audio or video stream\n");
3534 exit(1);
3535 }
3536
3537 if (has_video) {
3538 AVInputFormat *fmt1;
3539 fmt1 = av_find_input_format(video_grab_format);
3540 vp->device = video_device;
3541 vp->channel = video_channel;
3542 vp->standard = video_standard;
3543 if (av_open_input_file(&ic, "", fmt1, 0, vp) < 0) {
3544 fprintf(stderr, "Could not find video grab device\n");
3545 exit(1);
3546 }
3547 /* If there is not enough info to get the stream parameters, we decode
3548 the first frames to get them. */
3549 if ((ic->ctx_flags & AVFMTCTX_NOHEADER) && av_find_stream_info(ic) < 0) {
3550 fprintf(stderr, "Could not find video grab parameters\n");
3551 exit(1);
3552 }
3553 /* for now, video grab has exactly one stream */
3554 ic->streams[0]->r_frame_rate.num = vp->time_base.den;
3555 ic->streams[0]->r_frame_rate.den = vp->time_base.num;
3556 input_files[nb_input_files] = ic;
3557
3558 if (verbose >= 0)
3559 dump_format(ic, nb_input_files, "", 0);
3560
3561 nb_input_files++;
3562 }
3563 if (has_audio && audio_grab_format) {
3564 AVInputFormat *fmt1;
3565 fmt1 = av_find_input_format(audio_grab_format);
3566 ap->device = audio_device;
3567 if (av_open_input_file(&ic, "", fmt1, 0, ap) < 0) {
3568 fprintf(stderr, "Could not find audio grab device\n");
3569 exit(1);
3570 }
3571 input_files[nb_input_files] = ic;
3572
3573 if (verbose >= 0)
3574 dump_format(ic, nb_input_files, "", 0);
3575
3576 nb_input_files++;
3577 }
3578 }
3579
3580 /* same option as mencoder */
3581 static void opt_pass(const char *pass_str)
3582 {
3583 int pass;
3584 pass = atoi(pass_str);
3585 if (pass != 1 && pass != 2) {
3586 fprintf(stderr, "pass number can be only 1 or 2\n");
3587 exit(1);
3588 }
3589 do_pass = pass;
3590 }
3591
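/* getutime() returns the user CPU time consumed so far in microseconds;
   where getrusage() is unavailable (win32, os2) wall clock time from
   av_gettime() is used instead */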
3592 #if defined(CONFIG_WIN32) || defined(CONFIG_OS2)
3593 static int64_t getutime(void)
3594 {
3595 return av_gettime();
3596 }
3597 #else
3598 static int64_t getutime(void)
3599 {
3600 struct rusage rusage;
3601
3602 getrusage(RUSAGE_SELF, &rusage);
3603 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
3604 }
3605 #endif
3606
3607 extern int ffm_nopts;
3608
3609 static void opt_bitexact(void)
3610 {
3611 bitexact=1;
3612 /* disable generation of real time pts in ffm (it needs to be suppressed anyway) */
3613 ffm_nopts = 1;
3614 }
3615
3616 static void show_formats(void)
3617 {
3618 AVInputFormat *ifmt;
3619 AVOutputFormat *ofmt;
3620 AVImageFormat *image_fmt;
3621 URLProtocol *up;
3622 AVCodec *p, *p2;
3623 const char **pp, *last_name;
3624
3625 printf("File formats:\n");
3626 last_name= "000";
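/* the format lists are not sorted, so print them in alphabetical order by
   repeatedly searching for the smallest name greater than the one printed
   last (the codec table below uses the same trick) */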
3627 for(;;){
3628 int decode=0;
3629 int encode=0;
3630 const char *name=NULL;
3631 const char *long_name=NULL;
3632
3633 for(ofmt = first_oformat; ofmt != NULL; ofmt = ofmt->next) {
3634 if((name == NULL || strcmp(ofmt->name, name)<0) &&
3635 strcmp(ofmt->name, last_name)>0){
3636 name= ofmt->name;
3637 long_name= ofmt->long_name;
3638 encode=1;
3639 }
3640 }
3641 for(ifmt = first_iformat; ifmt != NULL; ifmt = ifmt->next) {
3642 if((name == NULL || strcmp(ifmt->name, name)<0) &&
3643 strcmp(ifmt->name, last_name)>0){
3644 name= ifmt->name;
3645 long_name= ifmt->long_name;
3646 encode=0;
3647 }
3648 if(name && strcmp(ifmt->name, name)==0)
3649 decode=1;
3650 }
3651 if(name==NULL)
3652 break;
3653 last_name= name;
3654
3655 printf(
3656 " %s%s %-15s %s\n",
3657 decode ? "D":" ",
3658 encode ? "E":" ",
3659 name,
3660 long_name ? long_name:" ");
3661 }
3662 printf("\n");
3663
3664 printf("Image formats (filename extensions, if any, follow):\n");
3665 for(image_fmt = first_image_format; image_fmt != NULL;
3666 image_fmt = image_fmt->next) {
3667 printf(
3668 " %s%s %-6s %s\n",
3669 image_fmt->img_read ? "D":" ",
3670 image_fmt->img_write ? "E":" ",
3671 image_fmt->name,
3672 image_fmt->extensions ? image_fmt->extensions:" ");
3673 }
3674 printf("\n");
3675
3676 printf("Codecs:\n");
3677 last_name= "000";
3678 for(;;){
3679 int decode=0;
3680 int encode=0;
3681 int cap=0;
3682
3683 p2=NULL;
3684 for(p = first_avcodec; p != NULL; p = p->next) {
3685 if((p2==NULL || strcmp(p->name, p2->name)<0) &&
3686 strcmp(p->name, last_name)>0){
3687 p2= p;
3688 decode= encode= cap=0;
3689 }
3690 if(p2 && strcmp(p->name, p2->name)==0){
3691 if(p->decode) decode=1;
3692 if(p->encode) encode=1;
3693 cap |= p->capabilities;
3694 }
3695 }
3696 if(p2==NULL)
3697 break;
3698 last_name= p2->name;
3699
3700 printf(
3701 " %s%s%s%s%s%s %s",
3702 decode ? "D": (/*p2->decoder ? "d":*/" "),
3703 encode ? "E":" ",
3704 p2->type == CODEC_TYPE_AUDIO ? "A":"V",
3705 cap & CODEC_CAP_DRAW_HORIZ_BAND ? "S":" ",
3706 cap & CODEC_CAP_DR1 ? "D":" ",
3707 cap & CODEC_CAP_TRUNCATED ? "T":" ",
3708 p2->name);
3709 /* if(p2->decoder && decode==0)
3710 printf(" use %s for decoding", p2->decoder->name);*/
3711 printf("\n");
3712 }
3713 printf("\n");
3714
3715 printf("Supported file protocols:\n");
3716 for(up = first_protocol; up != NULL; up = up->next)
3717 printf(" %s:", up->name);
3718 printf("\n");
3719
3720 printf("Frame size, frame rate abbreviations:\n ntsc pal qntsc qpal sntsc spal film ntsc-film sqcif qcif cif 4cif\n");
3721 printf("Motion estimation methods:\n");
3722 pp = motion_str;
3723 while (*pp) {
3724 printf(" %s", *pp);
3725 if ((pp - motion_str + 1) == ME_ZERO)
3726 printf("(fastest)");
3727 else if ((pp - motion_str + 1) == ME_FULL)
3728 printf("(slowest)");
3729 else if ((pp - motion_str + 1) == ME_EPZS)
3730 printf("(default)");
3731 pp++;
3732 }
3733 printf("\n\n");
3734 printf(
3735 "Note, the names of encoders and decoders don't always match, so there are\n"
3736 "several cases where the above table shows encoder-only or decoder-only entries\n"
3737 "even though both encoding and decoding are supported. For example, the h263\n"
3738 "decoder corresponds to the h263 and h263p encoders; for file formats it is\n"
3739 "even worse.\n");
3740 exit(1);
3741 }
3742
3743 void parse_matrix_coeffs(uint16_t *dest, const char *str)
3744 {
3745 int i;
3746 const char *p = str;
3747 for(i = 0;; i++) {
3748 dest[i] = atoi(p);
3749 if(i == 63)
3750 break;
3751 p = strchr(p, ',');
3752 if(!p) {
3753 fprintf(stderr, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
3754 exit(1);
3755 }
3756 p++;
3757 }
3758 }
3759
3760 void opt_inter_matrix(const char *arg)
3761 {
3762 inter_matrix = av_mallocz(sizeof(uint16_t) * 64);
3763 parse_matrix_coeffs(inter_matrix, arg);
3764 }
3765
3766 void opt_intra_matrix(const char *arg)
3767 {
3768 intra_matrix = av_mallocz(sizeof(uint16_t) * 64);
3769 parse_matrix_coeffs(intra_matrix, arg);
3770 }
3771
3772 static void opt_target(const char *arg)
3773 {
3774 int norm = -1;
3775 static const char *const frame_rates[] = {"25", "30000/1001", "24000/1001"};
3776
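/* the target may be prefixed with "pal-", "ntsc-" or "film-"; without a
   prefix the norm is guessed from the configured frame rate, or failing
   that from the frame rates of the input files */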
3777 if(!strncmp(arg, "pal-", 4)) {
3778 norm = 0;
3779 arg += 4;
3780 } else if(!strncmp(arg, "ntsc-", 5)) {
3781 norm = 1;
3782 arg += 5;
3783 } else if(!strncmp(arg, "film-", 5)) {
3784 norm = 2;
3785 arg += 5;
3786 } else {
3787 int fr;
3788 /* Calculate FR via float to avoid int overflow */
3789 fr = (int)(frame_rate * 1000.0 / frame_rate_base);
3790 if(fr == 25000) {
3791 norm = 0;
3792 } else if((fr == 29970) || (fr == 23976)) {
3793 norm = 1;
3794 } else {
3795 /* Try to determine PAL/NTSC by peeking in the input files */
3796 if(nb_input_files) {
3797 int i, j;
3798 for(j = 0; j < nb_input_files; j++) {
3799 for(i = 0; i < input_files[j]->nb_streams; i++) {
3800 AVCodecContext *c = &input_files[j]->streams[i]->codec;
3801 if(c->codec_type != CODEC_TYPE_VIDEO)
3802 continue;
3803 fr = c->time_base.den * 1000 / c->time_base.num;
3804 if(fr == 25000) {
3805 norm = 0;
3806 break;
3807 } else if((fr == 29970) || (fr == 23976)) {
3808 norm = 1;
3809 break;
3810 }
3811 }
3812 if(norm >= 0)
3813 break;
3814 }
3815 }