1 /*
2 * FFmpeg main
3 * Copyright (c) 2000-2003 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19 #define HAVE_AV_CONFIG_H
20 #include <limits.h>
21 #include "avformat.h"
22 #include "framehook.h"
23 #include "dsputil.h"
24
25 #ifndef CONFIG_WIN32
26 #include <unistd.h>
27 #include <fcntl.h>
28 #include <sys/ioctl.h>
29 #include <sys/time.h>
30 #include <termios.h>
31 #include <sys/resource.h>
32 #include <signal.h>
33 #endif
34 #ifdef CONFIG_OS2
35 #include <sys/types.h>
36 #include <sys/select.h>
37 #include <stdlib.h>
38 #endif
39 #undef time //needed because HAVE_AV_CONFIG_H is defined on top
40 #include <time.h>
41
42 #include "cmdutils.h"
43
44 #undef NDEBUG
45 #include <assert.h>
46
47 #if !defined(INFINITY) && defined(HUGE_VAL)
48 #define INFINITY HUGE_VAL
49 #endif
50
51 /* select an input stream for an output stream */
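/* file_index/stream_index identify the mapped input stream; sync_file_index/
   sync_stream_index identify the input stream used as the A/V sync reference
   (by default the mapped stream itself, see how ost->sync_ist is set below) */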
52 typedef struct AVStreamMap {
53 int file_index;
54 int stream_index;
55 int sync_file_index;
56 int sync_stream_index;
57 } AVStreamMap;
58
59 /** map the meta data of an input file to an output file */
60 typedef struct AVMetaDataMap {
61 int out_file;
62 int in_file;
63 } AVMetaDataMap;
64
65 extern const OptionDef options[];
66
67 static void show_help(void);
68 static void show_license(void);
69
70 #define MAX_FILES 20
71
72 static AVFormatContext *input_files[MAX_FILES];
73 static int64_t input_files_ts_offset[MAX_FILES];
74 static int nb_input_files = 0;
75
76 static AVFormatContext *output_files[MAX_FILES];
77 static int nb_output_files = 0;
78
79 static AVStreamMap stream_maps[MAX_FILES];
80 static int nb_stream_maps;
81
82 static AVMetaDataMap meta_data_maps[MAX_FILES];
83 static int nb_meta_data_maps;
84
85 static AVInputFormat *file_iformat;
86 static AVOutputFormat *file_oformat;
87 static AVImageFormat *image_format;
88 static int frame_width = 0;
89 static int frame_height = 0;
90 static float frame_aspect_ratio = 0;
91 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
92 static int frame_padtop = 0;
93 static int frame_padbottom = 0;
94 static int frame_padleft = 0;
95 static int frame_padright = 0;
96 static int padcolor[3] = {16,128,128}; /* default to black */
97 static int frame_topBand = 0;
98 static int frame_bottomBand = 0;
99 static int frame_leftBand = 0;
100 static int frame_rightBand = 0;
101 static int max_frames[3] = {INT_MAX, INT_MAX, INT_MAX};
102 static int frame_rate = 25;
103 static int frame_rate_base = 1;
104 static int video_bit_rate = 200*1000;
105 static int video_bit_rate_tolerance = 4000*1000;
106 static float video_qscale = 0;
107 static int video_qmin = 2;
108 static int video_qmax = 31;
109 static int video_lmin = 2*FF_QP2LAMBDA;
110 static int video_lmax = 31*FF_QP2LAMBDA;
111 static int video_mb_lmin = 2*FF_QP2LAMBDA;
112 static int video_mb_lmax = 31*FF_QP2LAMBDA;
113 static int video_qdiff = 3;
114 static int video_lelim = 0;
115 static int video_celim = 0;
116 static float video_qblur = 0.5;
117 static float video_qsquish = 0.0;
118 static float video_qcomp = 0.5;
119 static uint16_t *intra_matrix = NULL;
120 static uint16_t *inter_matrix = NULL;
121 #if 0 //experimental, (can be removed)
122 static float video_rc_qsquish=1.0;
123 static float video_rc_qmod_amp=0;
124 static int video_rc_qmod_freq=0;
125 #endif
126 static char *video_rc_override_string=NULL;
127 static char *video_rc_eq="tex^qComp";
128 static int video_rc_buffer_size=0;
129 static float video_rc_buffer_aggressivity=1.0;
130 static int video_rc_max_rate=0;
131 static int video_rc_min_rate=0;
132 static float video_rc_initial_cplx=0;
133 static float video_b_qfactor = 1.25;
134 static float video_b_qoffset = 1.25;
135 static float video_i_qfactor = -0.8;
136 static float video_i_qoffset = 0.0;
137 static int video_intra_quant_bias= FF_DEFAULT_QUANT_BIAS;
138 static int video_inter_quant_bias= FF_DEFAULT_QUANT_BIAS;
139 static int me_method = ME_EPZS;
140 static int video_disable = 0;
141 static int video_discard = 0;
142 static int video_codec_id = CODEC_ID_NONE;
143 static int video_codec_tag = 0;
144 static int same_quality = 0;
145 static int b_frames = 0;
146 static int b_strategy = 0;
147 static int mb_decision = FF_MB_DECISION_SIMPLE;
148 static int ildct_cmp = FF_CMP_VSAD;
149 static int mb_cmp = FF_CMP_SAD;
150 static int sub_cmp = FF_CMP_SAD;
151 static int cmp = FF_CMP_SAD;
152 static int pre_cmp = FF_CMP_SAD;
153 static int pre_me = 0;
154 static float lumi_mask = 0;
155 static float dark_mask = 0;
156 static float scplx_mask = 0;
157 static float tcplx_mask = 0;
158 static float p_mask = 0;
159 static int use_4mv = 0;
160 static int use_obmc = 0;
161 static int use_loop = 0;
162 static int use_aic = 0;
163 static int use_aiv = 0;
164 static int use_umv = 0;
165 static int use_ss = 0;
166 static int use_alt_scan = 0;
167 static int use_trell = 0;
168 static int use_scan_offset = 0;
169 static int use_qpel = 0;
170 static int use_qprd = 0;
171 static int use_cbprd = 0;
172 static int use_mv0 = 0;
173 static int do_normalize_aqp = 0;
174 static int qns = 0;
175 static int closed_gop = 0;
176 static int strict_gop = 0;
177 static int no_output = 0;
178 static int do_deinterlace = 0;
179 static int do_interlace_dct = 0;
180 static int do_interlace_me = 0;
181 static int workaround_bugs = FF_BUG_AUTODETECT;
182 static int error_resilience = 2;
183 static int error_concealment = 3;
184 static int dct_algo = 0;
185 static int idct_algo = 0;
186 static int use_part = 0;
187 static int packet_size = 0;
188 static int error_rate = 0;
189 static int strict = 0;
190 static int top_field_first = -1;
191 static int noise_reduction = 0;
192 static int sc_threshold = 0;
193 static int debug = 0;
194 static int debug_mv = 0;
195 static int me_threshold = 0;
196 static int mb_threshold = 0;
197 static int intra_dc_precision = 8;
198 static int coder = 0;
199 static int context = 0;
200 static int predictor = 0;
201 static int video_profile = FF_PROFILE_UNKNOWN;
202 static int video_level = FF_LEVEL_UNKNOWN;
203 static int nsse_weight = 8;
204 static int subpel_quality= 8;
205 static int me_penalty_compensation= 256;
206 static int lowres= 0;
207 static int frame_skip_threshold= 0;
208 static int frame_skip_factor= 0;
209 static int frame_skip_exp= 0;
210 static int frame_skip_cmp= FF_CMP_DCTMAX;
211 extern int loop_input; /* currently a hack */
212 static int gray_only = 0;
213
214 static int gop_size = 12;
215 static int intra_only = 0;
216 static int audio_sample_rate = 44100;
217 static int audio_bit_rate = 64000;
218 static int audio_disable = 0;
219 static int audio_channels = 1;
220 static int audio_codec_id = CODEC_ID_NONE;
221 static int audio_codec_tag = 0;
222
223 static int mux_rate= 0;
224 static int mux_packet_size= 0;
225 static float mux_preload= 0.5;
226 static float mux_max_delay= 0.7;
227
228 static int64_t recording_time = 0;
229 static int64_t start_time = 0;
230 static int64_t rec_timestamp = 0;
231 static int64_t input_ts_offset = 0;
232 static int file_overwrite = 0;
233 static char *str_title = NULL;
234 static char *str_author = NULL;
235 static char *str_copyright = NULL;
236 static char *str_comment = NULL;
237 static int do_benchmark = 0;
238 static int do_hex_dump = 0;
239 static int do_pkt_dump = 0;
240 static int do_psnr = 0;
241 static int do_vstats = 0;
242 static int do_pass = 0;
243 static int bitexact = 0;
244 static char *pass_logfilename = NULL;
245 static int audio_stream_copy = 0;
246 static int video_stream_copy = 0;
247 static int video_sync_method= 1;
248 static int audio_sync_method= 0;
249 static int copy_ts= 0;
250 static int opt_shortest = 0; //
251
252 static int rate_emu = 0;
253
254 static char *video_grab_format = "video4linux";
255 static char *video_device = NULL;
256 static char *grab_device = NULL;
257 static int video_channel = 0;
258 static char *video_standard = "ntsc";
259
260 static char *audio_grab_format = "audio_device";
261 static char *audio_device = NULL;
262 static int audio_volume = 256;
263
264 static int using_stdin = 0;
265 static int using_vhook = 0;
266 static int verbose = 1;
267 static int thread_count= 1;
268 static int q_pressed = 0;
269 static int me_range = 0;
270 static int64_t video_size = 0;
271 static int64_t audio_size = 0;
272 static int64_t extra_size = 0;
273 static int nb_frames_dup = 0;
274 static int nb_frames_drop = 0;
275 static int input_sync;
276 static int limit_filesize = 0; //
277
278 static int pgmyuv_compatibility_hack=0;
279
280
281 #define DEFAULT_PASS_LOGFILENAME "ffmpeg2pass"
282
283 struct AVInputStream;
284
285 typedef struct AVOutputStream {
286 int file_index; /* file index */
287 int index; /* stream index in the output file */
288 int source_index; /* AVInputStream index */
289 AVStream *st; /* stream in the output file */
290 int encoding_needed; /* true if encoding needed for this stream */
291 int frame_number;
292 /* input pts and corresponding output pts
293 for A/V sync */
294 //double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
295 struct AVInputStream *sync_ist; /* input stream to sync against */
296 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
297 /* video only */
298 int video_resample; /* video_resample and video_crop are mutually exclusive */
299 AVFrame pict_tmp; /* temporary image for resampling */
300 ImgReSampleContext *img_resample_ctx; /* for image resampling */
301
302 int video_crop; /* video_resample and video_crop are mutually exclusive */
303 int topBand; /* cropping area sizes */
304 int leftBand;
305
306 int video_pad; /* video_resample and video_pad are mutually exclusive */
307 int padtop; /* padding area sizes */
308 int padbottom;
309 int padleft;
310 int padright;
311
312 /* audio only */
313 int audio_resample;
314 ReSampleContext *resample; /* for audio resampling */
315 FifoBuffer fifo; /* for compression: one audio fifo per codec */
316 FILE *logfile;
317 } AVOutputStream;
318
319 typedef struct AVInputStream {
320 int file_index;
321 int index;
322 AVStream *st;
323 int discard; /* true if stream data should be discarded */
324 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
325 int64_t sample_index; /* current sample */
326
327 int64_t start; /* time when read started */
328 unsigned long frame; /* current frame */
329 int64_t next_pts; /* synthetic pts for cases where pkt.pts
330 is not defined */
331 int64_t pts; /* current pts */
332 int is_start; /* is 1 at the start and after a discontinuity */
333 } AVInputStream;
334
335 typedef struct AVInputFile {
336 int eof_reached; /* true if eof reached */
337 int ist_index; /* index of first stream in ist_table */
338 int buffer_size; /* current total buffer size */
339 int buffer_size_max; /* buffer size at which we consider we can stop
340 buffering */
341 int nb_streams; /* nb streams we are aware of */
342 } AVInputFile;
343
344 #ifndef CONFIG_WIN32
345
346 /* init terminal so that we can grab keys */
347 static struct termios oldtty;
348
349 static void term_exit(void)
350 {
351 tcsetattr (0, TCSANOW, &oldtty);
352 }
353
354 static volatile sig_atomic_t received_sigterm = 0;
355
356 static void
357 sigterm_handler(int sig)
358 {
359 received_sigterm = sig;
360 term_exit();
361 }
362
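/* put the controlling terminal into non-canonical, no-echo mode so that single
   key presses (e.g. 'q') can be read immediately; the original settings are
   restored by term_exit(), registered with atexit() and also called from the
   SIGINT/SIGQUIT/SIGTERM handler */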
363 static void term_init(void)
364 {
365 struct termios tty;
366
367 tcgetattr (0, &tty);
368 oldtty = tty;
369
370 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
371 |INLCR|IGNCR|ICRNL|IXON);
372 tty.c_oflag |= OPOST;
373 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
374 tty.c_cflag &= ~(CSIZE|PARENB);
375 tty.c_cflag |= CS8;
376 tty.c_cc[VMIN] = 1;
377 tty.c_cc[VTIME] = 0;
378
379 tcsetattr (0, TCSANOW, &tty);
380
381 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
382 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
383 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
384 /*
385 register a function to be called at normal program termination
386 */
387 atexit(term_exit);
388 #ifdef CONFIG_BEOS_NETSERVER
389 fcntl(0, F_SETFL, fcntl(0, F_GETFL) | O_NONBLOCK);
390 #endif
391 }
392
393 /* read a key without blocking */
394 static int read_key(void)
395 {
396 int n = 1;
397 unsigned char ch;
398 #ifndef CONFIG_BEOS_NETSERVER
399 struct timeval tv;
400 fd_set rfds;
401
402 FD_ZERO(&rfds);
403 FD_SET(0, &rfds);
404 tv.tv_sec = 0;
405 tv.tv_usec = 0;
406 n = select(1, &rfds, NULL, NULL, &tv);
407 #endif
408 if (n > 0) {
409 n = read(0, &ch, 1);
410 if (n == 1)
411 return ch;
412
413 return n;
414 }
415 return -1;
416 }
417
418 static int decode_interrupt_cb(void)
419 {
420 return q_pressed || (q_pressed = read_key() == 'q');
421 }
422
423 #else
424
425 static volatile int received_sigterm = 0;
426
427 /* no interactive support */
428 static void term_exit(void)
429 {
430 }
431
432 static void term_init(void)
433 {
434 }
435
436 static int read_key(void)
437 {
438 return 0;
439 }
440
441 #endif
442
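/* copy the stream definitions out of an ffserver feed (ffm) file, so that the
   streams created for the output match what the feed expects */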
443 static int read_ffserver_streams(AVFormatContext *s, const char *filename)
444 {
445 int i, err;
446 AVFormatContext *ic;
447
448 err = av_open_input_file(&ic, filename, NULL, FFM_PACKET_SIZE, NULL);
449 if (err < 0)
450 return err;
451 /* copy stream format */
452 s->nb_streams = ic->nb_streams;
453 for(i=0;i<ic->nb_streams;i++) {
454 AVStream *st;
455
456 st = av_mallocz(sizeof(AVStream));
457 memcpy(st, ic->streams[i], sizeof(AVStream));
458 s->streams[i] = st;
459 }
460
461 av_close_input_file(ic);
462 return 0;
463 }
464
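/* pts of the output stream's sync input stream, in seconds: the input pts is
   in AV_TIME_BASE units and is shifted by the per-file timestamp offset and by
   the requested start time */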
465 static double
466 get_sync_ipts(const AVOutputStream *ost)
467 {
468 const AVInputStream *ist = ost->sync_ist;
469 return (double)(ist->pts + input_files_ts_offset[ist->file_index] - start_time)/AV_TIME_BASE;
470 }
471
472 #define MAX_AUDIO_PACKET_SIZE (128 * 1024)
473
474 static void do_audio_out(AVFormatContext *s,
475 AVOutputStream *ost,
476 AVInputStream *ist,
477 unsigned char *buf, int size)
478 {
479 uint8_t *buftmp;
480 static uint8_t *audio_buf = NULL;
481 static uint8_t *audio_out = NULL;
482 const int audio_out_size= 4*MAX_AUDIO_PACKET_SIZE;
483
484 int size_out, frame_bytes, ret;
485 AVCodecContext *enc= &ost->st->codec;
486
487 /* SC: dynamic allocation of buffers */
488 if (!audio_buf)
489 audio_buf = av_malloc(2*MAX_AUDIO_PACKET_SIZE);
490 if (!audio_out)
491 audio_out = av_malloc(audio_out_size);
492 if (!audio_buf || !audio_out)
493 return; /* Should signal an error ! */
494
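/* A/V sync for audio: delta is the difference, in output samples, between where
   this stream should be according to the input pts and where it actually is
   (samples already sent to the encoder minus samples still queued in the fifo).
   Above a threshold of 50 samples, either drop input bytes / prepend silence at
   stream start, or (audio_sync_method > 1) let the resampler compensate
   gradually via av_resample_compensate() */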
495 if(audio_sync_method){
496 double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts
497 - fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2);
498 double idelta= delta*ist->st->codec.sample_rate / enc->sample_rate;
499 int byte_delta= ((int)idelta)*2*ist->st->codec.channels;
500
501 //FIXME resample delay
502 if(fabs(delta) > 50){
503 if(ist->is_start){
504 if(byte_delta < 0){
505 byte_delta= FFMAX(byte_delta, -size);
506 size += byte_delta;
507 buf -= byte_delta;
508 if(verbose > 2)
509 fprintf(stderr, "discarding %d audio samples\n", (int)-delta);
510 if(!size)
511 return;
512 ist->is_start=0;
513 }else{
514 static uint8_t *input_tmp= NULL;
515 input_tmp= av_realloc(input_tmp, byte_delta + size);
516
517 if(byte_delta + size <= MAX_AUDIO_PACKET_SIZE)
518 ist->is_start=0;
519 else
520 byte_delta= MAX_AUDIO_PACKET_SIZE - size;
521
522 memset(input_tmp, 0, byte_delta);
523 memcpy(input_tmp + byte_delta, buf, size);
524 buf= input_tmp;
525 size += byte_delta;
526 if(verbose > 2)
527 fprintf(stderr, "adding %d audio samples of silence\n", (int)delta);
528 }
529 }else if(audio_sync_method>1){
530 int comp= clip(delta, -audio_sync_method, audio_sync_method);
531 assert(ost->audio_resample);
532 if(verbose > 2)
533 fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
534 // fprintf(stderr, "drift:%f len:%d opts:%lld ipts:%lld fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2));
535 av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
536 }
537 }
538 }else
539 ost->sync_opts= lrintf(get_sync_ipts(ost) * enc->sample_rate)
540 - fifo_size(&ost->fifo, ost->fifo.rptr)/(ost->st->codec.channels * 2); //FIXME wrong
541
542 if (ost->audio_resample) {
543 buftmp = audio_buf;
544 size_out = audio_resample(ost->resample,
545 (short *)buftmp, (short *)buf,
546 size / (ist->st->codec.channels * 2));
547 size_out = size_out * enc->channels * 2;
548 } else {
549 buftmp = buf;
550 size_out = size;
551 }
552
553 /* now encode as many frames as possible */
554 if (enc->frame_size > 1) {
555 /* output resampled raw samples */
556 fifo_write(&ost->fifo, buftmp, size_out,
557 &ost->fifo.wptr);
558
559 frame_bytes = enc->frame_size * 2 * enc->channels;
560
561 while (fifo_read(&ost->fifo, audio_buf, frame_bytes,
562 &ost->fifo.rptr) == 0) {
563 AVPacket pkt;
564 av_init_packet(&pkt);
565
566 ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
567 (short *)audio_buf);
568 audio_size += ret;
569 pkt.stream_index= ost->index;
570 pkt.data= audio_out;
571 pkt.size= ret;
572 if(enc->coded_frame)
573 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
574 pkt.flags |= PKT_FLAG_KEY;
575 av_interleaved_write_frame(s, &pkt);
576
577 ost->sync_opts += enc->frame_size;
578 }
579 } else {
580 AVPacket pkt;
581 av_init_packet(&pkt);
582
583 ost->sync_opts += size_out / (2 * enc->channels);
584
585 /* output a pcm frame */
586 /* XXX: change encoding codec API to avoid this ? */
587 switch(enc->codec->id) {
588 case CODEC_ID_PCM_S16LE:
589 case CODEC_ID_PCM_S16BE:
590 case CODEC_ID_PCM_U16LE:
591 case CODEC_ID_PCM_U16BE:
592 break;
593 default:
594 size_out = size_out >> 1;
595 break;
596 }
597 ret = avcodec_encode_audio(enc, audio_out, size_out,
598 (short *)buftmp);
599 audio_size += ret;
600 pkt.stream_index= ost->index;
601 pkt.data= audio_out;
602 pkt.size= ret;
603 if(enc->coded_frame)
604 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
605 pkt.flags |= PKT_FLAG_KEY;
606 av_interleaved_write_frame(s, &pkt);
607 }
608 }
609
610 static void pre_process_video_frame(AVInputStream *ist, AVPicture *picture, void **bufp)
611 {
612 AVCodecContext *dec;
613 AVPicture *picture2;
614 AVPicture picture_tmp;
615 uint8_t *buf = 0;
616
617 dec = &ist->st->codec;
618
619 /* deinterlace : must be done before any resize */
620 if (do_deinterlace || using_vhook) {
621 int size;
622
623 /* create temporary picture */
624 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
625 buf = av_malloc(size);
626 if (!buf)
627 return;
628
629 picture2 = &picture_tmp;
630 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
631
632 if (do_deinterlace){
633 if(avpicture_deinterlace(picture2, picture,
634 dec->pix_fmt, dec->width, dec->height) < 0) {
635 /* if error, do not deinterlace */
636 av_free(buf);
637 buf = NULL;
638 picture2 = picture;
639 }
640 } else {
641 if (img_convert(picture2, dec->pix_fmt, picture,
642 dec->pix_fmt, dec->width, dec->height) < 0) {
643 /* if error, do not copy */
644 av_free(buf);
645 buf = NULL;
646 picture2 = picture;
647 }
648 }
649 } else {
650 picture2 = picture;
651 }
652
653 frame_hook_process(picture2, dec->pix_fmt, dec->width, dec->height);
654
655 if (picture != picture2)
656 *picture = *picture2;
657 *bufp = buf;
658 }
659
660 /* we begin to correct A/V delay at this threshold */
661 #define AV_DELAY_MAX 0.100
662
663
664 /* Expects img to be yuv420 */
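/* in YUV420 the chroma planes are subsampled by 2 in each direction, so the
   padding sizes and offsets are halved (the >> shift) for planes 1 and 2;
   the default pad colour {16,128,128} is black in YCbCr */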
665 static void fill_pad_region(AVPicture* img, int height, int width,
666 int padtop, int padbottom, int padleft, int padright, int *color) {
667
668 int i, y, shift;
669 uint8_t *optr;
670
671 for (i = 0; i < 3; i++) {
672 shift = (i == 0) ? 0 : 1;
673
674 if (padtop || padleft) {
675 memset(img->data[i], color[i], (((img->linesize[i] * padtop) +
676 padleft) >> shift));
677 }
678
679 if (padleft || padright) {
680 optr = img->data[i] + (img->linesize[i] * (padtop >> shift)) +
681 (img->linesize[i] - (padright >> shift));
682
683 for (y = 0; y < ((height - (padtop + padbottom) - 1) >> shift); y++) {
684 memset(optr, color[i], (padleft + padright) >> shift);
685 optr += img->linesize[i];
686 }
687 }
688
689 if (padbottom || padright) {
690 optr = img->data[i] + (((img->linesize[i] * (height - padbottom)) - padright) >> shift);
691 memset(optr, color[i], (((img->linesize[i] * padbottom) + padright) >> shift));
692 }
693 }
694 }
695
696 static int bit_buffer_size= 1024*256;
697 static uint8_t *bit_buffer= NULL;
698
699 static void do_video_out(AVFormatContext *s,
700 AVOutputStream *ost,
701 AVInputStream *ist,
702 AVFrame *in_picture,
703 int *frame_size)
704 {
705 int nb_frames, i, ret;
706 AVFrame *final_picture, *formatted_picture;
707 AVFrame picture_format_temp, picture_crop_temp;
708 uint8_t *buf = NULL, *buf1 = NULL;
709 AVCodecContext *enc, *dec;
710 enum PixelFormat target_pixfmt;
711
712 avcodec_get_frame_defaults(&picture_format_temp);
713 avcodec_get_frame_defaults(&picture_crop_temp);
714
715 enc = &ost->st->codec;
716 dec = &ist->st->codec;
717
718 /* by default, we output a single frame */
719 nb_frames = 1;
720
721 *frame_size = 0;
722
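/* A/V sync for video: vdelta is how far (in encoder time base units) the output
   is behind the input pts; below -1.1 the frame is dropped, above 1.1 it is
   duplicated enough times to catch up */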
723 if(video_sync_method){
724 double vdelta;
725 vdelta = get_sync_ipts(ost) / av_q2d(enc->time_base) - ost->sync_opts;
726 //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
727 if (vdelta < -1.1)
728 nb_frames = 0;
729 else if (vdelta > 1.1)
730 nb_frames = lrintf(vdelta);
731 //fprintf(stderr, "vdelta:%f, ost->sync_opts:%lld, ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, ost->sync_ipts, nb_frames);
732 if (nb_frames == 0){
733 ++nb_frames_drop;
734 if (verbose>2)
735 fprintf(stderr, "*** drop!\n");
736 }else if (nb_frames > 1) {
737 nb_frames_dup += nb_frames;
738 if (verbose>2)
739 fprintf(stderr, "*** %d dup!\n", nb_frames-1);
740 }
741 }else
742 ost->sync_opts= lrintf(get_sync_ipts(ost) / av_q2d(enc->time_base));
743
744 nb_frames= FFMIN(nb_frames, max_frames[CODEC_TYPE_VIDEO] - ost->frame_number);
745 if (nb_frames <= 0)
746 return;
747
748 /* convert pixel format if needed */
749 target_pixfmt = ost->video_resample || ost->video_pad
750 ? PIX_FMT_YUV420P : enc->pix_fmt;
751 if (dec->pix_fmt != target_pixfmt) {
752 int size;
753
754 /* create temporary picture */
755 size = avpicture_get_size(target_pixfmt, dec->width, dec->height);
756 buf = av_malloc(size);
757 if (!buf)
758 return;
759 formatted_picture = &picture_format_temp;
760 avpicture_fill((AVPicture*)formatted_picture, buf, target_pixfmt, dec->width, dec->height);
761
762 if (img_convert((AVPicture*)formatted_picture, target_pixfmt,
763 (AVPicture *)in_picture, dec->pix_fmt,
764 dec->width, dec->height) < 0) {
765
766 if (verbose >= 0)
767 fprintf(stderr, "pixel format conversion not handled\n");
768
769 goto the_end;
770 }
771 } else {
772 formatted_picture = in_picture;
773 }
774
775 /* XXX: resampling could be done before raw format conversion in
776 some cases to go faster */
777 /* XXX: only works for YUV420P */
778 if (ost->video_resample) {
779 final_picture = &ost->pict_tmp;
780 img_resample(ost->img_resample_ctx, (AVPicture*)final_picture, (AVPicture*)formatted_picture);
781
782 if (ost->padtop || ost->padbottom || ost->padleft || ost->padright) {
783 fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
784 ost->padtop, ost->padbottom, ost->padleft, ost->padright,
785 padcolor);
786 }
787
788 if (enc->pix_fmt != PIX_FMT_YUV420P) {
789 int size;
790
791 av_free(buf);
792 /* create temporary picture */
793 size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
794 buf = av_malloc(size);
795 if (!buf)
796 return;
797 final_picture = &picture_format_temp;
798 avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
799
800 if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
801 (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
802 enc->width, enc->height) < 0) {
803
804 if (verbose >= 0)
805 fprintf(stderr, "pixel format conversion not handled\n");
806
807 goto the_end;
808 }
809 }
810 } else if (ost->video_crop) {
811 picture_crop_temp.data[0] = formatted_picture->data[0] +
812 (ost->topBand * formatted_picture->linesize[0]) + ost->leftBand;
813
814 picture_crop_temp.data[1] = formatted_picture->data[1] +
815 ((ost->topBand >> 1) * formatted_picture->linesize[1]) +
816 (ost->leftBand >> 1);
817
818 picture_crop_temp.data[2] = formatted_picture->data[2] +
819 ((ost->topBand >> 1) * formatted_picture->linesize[2]) +
820 (ost->leftBand >> 1);
821
822 picture_crop_temp.linesize[0] = formatted_picture->linesize[0];
823 picture_crop_temp.linesize[1] = formatted_picture->linesize[1];
824 picture_crop_temp.linesize[2] = formatted_picture->linesize[2];
825 final_picture = &picture_crop_temp;
826 } else if (ost->video_pad) {
827 final_picture = &ost->pict_tmp;
828
829 for (i = 0; i < 3; i++) {
830 uint8_t *optr, *iptr;
831 int shift = (i == 0) ? 0 : 1;
832 int y, yheight;
833
834 /* set offset to start writing image into */
835 optr = final_picture->data[i] + (((final_picture->linesize[i] *
836 ost->padtop) + ost->padleft) >> shift);
837 iptr = formatted_picture->data[i];
838
839 yheight = (enc->height - ost->padtop - ost->padbottom) >> shift;
840 for (y = 0; y < yheight; y++) {
841 /* copy unpadded image row into padded image row */
842 memcpy(optr, iptr, formatted_picture->linesize[i]);
843 optr += final_picture->linesize[i];
844 iptr += formatted_picture->linesize[i];
845 }
846 }
847
848 fill_pad_region((AVPicture*)final_picture, enc->height, enc->width,
849 ost->padtop, ost->padbottom, ost->padleft, ost->padright,
850 padcolor);
851
852 if (enc->pix_fmt != PIX_FMT_YUV420P) {
853 int size;
854
855 av_free(buf);
856 /* create temporary picture */
857 size = avpicture_get_size(enc->pix_fmt, enc->width, enc->height);
858 buf = av_malloc(size);
859 if (!buf)
860 return;
861 final_picture = &picture_format_temp;
862 avpicture_fill((AVPicture*)final_picture, buf, enc->pix_fmt, enc->width, enc->height);
863
864 if (img_convert((AVPicture*)final_picture, enc->pix_fmt,
865 (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
866 enc->width, enc->height) < 0) {
867
868 if (verbose >= 0)
869 fprintf(stderr, "pixel format conversion not handled\n");
870
871 goto the_end;
872 }
873 }
874 } else {
875 final_picture = formatted_picture;
876 }
877 /* duplicates frame if needed */
878 for(i=0;i<nb_frames;i++) {
879 AVPacket pkt;
880 av_init_packet(&pkt);
881 pkt.stream_index= ost->index;
882
883 if (s->oformat->flags & AVFMT_RAWPICTURE) {
884 /* raw pictures are written as AVPicture structure to
885 avoid any copies. We temporarily support the older
886 method. */
887 AVFrame* old_frame = enc->coded_frame;
888 enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
889 pkt.data= (uint8_t *)final_picture;
890 pkt.size= sizeof(AVPicture);
891 if(dec->coded_frame)
892 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
893 if(dec->coded_frame && dec->coded_frame->key_frame)
894 pkt.flags |= PKT_FLAG_KEY;
895
896 av_interleaved_write_frame(s, &pkt);
897 enc->coded_frame = old_frame;
898 } else {
899 AVFrame big_picture;
900
901 big_picture= *final_picture;
902 /* better than nothing: use input picture interlaced
903 settings */
904 big_picture.interlaced_frame = in_picture->interlaced_frame;
905 if(do_interlace_me || do_interlace_dct){
906 if(top_field_first == -1)
907 big_picture.top_field_first = in_picture->top_field_first;
908 else
909 big_picture.top_field_first = top_field_first;
910 }
911
912 /* handles sameq here. This is not correct because it may
913 not be a global option */
914 if (same_quality) {
915 big_picture.quality = ist->st->quality;
916 }else
917 big_picture.quality = ost->st->quality;
918 if(!me_threshold)
919 big_picture.pict_type = 0;
920 // big_picture.pts = AV_NOPTS_VALUE;
921 big_picture.pts= ost->sync_opts;
922 // big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
923 //av_log(NULL, AV_LOG_DEBUG, "%lld -> encoder\n", ost->sync_opts);
924 ret = avcodec_encode_video(enc,
925 bit_buffer, bit_buffer_size,
926 &big_picture);
927 //enc->frame_number = enc->real_pict_num;
928 if(ret>0){
929 pkt.data= bit_buffer;
930 pkt.size= ret;
931 if(enc->coded_frame)
932 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
933 /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %lld/%lld\n",
934 pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
935 pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
936
937 if(enc->coded_frame && enc->coded_frame->key_frame)
938 pkt.flags |= PKT_FLAG_KEY;
939 av_interleaved_write_frame(s, &pkt);
940 *frame_size = ret;
941 //fprintf(stderr,"\nFrame: %3d %3d size: %5d type: %d",
942 // enc->frame_number-1, enc->real_pict_num, ret,
943 // enc->pict_type);
944 /* if two pass, output log */
945 if (ost->logfile && enc->stats_out) {
946 fprintf(ost->logfile, "%s", enc->stats_out);
947 }
948 }
949 }
950 ost->sync_opts++;
951 ost->frame_number++;
952 }
953 the_end:
954 av_free(buf);
955 av_free(buf1);
956 }
957
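/* convert a mean squared error normalised to the 0..1 range into a PSNR in dB */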
958 static double psnr(double d){
959 if(d==0) return INFINITY;
960 return -10.0*log(d)/log(10.0);
961 }
962
963 static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
964 int frame_size)
965 {
966 static FILE *fvstats=NULL;
967 char filename[40];
968 time_t today2;
969 struct tm *today;
970 AVCodecContext *enc;
971 int frame_number;
972 int64_t ti;
973 double ti1, bitrate, avg_bitrate;
974
975 if (!fvstats) {
976 today2 = time(NULL);
977 today = localtime(&today2);
978 snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour,
979 today->tm_min,
980 today->tm_sec);
981 fvstats = fopen(filename,"w");
982 if (!fvstats) {
983 perror("fopen");
984 exit(1);
985 }
986 }
987
988 ti = MAXINT64;
989 enc = &ost->st->codec;
990 if (enc->codec_type == CODEC_TYPE_VIDEO) {
991 frame_number = ost->frame_number;
992 fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
993 if (enc->flags&CODEC_FLAG_PSNR)
994 fprintf(fvstats, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
995
996 fprintf(fvstats,"f_size= %6d ", frame_size);
997 /* compute pts value */
998 ti1 = ost->sync_opts * av_q2d(enc->time_base);
999 if (ti1 < 0.01)
1000 ti1 = 0.01;
1001
1002 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1003 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1004 fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1005 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1006 fprintf(fvstats,"type= %c\n", av_get_pict_type_char(enc->coded_frame->pict_type));
1007 }
1008 }
1009
1010 static void print_report(AVFormatContext **output_files,
1011 AVOutputStream **ost_table, int nb_ostreams,
1012 int is_last_report)
1013 {
1014 char buf[1024];
1015 AVOutputStream *ost;
1016 AVFormatContext *oc, *os;
1017 int64_t total_size;
1018 AVCodecContext *enc;
1019 int frame_number, vid, i;
1020 double bitrate, ti1, pts;
1021 static int64_t last_time = -1;
1022
1023 if (!is_last_report) {
1024 int64_t cur_time;
1025 /* display the report every 0.5 seconds */
1026 cur_time = av_gettime();
1027 if (last_time == -1) {
1028 last_time = cur_time;
1029 return;
1030 }
1031 if ((cur_time - last_time) < 500000)
1032 return;
1033 last_time = cur_time;
1034 }
1035
1036
1037 oc = output_files[0];
1038
1039 total_size = url_ftell(&oc->pb);
1040
1041 buf[0] = '\0';
1042 ti1 = 1e10;
1043 vid = 0;
1044 for(i=0;i<nb_ostreams;i++) {
1045 ost = ost_table[i];
1046 os = output_files[ost->file_index];
1047 enc = &ost->st->codec;
1048 if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1049 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ",
1050 enc->coded_frame->quality/(float)FF_QP2LAMBDA);
1051 }
1052 if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1053 frame_number = ost->frame_number;
1054 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d q=%2.1f ",
1055 frame_number, enc->coded_frame ? enc->coded_frame->quality/(float)FF_QP2LAMBDA : 0);
1056 if(is_last_report)
1057 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1058 if (enc->flags&CODEC_FLAG_PSNR){
1059 int j;
1060 double error, error_sum=0;
1061 double scale, scale_sum=0;
1062 char type[3]= {'Y','U','V'};
1063 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1064 for(j=0; j<3; j++){
1065 if(is_last_report){
1066 error= enc->error[j];
1067 scale= enc->width*enc->height*255.0*255.0*frame_number;
1068 }else{
1069 error= enc->coded_frame->error[j];
1070 scale= enc->width*enc->height*255.0*255.0;
1071 }
1072 if(j) scale/=4;
1073 error_sum += error;
1074 scale_sum += scale;
1075 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
1076 }
1077 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
1078 }
1079 vid = 1;
1080 }
1081 /* compute min output value */
1082 pts = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
1083 if ((pts < ti1) && (pts > 0))
1084 ti1 = pts;
1085 }
1086 if (ti1 < 0.01)
1087 ti1 = 0.01;
1088
1089 if (verbose || is_last_report) {
1090 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1091
1092 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1093 "size=%8.0fkB time=%0.1f bitrate=%6.1fkbits/s",
1094 (double)total_size / 1024, ti1, bitrate);
1095
1096 if (verbose > 1)
1097 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1098 nb_frames_dup, nb_frames_drop);
1099
1100 if (verbose >= 0)
1101 fprintf(stderr, "%s \r", buf);
1102
1103 fflush(stderr);
1104 }
1105
1106 if (is_last_report && verbose >= 0){
1107 int64_t raw= audio_size + video_size + extra_size;
1108 fprintf(stderr, "\n");
1109 fprintf(stderr, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1110 video_size/1024.0,
1111 audio_size/1024.0,
1112 extra_size/1024.0,
1113 100.0*(total_size - raw)/raw
1114 );
1115 }
1116 }
1117
1118 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1119 static int output_packet(AVInputStream *ist, int ist_index,
1120 AVOutputStream **ost_table, int nb_ostreams,
1121 const AVPacket *pkt)
1122 {
1123 AVFormatContext *os;
1124 AVOutputStream *ost;
1125 uint8_t *ptr;
1126 int len, ret, i;
1127 uint8_t *data_buf;
1128 int data_size, got_picture;
1129 AVFrame picture;
1130 void *buffer_to_free;
1131 static int samples_size= 0;
1132 static short *samples= NULL;
1133
1134 if(!pkt){
1135 ist->pts= ist->next_pts; // needed for last packet if vsync=0
1136 } else if (pkt->dts != AV_NOPTS_VALUE) { //FIXME seems redundant, as libavformat does this too
1137 ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1138 } else {
1139 // assert(ist->pts == ist->next_pts);
1140 }
1141
1142 if (pkt == NULL) {
1143 /* EOF handling */
1144 ptr = NULL;
1145 len = 0;
1146 goto handle_eof;
1147 }
1148
1149 len = pkt->size;
1150 ptr = pkt->data;
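/* consume the whole packet; for each decoded frame ist->next_pts is advanced by
   the frame duration so that a synthetic pts is available when the demuxer does
   not provide timestamps */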
1151 while (len > 0) {
1152 handle_eof:
1153 /* decode the packet if needed */
1154 data_buf = NULL; /* fail safe */
1155 data_size = 0;
1156 if (ist->decoding_needed) {
1157 switch(ist->st->codec.codec_type) {
1158 case CODEC_TYPE_AUDIO:{
1159 if(pkt)
1160 samples= av_fast_realloc(samples, &samples_size, FFMAX(pkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE));
1161 /* XXX: the copy could be avoided for 16-bit PCM with the same
1162 endianness as the CPU */
1163 ret = avcodec_decode_audio(&ist->st->codec, samples, &data_size,
1164 ptr, len);
1165 if (ret < 0)
1166 goto fail_decode;
1167 ptr += ret;
1168 len -= ret;
1169 /* a bug in the mpeg audio decoder can give */
1170 /* data_size < 0; it seems to be an overflow */
1171 if (data_size <= 0) {
1172 /* no audio frame */
1173 continue;
1174 }
1175 data_buf = (uint8_t *)samples;
1176 ist->next_pts += ((int64_t)AV_TIME_BASE/2 * data_size) /
1177 (ist->st->codec.sample_rate * ist->st->codec.channels);
1178 break;}
1179 case CODEC_TYPE_VIDEO:
1180 data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2;
1181 /* XXX: allocate picture correctly */
1182 avcodec_get_frame_defaults(&picture);
1183
1184 ret = avcodec_decode_video(&ist->st->codec,
1185 &picture, &got_picture, ptr, len);
1186 ist->st->quality= picture.quality;
1187 if (ret < 0)
1188 goto fail_decode;
1189 if (!got_picture) {
1190 /* no picture yet */
1191 goto discard_packet;
1192 }
1193 if (ist->st->codec.time_base.num != 0) {
1194 ist->next_pts += ((int64_t)AV_TIME_BASE *
1195 ist->st->codec.time_base.num) /
1196 ist->st->codec.time_base.den;
1197 }
1198 len = 0;
1199 break;
1200 default:
1201 goto fail_decode;
1202 }
1203 } else {
1204 switch(ist->st->codec.codec_type) {
1205 case CODEC_TYPE_AUDIO:
1206 ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec.frame_size) /
1207 (ist->st->codec.sample_rate * ist->st->codec.channels);
1208 break;
1209 case CODEC_TYPE_VIDEO:
1210 if (ist->st->codec.time_base.num != 0) {
1211 ist->next_pts += ((int64_t)AV_TIME_BASE *
1212 ist->st->codec.time_base.num) /
1213 ist->st->codec.time_base.den;
1214 }
1215 break;
1216 }
1217 data_buf = ptr;
1218 data_size = len;
1219 ret = len;
1220 len = 0;
1221 }
1222
1223 buffer_to_free = NULL;
1224 if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO) {
1225 pre_process_video_frame(ist, (AVPicture *)&picture,
1226 &buffer_to_free);
1227 }
1228
1229 // preprocess audio (volume)
1230 if (ist->st->codec.codec_type == CODEC_TYPE_AUDIO) {
1231 if (audio_volume != 256) {
1232 short *volp;
1233 volp = samples;
1234 for(i=0;i<(data_size / sizeof(short));i++) {
1235 int v = ((*volp) * audio_volume + 128) >> 8;
1236 if (v < -32768) v = -32768;
1237 if (v > 32767) v = 32767;
1238 *volp++ = v;
1239 }
1240 }
1241 }
1242
1243 /* frame rate emulation */
1244 if (ist->st->codec.rate_emu) {
1245 int64_t pts = av_rescale((int64_t) ist->frame * ist->st->codec.time_base.num, 1000000, ist->st->codec.time_base.den);
1246 int64_t now = av_gettime() - ist->start;
1247 if (pts > now)
1248 usleep(pts - now);
1249
1250 ist->frame++;
1251 }
1252
1253 #if 0
1254 /* MPEG PTS reordering: if it is a P or I frame, its PTS
1255 is that of the next displayed frame */
1256 /* XXX: add mpeg4 too ? */
1257 if (ist->st->codec.codec_id == CODEC_ID_MPEG1VIDEO) {
1258 if (ist->st->codec.pict_type != B_TYPE) {
1259 int64_t tmp;
1260 tmp = ist->last_ip_pts;
1261 ist->last_ip_pts = ist->frac_pts.val;
1262 ist->frac_pts.val = tmp;
1263 }
1264 }
1265 #endif
1266 /* once the requested start time is reached, transcode the raw frames,
1267 encode packets and output them */
1268 if (start_time == 0 || ist->pts >= start_time)
1269 for(i=0;i<nb_ostreams;i++) {
1270 int frame_size;
1271
1272 ost = ost_table[i];
1273 if (ost->source_index == ist_index) {
1274 os = output_files[ost->file_index];
1275
1276 #if 0
1277 printf("%d: got pts=%0.3f %0.3f\n", i,
1278 (double)pkt->pts / AV_TIME_BASE,
1279 ((double)ist->pts / AV_TIME_BASE) -
1280 ((double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den));
1281 #endif
1282 /* set the input output pts pairs */
1283 //ost->sync_ipts = (double)(ist->pts + input_files_ts_offset[ist->file_index] - start_time)/ AV_TIME_BASE;
1284
1285 if (ost->encoding_needed) {
1286 switch(ost->st->codec.codec_type) {
1287 case CODEC_TYPE_AUDIO:
1288 do_audio_out(os, ost, ist, data_buf, data_size);
1289 break;
1290 case CODEC_TYPE_VIDEO:
1291 do_video_out(os, ost, ist, &picture, &frame_size);
1292 video_size += frame_size;
1293 if (do_vstats && frame_size)
1294 do_video_stats(os, ost, frame_size);
1295 break;
1296 default:
1297 av_abort();
1298 }
1299 } else {
1300 AVFrame avframe; //FIXME/XXX remove this
1301 AVPacket opkt;
1302 av_init_packet(&opkt);
1303
1304 /* no reencoding needed : output the packet directly */
1305 /* force the input stream PTS */
1306
1307 avcodec_get_frame_defaults(&avframe);
1308 ost->st->codec.coded_frame= &avframe;
1309 avframe.key_frame = pkt->flags & PKT_FLAG_KEY;
1310
1311 if(ost->st->codec.codec_type == CODEC_TYPE_AUDIO)
1312 audio_size += data_size;
1313 else if (ost->st->codec.codec_type == CODEC_TYPE_VIDEO) {
1314 video_size += data_size;
1315 ost->sync_opts++;
1316 }
1317
1318 opkt.stream_index= ost->index;
1319 opkt.data= data_buf;
1320 opkt.size= data_size;
1321 if(pkt->pts != AV_NOPTS_VALUE)
1322 opkt.pts= av_rescale_q(av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q) + input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ost->st->time_base);
1323 else
1324 opkt.pts= AV_NOPTS_VALUE;
1325 opkt.dts= av_rescale_q(av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q) + input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ost->st->time_base);
1326 opkt.flags= pkt->flags;
1327
1328 av_interleaved_write_frame(os, &opkt);
1329 ost->st->codec.frame_number++;
1330 ost->frame_number++;
1331 }
1332 }
1333 }
1334 av_free(buffer_to_free);
1335 }
1336 discard_packet:
1337 if (pkt == NULL) {
1338 /* EOF handling */
1339
1340 for(i=0;i<nb_ostreams;i++) {
1341 ost = ost_table[i];
1342 if (ost->source_index == ist_index) {
1343 AVCodecContext *enc= &ost->st->codec;
1344 os = output_files[ost->file_index];
1345
1346 if(ost->st->codec.codec_type == CODEC_TYPE_AUDIO && enc->frame_size <=1)
1347 continue;
1348 if(ost->st->codec.codec_type == CODEC_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
1349 continue;
1350
1351 if (ost->encoding_needed) {
1352 for(;;) {
1353 AVPacket pkt;
1354 av_init_packet(&pkt);
1355 pkt.stream_index= ost->index;
1356
1357 switch(ost->st->codec.codec_type) {
1358 case CODEC_TYPE_AUDIO:
1359 ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
1360 audio_size += ret;
1361 pkt.flags |= PKT_FLAG_KEY;
1362 break;
1363 case CODEC_TYPE_VIDEO:
1364 ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
1365 video_size += ret;
1366 if(enc->coded_frame && enc->coded_frame->key_frame)
1367 pkt.flags |= PKT_FLAG_KEY;
1368 if (ost->logfile && enc->stats_out) {
1369 fprintf(ost->logfile, "%s", enc->stats_out);
1370 }
1371 break;
1372 default:
1373 ret=-1;
1374 }
1375
1376 if(ret<=0)
1377 break;
1378 pkt.data= bit_buffer;
1379 pkt.size= ret;
1380 if(enc->coded_frame)
1381 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1382 av_interleaved_write_frame(os, &pkt);
1383 }
1384 }
1385 }
1386 }
1387 }
1388
1389 return 0;
1390 fail_decode:
1391 return -1;
1392 }
1393
1394
1395 /*
1396 * The following code is the main loop of the file converter
1397 */
1398 static int av_encode(AVFormatContext **output_files,
1399 int nb_output_files,
1400 AVFormatContext **input_files,
1401 int nb_input_files,
1402 AVStreamMap *stream_maps, int nb_stream_maps)
1403 {
1404 int ret, i, j, k, n, nb_istreams = 0, nb_ostreams = 0;
1405 AVFormatContext *is, *os;
1406 AVCodecContext *codec, *icodec;
1407 AVOutputStream *ost, **ost_table = NULL;
1408 AVInputStream *ist, **ist_table = NULL;
1409 AVInputFile *file_table;
1410 AVFormatContext *stream_no_data;
1411 int key;
1412
1413 file_table= (AVInputFile*) av_mallocz(nb_input_files * sizeof(AVInputFile));
1414 if (!file_table)
1415 goto fail;
1416
1417 /* input stream init */
1418 j = 0;
1419 for(i=0;i<nb_input_files;i++) {
1420 is = input_files[i];
1421 file_table[i].ist_index = j;
1422 file_table[i].nb_streams = is->nb_streams;
1423 j += is->nb_streams;
1424 }
1425 nb_istreams = j;
1426
1427 ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *));
1428 if (!ist_table)
1429 goto fail;
1430
1431 for(i=0;i<nb_istreams;i++) {
1432 ist = av_mallocz(sizeof(AVInputStream));
1433 if (!ist)
1434 goto fail;
1435 ist_table[i] = ist;
1436 }
1437 j = 0;
1438 for(i=0;i<nb_input_files;i++) {
1439 is = input_files[i];
1440 for(k=0;k<is->nb_streams;k++) {
1441 ist = ist_table[j++];
1442 ist->st = is->streams[k];
1443 ist->file_index = i;
1444 ist->index = k;
1445 ist->discard = 1; /* the stream is discarded by default
1446 (changed later) */
1447
1448 if (ist->st->codec.rate_emu) {
1449 ist->start = av_gettime();
1450 ist->frame = 0;
1451 }
1452 }
1453 }
1454
1455 /* output stream init */
1456 nb_ostreams = 0;
1457 for(i=0;i<nb_output_files;i++) {
1458 os = output_files[i];
1459 nb_ostreams += os->nb_streams;
1460 }
1461 if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) {
1462 fprintf(stderr, "Number of stream maps must match number of output streams\n");
1463 exit(1);
1464 }
1465
1466 /* Sanity check the mapping args -- do the input files & streams exist? */
1467 for(i=0;i<nb_stream_maps;i++) {
1468 int fi = stream_maps[i].file_index;
1469 int si = stream_maps[i].stream_index;
1470
1471 if (fi < 0 || fi > nb_input_files - 1 ||
1472 si < 0 || si > file_table[fi].nb_streams - 1) {
1473 fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si);
1474 exit(1);
1475 }
1476 fi = stream_maps[i].sync_file_index;
1477 si = stream_maps[i].sync_stream_index;
1478 if (fi < 0 || fi > nb_input_files - 1 ||
1479 si < 0 || si > file_table[fi].nb_streams - 1) {
1480 fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si);
1481 exit(1);
1482 }
1483 }
1484
1485 ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams);
1486 if (!ost_table)
1487 goto fail;
1488 for(i=0;i<nb_ostreams;i++) {
1489 ost = av_mallocz(sizeof(AVOutputStream));
1490 if (!ost)
1491 goto fail;
1492 ost_table[i] = ost;
1493 }
1494
1495 n = 0;
1496 for(k=0;k<nb_output_files;k++) {
1497 os = output_files[k];
1498 for(i=0;i<os->nb_streams;i++) {
1499 int found;
1500 ost = ost_table[n++];
1501 ost->file_index = k;
1502 ost->index = i;
1503 ost->st = os->streams[i];
1504 if (nb_stream_maps > 0) {
1505 ost->source_index = file_table[stream_maps[n-1].file_index].ist_index +
1506 stream_maps[n-1].stream_index;
1507
1508 /* Sanity check that the stream types match */
1509 if (ist_table[ost->source_index]->st->codec.codec_type != ost->st->codec.codec_type) {
1510 fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n",
1511 stream_maps[n-1].file_index, stream_maps[n-1].stream_index,
1512 ost->file_index, ost->index);
1513 exit(1);
1514 }
1515
1516 } else {
1517 /* get corresponding input stream index : we select the first one with the right type */
1518 found = 0;
1519 for(j=0;j<nb_istreams;j++) {
1520 ist = ist_table[j];
1521 if (ist->discard &&
1522 ist->st->codec.codec_type == ost->st->codec.codec_type) {
1523 ost->source_index = j;
1524 found = 1;
1525 break;
1526 }
1527 }
1528
1529 if (!found) {
1530 /* try again and reuse existing stream */
1531 for(j=0;j<nb_istreams;j++) {
1532 ist = ist_table[j];
1533 if (ist->st->codec.codec_type == ost->st->codec.codec_type) {
1534 ost->source_index = j;
1535 found = 1;
1536 }
1537 }
1538 if (!found) {
1539 fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n",
1540 ost->file_index, ost->index);
1541 exit(1);
1542 }
1543 }
1544 }
1545 ist = ist_table[ost->source_index];
1546 ist->discard = 0;
1547 ost->sync_ist = (nb_stream_maps > 0) ?
1548 ist_table[file_table[stream_maps[n-1].sync_file_index].ist_index +
1549 stream_maps[n-1].sync_stream_index] : ist;
1550 }
1551 }
1552
1553 /* for each output stream, we compute the right encoding parameters */
1554 for(i=0;i<nb_ostreams;i++) {
1555 ost = ost_table[i];
1556 ist = ist_table[ost->source_index];
1557
1558 codec = &ost->st->codec;
1559 icodec = &ist->st->codec;
1560
1561 if (ost->st->stream_copy) {
1562 /* if stream_copy is selected, no need to decode or encode */
1563 codec->codec_id = icodec->codec_id;
1564 codec->codec_type = icodec->codec_type;
1565 if(!codec->codec_tag) codec->codec_tag = icodec->codec_tag;
1566 codec->bit_rate = icodec->bit_rate;
1567 codec->extradata= icodec->extradata;
1568 codec->extradata_size= icodec->extradata_size;
1569 switch(codec->codec_type) {
1570 case CODEC_TYPE_AUDIO:
1571 codec->sample_rate = icodec->sample_rate;
1572 codec->channels = icodec->channels;
1573 codec->frame_size = icodec->frame_size;
1574 codec->block_align= icodec->block_align;
1575 break;
1576 case CODEC_TYPE_VIDEO:
1577 codec->time_base = icodec->time_base;
1578 codec->width = icodec->width;
1579 codec->height = icodec->height;
1580 codec->has_b_frames = icodec->has_b_frames;
1581 break;
1582 default:
1583 av_abort();
1584 }
1585 } else {
1586 switch(codec->codec_type) {
1587 case CODEC_TYPE_AUDIO:
1588 if (fifo_init(&ost->fifo, 2 * MAX_AUDIO_PACKET_SIZE))
1589 goto fail;
1590
1591 if (codec->channels == icodec->channels &&
1592 codec->sample_rate == icodec->sample_rate) {
1593 ost->audio_resample = 0;
1594 } else {
1595 if (codec->channels != icodec->channels &&
1596 (icodec->codec_id == CODEC_ID_AC3 ||
1597 icodec->codec_id == CODEC_ID_DTS)) {
1598 /* Special case for 5.1 AC3 and DTS input */
1599 /* and mono or stereo output */
1600 /* Request specific number of channels */
1601 icodec->channels = codec->channels;
1602 if (codec->sample_rate == icodec->sample_rate)
1603 ost->audio_resample = 0;
1604 else {
1605 ost->audio_resample = 1;
1606 }
1607 } else {
1608 ost->audio_resample = 1;
1609 }
1610 }
1611 if(audio_sync_method>1)
1612 ost->audio_resample = 1;
1613
1614 if(ost->audio_resample){
1615 ost->resample = audio_resample_init(codec->channels, icodec->channels,
1616 codec->sample_rate, icodec->sample_rate);
1617 if(!ost->resample){
1618 printf("Can't resample. Aborting.\n");
1619 av_abort();
1620 }
1621 }
1622 ist->decoding_needed = 1;
1623 ost->encoding_needed = 1;
1624 break;
1625 case CODEC_TYPE_VIDEO:
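/* pick one of four mutually exclusive paths: pass the picture through
   unchanged, crop only, pad only, or do a full resample
   (img_resample_full_init also folds any cropping and padding into the
   rescale) */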
1626 if (codec->width == icodec->width &&
1627 codec->height == icodec->height &&
1628 frame_topBand == 0 &&
1629 frame_bottomBand == 0 &&
1630 frame_leftBand == 0 &&
1631 frame_rightBand == 0 &&
1632 frame_padtop == 0 &&
1633 frame_padbottom == 0 &&
1634 frame_padleft == 0 &&
1635 frame_padright == 0)
1636 {
1637 ost->video_resample = 0;
1638 ost->video_crop = 0;
1639 ost->video_pad = 0;
1640 } else if ((codec->width == icodec->width -
1641 (frame_leftBand + frame_rightBand)) &&
1642 (codec->height == icodec->height -
1643 (frame_topBand + frame_bottomBand)))
1644 {
1645 ost->video_resample = 0;
1646 ost->video_crop = 1;
1647 ost->topBand = frame_topBand;
1648 ost->leftBand = frame_leftBand;
1649 } else if ((codec->width == icodec->width +
1650 (frame_padleft + frame_padright)) &&
1651 (codec->height == icodec->height +
1652 (frame_padtop + frame_padbottom))) {
1653 ost->video_resample = 0;
1654 ost->video_crop = 0;
1655 ost->video_pad = 1;
1656 ost->padtop = frame_padtop;
1657 ost->padleft = frame_padleft;
1658 ost->padbottom = frame_padbottom;
1659 ost->padright = frame_padright;
1660 avcodec_get_frame_defaults(&ost->pict_tmp);
1661 if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
1662 codec->width, codec->height ) )
1663 goto fail;
1664 } else {
1665 ost->video_resample = 1;
1666 ost->video_crop = 0; // cropping is handled as part of resample
1667 avcodec_get_frame_defaults(&ost->pict_tmp);
1668 if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, PIX_FMT_YUV420P,
1669 codec->width, codec->height ) )
1670 goto fail;
1671
1672 ost->img_resample_ctx = img_resample_full_init(
1673 ost->st->codec.width, ost->st->codec.height,
1674 ist->st->codec.width, ist->st->codec.height,
1675 frame_topBand, frame_bottomBand,
1676 frame_leftBand, frame_rightBand,
1677 frame_padtop, frame_padbottom,
1678 frame_padleft, frame_padright);
1679
1680 ost->padtop = frame_padtop;
1681 ost->padleft = frame_padleft;
1682 ost->padbottom = frame_padbottom;
1683 ost->padright = frame_padright;
1684
1685 }
1686 ost->encoding_needed = 1;
1687 ist->decoding_needed = 1;
1688 break;
1689 default:
1690 av_abort();
1691 }
1692 /* two pass mode */
1693 if (ost->encoding_needed &&
1694 (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1695 char logfilename[1024];
1696 FILE *f;
1697 int size;
1698 char *logbuffer;
1699
1700 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1701 pass_logfilename ?
1702 pass_logfilename : DEFAULT_PASS_LOGFILENAME, i);
1703 if (codec->flags & CODEC_FLAG_PASS1) {
1704 f = fopen(logfilename, "w");
1705 if (!f) {
1706 perror(logfilename);
1707 exit(1);
1708 }
1709 ost->logfile = f;
1710 } else {
1711 /* read the log file */
1712 f = fopen(logfilename, "r");
1713 if (!f) {
1714 perror(logfilename);
1715 exit(1);
1716 }
1717 fseek(f, 0, SEEK_END);
1718 size = ftell(f);
1719 fseek(f, 0, SEEK_SET);
1720 logbuffer = av_malloc(size + 1);
1721 if (!logbuffer) {
1722 fprintf(stderr, "Could not allocate log buffer\n");
1723 exit(1);
1724 }
1725 size = fread(logbuffer, 1, size, f);
1726 fclose(f);
1727 logbuffer[size] = '\0';
1728 codec->stats_in = logbuffer;
1729 }
1730 }
1731 }
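/* make sure the shared encode buffer is large enough for the largest output
   frame; 4 bytes per pixel is a generous upper bound */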
1732 if(codec->codec_type == CODEC_TYPE_VIDEO){
1733 int size= codec->width * codec->height;
1734 bit_buffer_size= FFMAX(bit_buffer_size, 4*size);
1735 }
1736 }
1737
1738 if (!bit_buffer)
1739 bit_buffer = av_malloc(bit_buffer_size);
1740 if (!bit_buffer)
1741 goto fail;
1742
1743 /* dump the file output parameters - cannot be done before in case
1744 of stream copy */
1745 for(i=0;i<nb_output_files;i++) {
1746 dump_format(output_files[i], i, output_files[i]->filename, 1);
1747 }
1748
1749 /* dump the stream mapping */
1750 if (verbose >= 0) {
1751 fprintf(stderr, "Stream mapping:\n");
1752 for(i=0;i<nb_ostreams;i++) {
1753 ost = ost_table[i];
1754 fprintf(stderr, " Stream #%d.%d -> #%d.%d",
1755 ist_table[ost->source_index]->file_index,
1756 ist_table[ost->source_index]->index,
1757 ost->file_index,
1758 ost->index);
1759 if (ost->sync_ist != ist_table[ost->source_index])
1760 fprintf(stderr, " [sync #%d.%d]",
1761 ost->sync_ist->file_index,
1762 ost->sync_ist->index);
1763 fprintf(stderr, "\n");
1764 }
1765 }
1766
1767 /* open each encoder */
1768 for(i=0;i<nb_ostreams;i++) {
1769 ost = ost_table[i];
1770 if (ost->encoding_needed) {
1771 AVCodec *codec;
1772 codec = avcodec_find_encoder(ost->st->codec.codec_id);
1773 if (!codec) {
1774 fprintf(stderr, "Unsupported codec for output stream #%d.%d\n",
1775 ost->file_index, ost->index);
1776 exit(1);
1777 }
1778 if (avcodec_open(&ost->st->codec, codec) < 0) {
1779 fprintf(stderr, "Error while opening codec for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height\n",
1780 ost->file_index, ost->index);
1781 exit(1);
1782 }
1783 extra_size += ost->st->codec.extradata_size;
1784 }
1785 }
1786
1787 /* open each decoder */
1788 for(i=0;i<nb_istreams;i++) {
1789 ist = ist_table[i];
1790 if (ist->decoding_needed) {
1791 AVCodec *codec;
1792 codec = avcodec_find_decoder(ist->st->codec.codec_id);
1793 if (!codec) {
1794 fprintf(stderr, "Unsupported codec (id=%d) for input stream #%d.%d\n",
1795 ist->st->codec.codec_id, ist->file_index, ist->index);
1796 exit(1);
1797 }
1798 if (avcodec_open(&ist->st->codec, codec) < 0) {
1799 fprintf(stderr, "Error while opening codec for input stream #%d.%d\n",
1800 ist->file_index, ist->index);
1801 exit(1);
1802 }
1803 //if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO)
1804 // ist->st->codec.flags |= CODEC_FLAG_REPEAT_FIELD;
1805 }
1806 }
1807
1808 /* init pts */
1809 for(i=0;i<nb_istreams;i++) {
1810 ist = ist_table[i];
1811 is = input_files[ist->file_index];
1812 ist->pts = 0;
1813 ist->next_pts = av_rescale_q(ist->st->start_time, ist->st->time_base, AV_TIME_BASE_Q);
1814 if(ist->st->start_time == AV_NOPTS_VALUE)
1815 ist->next_pts=0;
1816 if(input_files_ts_offset[ist->file_index])
1817 ist->next_pts= AV_NOPTS_VALUE;
1818 ist->is_start = 1;
1819 }
1820
1821 /* compute buffer size max (should use a complete heuristic) */
1822 for(i=0;i<nb_input_files;i++) {
1823 file_table[i].buffer_size_max = 2048;
1824 }
1825
1826 /* set meta data information from input file if required */
1827 for (i=0;i<nb_meta_data_maps;i++) {
1828 AVFormatContext *out_file;
1829 AVFormatContext *in_file;
1830
1831 int out_file_index = meta_data_maps[i].out_file;
1832 int in_file_index = meta_data_maps[i].in_file;
1833 if ( out_file_index < 0 || out_file_index >= nb_output_files ) {
1834 fprintf(stderr, "Invalid output file index %d map_meta_data(%d,%d)\n", out_file_index, out_file_index, in_file_index);
1835 ret = -EINVAL;
1836 goto fail;
1837 }
1838 if ( in_file_index < 0 || in_file_index >= nb_input_files ) {
1839 fprintf(stderr, "Invalid input file index %d in map_meta_data(%d,%d)\n", in_file_index, out_file_index, in_file_index);
1840 ret = -EINVAL;
1841 goto fail;
1842 }
1843
1844 out_file = output_files[out_file_index];
1845 in_file = input_files[in_file_index];
1846
1847 strcpy(out_file->title, in_file->title);
1848 strcpy(out_file->author, in_file->author);
1849 strcpy(out_file->copyright, in_file->copyright);
1850 strcpy(out_file->comment, in_file->comment);
1851 strcpy(out_file->album, in_file->album);
1852 out_file->year = in_file->year;
1853 out_file->track = in_file->track;
1854 strcpy(out_file->genre, in_file->genre);
1855 }
1856
1857 /* open files and write file headers */
1858 for(i=0;i<nb_output_files;i++) {
1859 os = output_files[i];
1860 if (av_write_header(os) < 0) {
1861 fprintf(stderr, "Could not write header for output file #%d (incorrect codec parameters ?)\n", i);
1862 ret = -EINVAL;
1863 goto fail;
1864 }
1865 }
1866
1867 #ifndef CONFIG_WIN32
1868 if ( !using_stdin && verbose >= 0) {
1869 fprintf(stderr, "Press [q] to stop encoding\n");
1870 url_set_interrupt_cb(decode_interrupt_cb);
1871 }
1872 #endif
1873 term_init();
1874
1875 stream_no_data = 0;
1876 key = -1;
1877
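/* main transcoding loop: on each pass pick the input file whose stream has
   the smallest output timestamp (or input timestamp when input_sync is set),
   read one packet from it and hand it to output_packet() for decoding and
   re-encoding (or stream copy) */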
1878 for(; received_sigterm == 0;) {
1879 int file_index, ist_index;
1880 AVPacket pkt;
1881 double ipts_min;
1882 double opts_min;
1883
1884 redo:
1885 ipts_min= 1e100;
1886 opts_min= 1e100;
1887 /* if 'q' pressed, exits */
1888 if (!using_stdin) {
1889 if (q_pressed)
1890 break;
1891 /* read_key() returns 0 on EOF */
1892 key = read_key();
1893 if (key == 'q')
1894 break;
1895 }
1896
1897 /* select the stream that we must read now by looking at the
1898 smallest output pts */
1899 file_index = -1;
1900 for(i=0;i<nb_ostreams;i++) {
1901 double ipts, opts;
1902 ost = ost_table[i];
1903 os = output_files[ost->file_index];
1904 ist = ist_table[ost->source_index];
1905 if(ost->st->codec.codec_type == CODEC_TYPE_VIDEO)
1906 opts = ost->sync_opts * av_q2d(ost->st->codec.time_base);
1907 else
1908 opts = ost->st->pts.val * av_q2d(ost->st->time_base);
1909 ipts = (double)ist->pts;
1910 if (!file_table[ist->file_index].eof_reached){
1911 if(ipts < ipts_min) {
1912 ipts_min = ipts;
1913 if(input_sync ) file_index = ist->file_index;
1914 }
1915 if(opts < opts_min) {
1916 opts_min = opts;
1917 if(!input_sync) file_index = ist->file_index;
1918 }
1919 }
1920 if(ost->frame_number >= max_frames[ost->st->codec.codec_type]){
1921 file_index= -1;
1922 break;
1923 }
1924 }
1925 /* if none, we are finished */
1926 if (file_index < 0) {
1927 break;
1928 }
1929
1930 /* finish if recording time exhausted */
1931 if (recording_time > 0 && opts_min >= (recording_time / 1000000.0))
1932 break;
1933
1934 /* finish if limit size exhausted */
1935 if (limit_filesize != 0 && (limit_filesize * 1024) < url_ftell(&output_files[0]->pb))
1936 break;
1937
1938 /* read a frame from it and output it in the fifo */
1939 is = input_files[file_index];
1940 if (av_read_frame(is, &pkt) < 0) {
1941 file_table[file_index].eof_reached = 1;
1942 if (opt_shortest) break; else continue;
1943 }
1944
1945 if (!pkt.size) {
1946 stream_no_data = is;
1947 } else {
1948 stream_no_data = 0;
1949 }
1950 if (do_pkt_dump) {
1951 av_pkt_dump(stdout, &pkt, do_hex_dump);
1952 }
1953 /* the following test is needed in case new streams appear
1954 dynamically in the stream: we ignore them */
1955 if (pkt.stream_index >= file_table[file_index].nb_streams)
1956 goto discard_packet;
1957 ist_index = file_table[file_index].ist_index + pkt.stream_index;
1958 ist = ist_table[ist_index];
1959 if (ist->discard)
1960 goto discard_packet;
1961
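/* detect large timestamp discontinuities: if the packet dts is more than
   10 seconds away from the predicted next_pts (and copy_ts is not set),
   fold the difference into this file's timestamp offset and adjust the
   prediction for every stream of the file so they stay aligned */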
1962 // fprintf(stderr, "next:%lld dts:%lld off:%lld %d\n", ist->next_pts, pkt.dts, input_files_ts_offset[ist->file_index], ist->st->codec.codec_type);
1963 if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE) {
1964 int64_t delta= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q) - ist->next_pts;
1965 if(ABS(delta) > 10LL*AV_TIME_BASE && !copy_ts){
1966 input_files_ts_offset[ist->file_index]-= delta;
1967 if (verbose > 2)
1968 fprintf(stderr, "timestamp discontinuity %lld, new offset= %lld\n", delta, input_files_ts_offset[ist->file_index]);
1969 for(i=0; i<file_table[file_index].nb_streams; i++){
1970 int index= file_table[file_index].ist_index + i;
1971 ist_table[index]->next_pts += delta;
1972 ist_table[index]->is_start=1;
1973 }
1974 }
1975 }
1976
1977 //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->index, pkt.size);
1978 if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
1979
1980 if (verbose >= 0)
1981 fprintf(stderr, "Error while decoding stream #%d.%d\n",
1982 ist->file_index, ist->index);
1983
1984 av_free_packet(&pkt);
1985 goto redo;
1986 }
1987
1988 discard_packet:
1989 av_free_packet(&pkt);
1990
1991 /* dump report by using the first output video and audio streams */
1992 print_report(output_files, ost_table, nb_ostreams, 0);
1993 }
1994
1995 /* at the end of stream, we must flush the decoder buffers */
1996 for(i=0;i<nb_istreams;i++) {
1997 ist = ist_table[i];
1998 if (ist->decoding_needed) {
1999 output_packet(ist, i, ost_table, nb_ostreams, NULL);
2000 }
2001 }
2002
2003 term_exit();
2004
2005 /* write the trailer if needed and close file */
2006 for(i=0;i<nb_output_files;i++) {
2007 os = output_files[i];
2008 av_write_trailer(os);
2009 }
2010
2011 /* dump report by using the first video and audio streams */
2012 print_report(output_files, ost_table, nb_ostreams, 1);
2013
2014 /* close each encoder */
2015 for(i=0;i<nb_ostreams;i++) {
2016 ost = ost_table[i];
2017 if (ost->encoding_needed) {
2018 av_freep(&ost->st->codec.stats_in);
2019 avcodec_close(&ost->st->codec);
2020 }
2021 }
2022
2023 /* close each decoder */
2024 for(i=0;i<nb_istreams;i++) {
2025 ist = ist_table[i];
2026 if (ist->decoding_needed) {
2027 avcodec_close(&ist->st->codec);
2028 }
2029 }
2030
2031 /* finished ! */
2032
2033 ret = 0;
2034 fail1:
2035 av_freep(&bit_buffer);
2036 av_free(file_table);
2037
2038 if (ist_table) {
2039 for(i=0;i<nb_istreams;i++) {
2040 ist = ist_table[i];
2041 av_free(ist);
2042 }
2043 av_free(ist_table);
2044 }
2045 if (ost_table) {
2046 for(i=0;i<nb_ostreams;i++) {
2047 ost = ost_table[i];
2048 if (ost) {
2049 if (ost->logfile) {
2050 fclose(ost->logfile);
2051 ost->logfile = NULL;
2052 }
2053 fifo_free(&ost->fifo); /* works even if fifo is not
2054 initialized but set to zero */
2055 av_free(ost->pict_tmp.data[0]);
2056 if (ost->video_resample)
2057 img_resample_close(ost->img_resample_ctx);
2058 if (ost->audio_resample)
2059 audio_resample_close(ost->resample);
2060 av_free(ost);
2061 }
2062 }
2063 av_free(ost_table);
2064 }
2065 return ret;
2066 fail:
2067 ret = -ENOMEM;
2068 goto fail1;
2069 }
2070
2071 #if 0
2072 int file_read(const char *filename)
2073 {
2074 URLContext *h;
2075 unsigned char buffer[1024];
2076 int len, i;
2077
2078 if (url_open(&h, filename, O_RDONLY) < 0) {
2079 printf("could not open '%s'\n", filename);
2080 return -1;
2081 }
2082 for(;;) {
2083 len = url_read(h, buffer, sizeof(buffer));
2084 if (len <= 0)
2085 break;
2086 for(i=0;i<len;i++) putchar(buffer[i]);
2087 }
2088 url_close(h);
2089 return 0;
2090 }
2091 #endif
2092
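/* command line option callbacks: each one parses its argument and stores the
   result in one of the static defaults declared near the top of this file;
   they are wired up through the options[] table */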
2093 static void opt_image_format(const char *arg)
2094 {
2095 AVImageFormat *f;
2096
2097 for(f = first_image_format; f != NULL; f = f->next) {
2098 if (!strcmp(arg, f->name))
2099 break;
2100 }
2101 if (!f) {
2102 fprintf(stderr, "Unknown image format: '%s'\n", arg);
2103 exit(1);
2104 }
2105 image_format = f;
2106 }
2107
2108 static void opt_format(const char *arg)
2109 {
2110 /* compatibility stuff for pgmyuv */
2111 if (!strcmp(arg, "pgmyuv")) {
2112 pgmyuv_compatibility_hack=1;
2113 // opt_image_format(arg);
2114 arg = "image2";
2115 }
2116
2117 file_iformat = av_find_input_format(arg);
2118 file_oformat = guess_format(arg, NULL, NULL);
2119 if (!file_iformat && !file_oformat) {
2120 fprintf(stderr, "Unknown input or output format: %s\n", arg);
2121 exit(1);
2122 }
2123 }
2124
2125 static void opt_video_bitrate(const char *arg)
2126 {
2127 video_bit_rate = atoi(arg) * 1000;
2128 }
2129
2130 static void opt_video_bitrate_tolerance(const char *arg)
2131 {
2132 video_bit_rate_tolerance = atoi(arg) * 1000;
2133 }
2134
2135 static void opt_video_bitrate_max(const char *arg)
2136 {
2137 video_rc_max_rate = atoi(arg) * 1000;
2138 }
2139
2140 static void opt_video_bitrate_min(const char *arg)
2141 {
2142 video_rc_min_rate = atoi(arg) * 1000;
2143 }
2144
2145 static void opt_video_buffer_size(const char *arg)
2146 {
2147 video_rc_buffer_size = atoi(arg) * 8*1024;
2148 }
2149
2150 static void opt_video_rc_eq(char *arg)
2151 {
2152 video_rc_eq = arg;
2153 }
2154
2155 static void opt_video_rc_override_string(char *arg)
2156 {
2157 video_rc_override_string = arg;
2158 }
2159
2160
2161 static void opt_workaround_bugs(const char *arg)
2162 {
2163 workaround_bugs = atoi(arg);
2164 }
2165
2166 static void opt_dct_algo(const char *arg)
2167 {
2168 dct_algo = atoi(arg);
2169 }
2170
2171 static void opt_idct_algo(const char *arg)
2172 {
2173 idct_algo = atoi(arg);
2174 }
2175
2176 static void opt_me_threshold(const char *arg)
2177 {
2178 me_threshold = atoi(arg);
2179 }
2180
2181 static void opt_mb_threshold(const char *arg)
2182 {
2183 mb_threshold = atoi(arg);
2184 }
2185
2186 static void opt_error_resilience(const char *arg)
2187 {
2188 error_resilience = atoi(arg);
2189 }
2190
2191 static void opt_error_concealment(const char *arg)
2192 {
2193 error_concealment = atoi(arg);
2194 }
2195
2196 static void opt_debug(const char *arg)
2197 {
2198 debug = atoi(arg);
2199 }
2200
2201 static void opt_vismv(const char *arg)
2202 {
2203 debug_mv = atoi(arg);
2204 }
2205
2206 static void opt_verbose(const char *arg)
2207 {
2208 verbose = atoi(arg);
2209 av_log_set_level(atoi(arg));
2210 }
2211
2212 static void opt_frame_rate(const char *arg)
2213 {
2214 if (parse_frame_rate(&frame_rate, &frame_rate_base, arg) < 0) {
2215 fprintf(stderr, "Incorrect frame rate\n");
2216 exit(1);
2217 }
2218 }
2219
2220 static void opt_frame_crop_top(const char *arg)
2221 {
2222 frame_topBand = atoi(arg);
2223 if (frame_topBand < 0) {
2224 fprintf(stderr, "Incorrect top crop size\n");
2225 exit(1);
2226 }
2227 if ((frame_topBand % 2) != 0) {
2228 fprintf(stderr, "Top crop size must be a multiple of 2\n");
2229 exit(1);
2230 }
2231 if ((frame_topBand) >= frame_height){
2232 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2233 exit(1);
2234 }
2235 frame_height -= frame_topBand;
2236 }
2237
2238 static void opt_frame_crop_bottom(const char *arg)
2239 {
2240 frame_bottomBand = atoi(arg);
2241 if (frame_bottomBand < 0) {
2242 fprintf(stderr, "Incorrect bottom crop size\n");
2243 exit(1);
2244 }
2245 if ((frame_bottomBand % 2) != 0) {
2246 fprintf(stderr, "Bottom crop size must be a multiple of 2\n");
2247 exit(1);
2248 }
2249 if ((frame_bottomBand) >= frame_height){
2250 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2251 exit(1);
2252 }
2253 frame_height -= frame_bottomBand;
2254 }
2255
2256 static void opt_frame_crop_left(const char *arg)
2257 {
2258 frame_leftBand = atoi(arg);
2259 if (frame_leftBand < 0) {
2260 fprintf(stderr, "Incorrect left crop size\n");
2261 exit(1);
2262 }
2263 if ((frame_leftBand % 2) != 0) {
2264 fprintf(stderr, "Left crop size must be a multiple of 2\n");
2265 exit(1);
2266 }
2267 if ((frame_leftBand) >= frame_width){
2268 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2269 exit(1);
2270 }
2271 frame_width -= frame_leftBand;
2272 }
2273
2274 static void opt_frame_crop_right(const char *arg)
2275 {
2276 frame_rightBand = atoi(arg);
2277 if (frame_rightBand < 0) {
2278 fprintf(stderr, "Incorrect right crop size\n");
2279 exit(1);
2280 }
2281 if ((frame_rightBand % 2) != 0) {
2282 fprintf(stderr, "Right crop size must be a multiple of 2\n");
2283 exit(1);
2284 }
2285 if ((frame_rightBand) >= frame_width){
2286 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2287 exit(1);
2288 }
2289 frame_width -= frame_rightBand;
2290 }
2291
2292 static void opt_frame_size(const char *arg)
2293 {
2294 if (parse_image_size(&frame_width, &frame_height, arg) < 0) {
2295 fprintf(stderr, "Incorrect frame size\n");
2296 exit(1);
2297 }
2298 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2299 fprintf(stderr, "Frame size must be a multiple of 2\n");
2300 exit(1);
2301 }
2302 }
2303
2304
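/* fixed-point RGB -> YCbCr conversion (ITU-R BT.601 coefficients) with
   SCALEBITS bits of fractional precision, used by opt_pad_color() below to
   turn the RGB pad color into Y/Cb/Cr values.
   Worked example: RGB_TO_Y(255,255,255) evaluates to 255, RGB_TO_Y(0,0,0) to 0. */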
2305 #define SCALEBITS 10
2306 #define ONE_HALF (1 << (SCALEBITS - 1))
2307 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
2308
2309 #define RGB_TO_Y(r, g, b) \
2310 ((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
2311 FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
2312
2313 #define RGB_TO_U(r1, g1, b1, shift)\
2314 (((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
2315 FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
2316
2317 #define RGB_TO_V(r1, g1, b1, shift)\
2318 (((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
2319 FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
2320
2321 static void opt_pad_color(const char *arg) {
2322 /* Input is expected to be six hex digits similar to
2323 how colors are expressed in HTML tags (but without the #) */
2324 int rgb = strtol(arg, NULL, 16);
2325 int r,g,b;
2326
2327 r = (rgb >> 16);
2328 g = ((rgb >> 8) & 255);
2329 b = (rgb & 255);
2330
2331 padcolor[0] = RGB_TO_Y(r,g,b);
2332 padcolor[1] = RGB_TO_U(r,g,b,0);
2333 padcolor[2] = RGB_TO_V(r,g,b,0);
2334 }
2335
2336 static void opt_frame_pad_top(const char *arg)
2337 {
2338 frame_padtop = atoi(arg);
2339 if (frame_padtop < 0) {
2340 fprintf(stderr, "Incorrect top pad size\n");
2341 exit(1);
2342 }
2343 if ((frame_padtop % 2) != 0) {
2344 fprintf(stderr, "Top pad size must be a multiple of 2\n");
2345 exit(1);
2346 }
2347 }
2348
2349 static void opt_frame_pad_bottom(const char *arg)
2350 {
2351 frame_padbottom = atoi(arg);
2352 if (frame_padbottom < 0) {
2353 fprintf(stderr, "Incorrect bottom pad size\n");
2354 exit(1);
2355 }
2356 if ((frame_padbottom % 2) != 0) {
2357 fprintf(stderr, "Bottom pad size must be a multiple of 2\n");
2358 exit(1);
2359 }
2360 }
2361
2362
2363 static void opt_frame_pad_left(const char *arg)
2364 {
2365 frame_padleft = atoi(arg);
2366 if (frame_padleft < 0) {
2367 fprintf(stderr, "Incorrect left pad size\n");
2368 exit(1);
2369 }
2370 if ((frame_padleft % 2) != 0) {
2371 fprintf(stderr, "Left pad size must be a multiple of 2\n");
2372 exit(1);
2373 }
2374 }
2375
2376
2377 static void opt_frame_pad_right(const char *arg)
2378 {
2379 frame_padright = atoi(arg);
2380 if (frame_padright < 0) {
2381 fprintf(stderr, "Incorrect right pad size\n");
2382 exit(1);
2383 }
2384 if ((frame_padright % 2) != 0) {
2385 fprintf(stderr, "Right pad size must be a multiple of 2\n");
2386 exit(1);
2387 }
2388 }
2389
2390
2391 static void opt_frame_pix_fmt(const char *arg)
2392 {
2393 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2394 }
2395
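/* the aspect ratio may be given either as "x:y" (e.g. "4:3") or as a single
   floating point value (e.g. "1.3333"); a zero or unparsable value is
   rejected */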
2396 static void opt_frame_aspect_ratio(const char *arg)
2397 {
2398 int x = 0, y = 0;
2399 double ar = 0;
2400 const char *p;
2401
2402 p = strchr(arg, ':');
2403 if (p) {
2404 x = strtol(arg, (char **)&arg, 10);
2405 if (arg == p)
2406 y = strtol(arg+1, (char **)&arg, 10);
2407 if (x > 0 && y > 0)
2408 ar = (double)x / (double)y;
2409 } else
2410 ar = strtod(arg, (char **)&arg);
2411
2412 if (!ar) {
2413 fprintf(stderr, "Incorrect aspect ratio specification.\n");
2414 exit(1);
2415 }
2416 frame_aspect_ratio = ar;
2417 }
2418
2419 static void opt_gop_size(const char *arg)
2420 {
2421 gop_size = atoi(arg);
2422 }
2423
2424 static void opt_b_frames(const char *arg)
2425 {
2426 b_frames = atoi(arg);
2427 if (b_frames > FF_MAX_B_FRAMES) {
2428 fprintf(stderr, "\nCannot have more than %d B frames, increase FF_MAX_B_FRAMES.\n", FF_MAX_B_FRAMES);
2429 exit(1);
2430 } else if (b_frames < 1) {
2431 fprintf(stderr, "\nNumber of B frames must be greater than 0\n");
2432 exit(1);
2433 }
2434 }
2435
2436 static void opt_mb_decision(const char *arg)
2437 {
2438 mb_decision = atoi(arg);
2439 }
2440
2441 static void opt_mb_cmp(const char *arg)
2442 {
2443 mb_cmp = atoi(arg);
2444 }
2445
2446 static void opt_ildct_cmp(const char *arg)
2447 {
2448 ildct_cmp = atoi(arg);
2449 }
2450
2451 static void opt_sub_cmp(const char *arg)
2452 {
2453 sub_cmp = atoi(arg);
2454 }
2455
2456 static void opt_cmp(const char *arg)
2457 {
2458 cmp = atoi(arg);
2459 }
2460
2461 static void opt_pre_cmp(const char *arg)
2462 {
2463 pre_cmp = atoi(arg);
2464 }
2465
2466 static void opt_pre_me(const char *arg)
2467 {
2468 pre_me = atoi(arg);
2469 }
2470
2471 static void opt_lumi_mask(const char *arg)
2472 {
2473 lumi_mask = atof(arg);
2474 }
2475
2476 static void opt_dark_mask(const char *arg)
2477 {
2478 dark_mask = atof(arg);
2479 }
2480
2481 static void opt_scplx_mask(const char *arg)
2482 {
2483 scplx_mask = atof(arg);
2484 }
2485
2486 static void opt_tcplx_mask(const char *arg)
2487 {
2488 tcplx_mask = atof(arg);
2489 }
2490
2491 static void opt_p_mask(const char *arg)
2492 {
2493 p_mask = atof(arg);
2494 }
2495
2496 static void opt_qscale(const char *arg)
2497 {
2498 video_qscale = atof(arg);
2499 if (video_qscale < 0.01 ||
2500 video_qscale > 255) {
2501 fprintf(stderr, "qscale must be >= 0.01 and <= 255\n");
2502 exit(1);
2503 }
2504 }
2505
2506 static void opt_qsquish(const char *arg)
2507 {
2508 video_qsquish = atof(arg);
2509 if (video_qsquish < 0.0 ||
2510 video_qsquish > 99.0) {
2511 fprintf(stderr, "qsquish must be >= 0.0 and <= 99.0\n");
2512 exit(1);
2513 }
2514 }
2515
2516 static void opt_lelim(const char *arg)
2517 {
2518 video_lelim = atoi(arg);
2519 if (video_lelim < -99 ||
2520 video_lelim > 99) {
2521 fprintf(stderr, "lelim must be >= -99 and <= 99\n");
2522 exit(1);
2523 }
2524 }
2525
2526 static void opt_celim(const char *arg)
2527 {
2528 video_celim = atoi(arg);
2529 if (video_celim < -99 ||
2530 video_celim > 99) {
2531 fprintf(stderr, "celim must be >= -99 and <= 99\n");
2532 exit(1);
2533 }
2534 }
2535
2536 static void opt_lmax(const char *arg)
2537 {
2538 video_lmax = atof(arg)*FF_QP2LAMBDA;
2539 }
2540
2541 static void opt_lmin(const char *arg)
2542 {
2543 video_lmin = atof(arg)*FF_QP2LAMBDA;
2544 }
2545
2546 static void opt_qmin(const char *arg)
2547 {
2548 video_qmin = atoi(arg);
2549 if (video_qmin < 1 ||
2550 video_qmin > 31) {
2551 fprintf(stderr, "qmin must be >= 1 and <= 31\n");
2552 exit(1);
2553 }
2554 }
2555
2556 static void opt_qmax(const char *arg)
2557 {
2558 video_qmax = atoi(arg);
2559 if (video_qmax < 1 ||
2560 video_qmax > 31) {
2561 fprintf(stderr, "qmax must be >= 1 and <= 31\n");
2562 exit(1);
2563 }
2564 }
2565
2566 static void opt_mb_lmin(const char *arg)
2567 {
2568 video_mb_lmin = atof(arg)*FF_QP2LAMBDA;
2569 if (video_mb_lmin < 1 ||
2570 video_mb_lmin > FF_LAMBDA_MAX) {
2571 fprintf(stderr, "mblmin must be >= 1 and <= %d\n", FF_LAMBDA_MAX / FF_QP2LAMBDA);
2572 exit(1);
2573 }
2574 }
2575
2576 static void opt_mb_lmax(const char *arg)
2577 {
2578 video_mb_lmax = atof(arg)*FF_QP2LAMBDA;
2579 if (video_mb_lmax < 1 ||
2580 video_mb_lmax > FF_LAMBDA_MAX) {
2581 fprintf(stderr, "mblmax must be >= 1 and <= %d\n", FF_LAMBDA_MAX / FF_QP2LAMBDA);
2582 exit(1);
2583 }
2584 }
2585
2586 static void opt_qdiff(const char *arg)
2587 {
2588 video_qdiff = atoi(arg);
2589 if (video_qdiff < 0 ||
2590 video_qdiff > 31) {
2591 fprintf(stderr, "qdiff must be >= 0 and <= 31\n");
2592 exit(1);
2593 }
2594 }
2595
2596 static void opt_qblur(const char *arg)
2597 {
2598 video_qblur = atof(arg);
2599 }
2600
2601 static void opt_qcomp(const char *arg)
2602 {
2603 video_qcomp = atof(arg);
2604 }
2605
2606 static void opt_rc_initial_cplx(const char *arg)
2607 {
2608 video_rc_initial_cplx = atof(arg);
2609 }
2610 static void opt_b_qfactor(const char *arg)
2611 {
2612 video_b_qfactor = atof(arg);
2613 }
2614 static void opt_i_qfactor(const char *arg)
2615 {
2616 video_i_qfactor = atof(arg);
2617 }
2618 static void opt_b_qoffset(const char *arg)
2619 {
2620 video_b_qoffset = atof(arg);
2621 }
2622 static void opt_i_qoffset(const char *arg)
2623 {
2624 video_i_qoffset = atof(arg);
2625 }
2626
2627 static void opt_ibias(const char *arg)
2628 {
2629 video_intra_quant_bias = atoi(arg);
2630 }
2631 static void opt_pbias(const char *arg)
2632 {
2633 video_inter_quant_bias = atoi(arg);
2634 }
2635
2636 static void opt_packet_size(const char *arg)
2637 {
2638 packet_size= atoi(arg);
2639 }
2640
2641 static void opt_error_rate(const char *arg)
2642 {
2643 error_rate= atoi(arg);
2644 }
2645
2646 static void opt_strict(const char *arg)
2647 {
2648 strict= atoi(arg);
2649 }
2650
2651 static void opt_top_field_first(const char *arg)
2652 {
2653 top_field_first= atoi(arg);
2654 }
2655
2656 static void opt_noise_reduction(const char *arg)
2657 {
2658 noise_reduction= atoi(arg);
2659 }
2660
2661 static void opt_qns(const char *arg)
2662 {
2663 qns= atoi(arg);
2664 }
2665
2666 static void opt_sc_threshold(const char *arg)
2667 {
2668 sc_threshold= atoi(arg);
2669 }
2670
2671 static void opt_me_range(const char *arg)
2672 {
2673 me_range = atoi(arg);
2674 }
2675
2676 static void opt_thread_count(const char *arg)
2677 {
2678 thread_count= atoi(arg);
2679 #if !defined(HAVE_THREADS)
2680 if (verbose >= 0)
2681 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2682 #endif
2683 }
2684
2685 static void opt_audio_bitrate(const char *arg)
2686 {
2687 audio_bit_rate = atoi(arg) * 1000;
2688 }
2689
2690 static void opt_audio_rate(const char *arg)
2691 {
2692 audio_sample_rate = atoi(arg);
2693 }
2694
2695 static void opt_audio_channels(const char *arg)
2696 {
2697 audio_channels = atoi(arg);
2698 }
2699
2700 static void opt_video_device(const char *arg)
2701 {
2702 video_device = av_strdup(arg);
2703 }
2704
2705 static void opt_grab_device(const char *arg)
2706 {
2707 grab_device = av_strdup(arg);
2708 }
2709
2710 static void opt_video_channel(const char *arg)
2711 {
2712 video_channel = strtol(arg, NULL, 0);
2713 }
2714
2715 static void opt_video_standard(const char *arg)
2716 {
2717 video_standard = av_strdup(arg);
2718 }
2719
2720 static void opt_audio_device(const char *arg)
2721 {
2722 audio_device = av_strdup(arg);
2723 }
2724
2725 static void opt_audio_codec(const char *arg)
2726 {
2727 AVCodec *p;
2728
2729 if (!strcmp(arg, "copy")) {
2730 audio_stream_copy = 1;
2731 } else {
2732 p = first_avcodec;
2733 while (p) {
2734 if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_AUDIO)
2735 break;
2736 p = p->next;
2737 }
2738 if (p == NULL) {
2739 fprintf(stderr, "Unknown audio codec '%s'\n", arg);
2740 exit(1);
2741 } else {
2742 audio_codec_id = p->id;
2743 }
2744 }
2745 }
2746
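/* codec tags may be given either as a number (in any base accepted by
   strtol) or, if the string does not parse as a number, as a four character
   code whose bytes are packed little-endian (e.g. "DIVX") */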
2747 static void opt_audio_tag(const char *arg)
2748 {
2749 char *tail;
2750 audio_codec_tag= strtol(arg, &tail, 0);
2751
2752 if(!tail || *tail)
2753 audio_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
2754 }
2755
2756 static void opt_video_tag(const char *arg)
2757 {
2758 char *tail;
2759 video_codec_tag= strtol(arg, &tail, 0);
2760
2761 if(!tail || *tail)
2762 video_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
2763 }
2764
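/* video hook (-vhook): split the argument on spaces into an argv[] style
   vector and register the hook module through the framehook API */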
2765 static void add_frame_hooker(const char *arg)
2766 {
2767 int argc = 0;
2768 char *argv[64];
2769 int i;
2770 char *args = av_strdup(arg);
2771
2772 using_vhook = 1;
2773
2774 argv[0] = strtok(args, " ");
2775 while (argc < 62 && (argv[++argc] = strtok(NULL, " "))) {
2776 }
2777
2778 i = frame_hook_add(argc, argv);
2779
2780 if (i != 0) {
2781 fprintf(stderr, "Failed to add video hook function: %s\n", arg);
2782 exit(1);
2783 }
2784 }
2785
2786 const char *motion_str[] = {
2787 "zero",
2788 "full",
2789 "log",
2790 "phods",
2791 "epzs",
2792 "x1",
2793 NULL,
2794 };
2795
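/* map a motion estimation method name to its numeric value; the table above
   is expected to follow the order of the Motion_Est_ID enum, which starts
   at 1 (hence the "+ 1" below) */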
2796 static void opt_motion_estimation(const char *arg)
2797 {
2798 const char **p;
2799 p = motion_str;
2800 for(;;) {
2801 if (!*p) {
2802 fprintf(stderr, "Unknown motion estimation method '%s'\n", arg);
2803 exit(1);
2804 }
2805 if (!strcmp(*p, arg))
2806 break;
2807 p++;
2808 }
2809 me_method = (p - motion_str) + 1;
2810 }
2811
2812 static void opt_video_codec(const char *arg)
2813 {
2814 AVCodec *p;
2815
2816 if (!strcmp(arg, "copy")) {
2817 video_stream_copy = 1;
2818 } else {
2819 p = first_avcodec;
2820 while (p) {
2821 if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_VIDEO)
2822 break;
2823 p = p->next;
2824 }
2825 if (p == NULL) {
2826 fprintf(stderr, "Unknown video codec '%s'\n", arg);
2827 exit(1);
2828 } else {
2829 video_codec_id = p->id;
2830 }
2831 }
2832 }
2833
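/* -map: the argument is an input "file.stream" pair, optionally followed by
   a "syncfile.syncstream" pair used as the sync reference (any single
   character may separate the numbers, so "0:1" works too); without a sync
   pair the mapped stream is its own sync reference.  Note that
   nb_stream_maps is not checked against MAX_FILES here. */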
2834 static void opt_map(const char *arg)
2835 {
2836 AVStreamMap *m;
2837 const char *p;
2838
2839 p = arg;
2840 m = &stream_maps[nb_stream_maps++];
2841
2842 m->file_index = strtol(arg, (char **)&p, 0);
2843 if (*p)
2844 p++;
2845
2846 m->stream_index = strtol(p, (char **)&p, 0);
2847 if (*p) {
2848 p++;
2849 m->sync_file_index = strtol(p, (char **)&p, 0);
2850 if (*p)
2851 p++;
2852 m->sync_stream_index = strtol(p, (char **)&p, 0);
2853 } else {
2854 m->sync_file_index = m->file_index;
2855 m->sync_stream_index = m->stream_index;
2856 }
2857 }
2858
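/* -map_meta_data: the argument is an "outfile:infile" index pair; the
   indices are only validated later, when the metadata is actually copied
   before the headers are written */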
2859 static void opt_map_meta_data(const char *arg)
2860 {
2861 AVMetaDataMap *m;
2862 const char *p;
2863
2864 p = arg;
2865 m = &meta_data_maps[nb_meta_data_maps++];
2866
2867 m->out_file = strtol(arg, (char **)&p, 0);
2868 if (*p)
2869 p++;
2870
2871 m->in_file = strtol(p, (char **)&p, 0);
2872 }
2873
2874 static void opt_recording_time(const char *arg)
2875 {
2876 recording_time = parse_date(arg, 1);
2877 }
2878
2879 static void opt_start_time(const char *arg)
2880 {
2881 start_time = parse_date(arg, 1);
2882 }
2883
2884 static void opt_rec_timestamp(const char *arg)
2885 {
2886 rec_timestamp = parse_date(arg, 0) / 1000000;
2887 }
2888
2889 static void opt_input_ts_offset(const char *arg)
2890 {
2891 input_ts_offset = parse_date(arg, 1);
2892 }
2893
2894 static void opt_input_file(const char *filename)
2895 {
2896 AVFormatContext *ic;
2897 AVFormatParameters params, *ap = &params;
2898 int err, i, ret, rfps, rfps_base;
2899 int64_t timestamp;
2900
2901 if (!strcmp(filename, "-"))
2902 filename = "pipe:";
2903
2904 using_stdin |= !strncmp(filename, "pipe:", 5) ||
2905 !strcmp( filename, "/dev/stdin" );
2906
2907 /* get default parameters from command line */
2908 memset(ap, 0, sizeof(*ap));
2909 ap->sample_rate = audio_sample_rate;
2910 ap->channels = audio_channels;
2911 ap->time_base.den = frame_rate;
2912 ap->time_base.num = frame_rate_base;
2913 ap->width = frame_width + frame_padleft + frame_padright;
2914 ap->height = frame_height + frame_padtop + frame_padbottom;
2915 ap->image_format = image_format;
2916 ap->pix_fmt = frame_pix_fmt;
2917 ap->device = grab_device;
2918 ap->channel = video_channel;
2919 ap->standard = video_standard;
2920 ap->video_codec_id = video_codec_id;
2921 ap->audio_codec_id = audio_codec_id;
2922 if(pgmyuv_compatibility_hack)
2923 ap->video_codec_id= CODEC_ID_PGMYUV;
2924
2925 /* open the input file with generic libav function */
2926 err = av_open_input_file(&ic, filename, file_iformat, 0, ap);
2927 if (err < 0) {
2928 print_error(filename, err);
2929 exit(1);
2930 }
2931
2932 /* If not enough info to get the stream parameters, we decode the
2933 first frames to get them (used in the MPEG case, for example) */
2934 ret = av_find_stream_info(ic);
2935 if (ret < 0) {
2936 if (verbose >= 0) fprintf(stderr, "%s: could not find codec parameters\n", filename);
2937 exit(1);
2938 }
2939
2940 timestamp = start_time;
2941 /* add the stream start time */
2942 if (ic->start_time != AV_NOPTS_VALUE)
2943 timestamp += ic->start_time;
2944
2945 /* if seeking was requested, execute it */
2946 if (start_time != 0) {
2947 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
2948 if (ret < 0) {
2949 fprintf(stderr, "%s: could not seek to position %0.3f\n",
2950 filename, (double)timestamp / AV_TIME_BASE);
2951 }
2952 /* reset seek info */
2953 start_time = 0;
2954 }
2955
2956 /* update the current parameters so that they match those of the input stream */
2957 for(i=0;i<ic->nb_streams;i++) {
2958 AVCodecContext *enc = &ic->streams[i]->codec;
2959 #if defined(HAVE_THREADS)
2960 if(thread_count>1)
2961 avcodec_thread_init(enc, thread_count);
2962 #endif
2963 enc->thread_count= thread_count;
2964 switch(enc->codec_type) {
2965 case CODEC_TYPE_AUDIO:
2966 //fprintf(stderr, "\nInput Audio channels: %d", enc->channels);
2967 audio_channels = enc->channels;
2968 audio_sample_rate = enc->sample_rate;
2969 if(audio_disable)
2970 ic->streams[i]->discard= AVDISCARD_ALL;
2971 break;
2972 case CODEC_TYPE_VIDEO:
2973 frame_height = enc->height;
2974 frame_width = enc->width;
2975 frame_aspect_ratio = av_q2d(enc->sample_aspect_ratio) * enc->width / enc->height;
2976 frame_pix_fmt = enc->pix_fmt;
2977 rfps = ic->streams[i]->r_frame_rate.num;
2978 rfps_base = ic->streams[i]->r_frame_rate.den;
2979 enc->workaround_bugs = workaround_bugs;
2980 enc->error_resilience = error_resilience;
2981 enc->error_concealment = error_concealment;
2982 enc->idct_algo = idct_algo;
2983 enc->debug = debug;
2984 enc->debug_mv = debug_mv;
2985 enc->lowres= lowres;
2986 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
2987 if(bitexact)
2988 enc->flags|= CODEC_FLAG_BITEXACT;
2989 if(me_threshold)
2990 enc->debug |= FF_DEBUG_MV;
2991 if(gray_only)
2992 enc->flags |= CODEC_FLAG_GRAY;
2993
2994 if (enc->time_base.den != rfps || enc->time_base.num != rfps_base) {
2995
2996 if (verbose >= 0)
2997 fprintf(stderr,"\nSeems that stream %d comes from film source: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
2998 i, (float)enc->time_base.den / enc->time_base.num, enc->time_base.den, enc->time_base.num,
3000 (float)rfps / rfps_base, rfps, rfps_base);
3001 }
3002 /* update the current frame rate to match the stream frame rate */
3003 frame_rate = rfps;
3004 frame_rate_base = rfps_base;
3005
3006 enc->rate_emu = rate_emu;
3007 if(video_disable)
3008 ic->streams[i]->discard= AVDISCARD_ALL;
3009 else if(video_discard)
3010 ic->streams[i]->discard= video_discard;
3011 break;
3012 case CODEC_TYPE_DATA:
3013 break;
3014 default:
3015 av_abort();
3016 }
3017 }
3018
3019 input_files[nb_input_files] = ic;
3020 input_files_ts_offset[nb_input_files] = input_ts_offset - (copy_ts ? 0 : timestamp);
3021 /* dump the file content */
3022 if (verbose >= 0)
3023 dump_format(ic, nb_input_files, filename, 0);
3024
3025 nb_input_files++;
3026 file_iformat = NULL;
3027 file_oformat = NULL;
3028 image_format = NULL;
3029
3030 grab_device = NULL;
3031 video_channel = 0;
3032
3033 rate_emu = 0;
3034 }
3035
3036 static void opt_grab(const char *arg)
3037 {
3038 file_iformat = av_find_input_format(arg);
3039 opt_input_file("");
3040 }
3041
3042 static void check_audio_video_inputs(int *has_video_ptr, int *has_audio_ptr)
3043 {
3044 int has_video, has_audio, i, j;
3045 AVFormatContext *ic;
3046
3047 has_video = 0;
3048 has_audio = 0;
3049 for(j=0;j<nb_input_files;j++) {
3050 ic = input_files[j];
3051 for(i=0;i<ic->nb_streams;i++) {
3052 AVCodecContext *enc = &ic->streams[i]->codec;
3053 switch(enc->codec_type) {
3054 case CODEC_TYPE_AUDIO:
3055 has_audio = 1;
3056 break;
3057 case CODEC_TYPE_VIDEO:
3058 has_video = 1;
3059 break;
3060 case CODEC_TYPE_DATA:
3061 break;
3062 default:
3063 av_abort();
3064 }
3065 }
3066 }
3067 *has_video_ptr = has_video;
3068 *has_audio_ptr = has_audio;
3069 }
3070
3071 static void opt_output_file(const char *filename)
3072 {
3073 AVStream *st;
3074 AVFormatContext *oc;
3075 int use_video, use_audio, nb_streams, input_has_video, input_has_audio;
3076 int codec_id;
3077 AVFormatParameters params, *ap = &params;
3078
3079 if (!strcmp(filename, "-"))
3080 filename = "pipe:";
3081
3082 oc = av_alloc_format_context();
3083
3084 if (!file_oformat) {
3085 file_oformat = guess_format(NULL, filename, NULL);
3086 if (!file_oformat) {
3087 fprintf(stderr, "Unable to find a suitable output format for '%s'\n",
3088 filename);
3089 exit(1);
3090 }
3091 }
3092
3093 oc->oformat = file_oformat;
3094
3095 if (!strcmp(file_oformat->name, "ffm") &&
3096 strstart(filename, "http:", NULL)) {
3097 /* special case for files sent to ffserver: we get the stream
3098 parameters from ffserver */
3099 if (read_ffserver_streams(oc, filename) < 0) {
3100 fprintf(stderr, "Could not read stream parameters from '%s'\n", filename);
3101 exit(1);
3102 }
3103 } else {
3104 use_video = file_oformat->video_codec != CODEC_ID_NONE || video_stream_copy;
3105 use_audio = file_oformat->audio_codec != CODEC_ID_NONE || audio_stream_copy;
3106
3107 /* if there is at least one input file, disable any output stream
3108 type for which no corresponding input stream type was found */
3109 if (nb_input_files > 0) {
3110 check_audio_video_inputs(&input_has_video, &input_has_audio);
3111 if (!input_has_video)
3112 use_video = 0;
3113 if (!input_has_audio)
3114 use_audio = 0;
3115 }
3116
3117 /* manual disable */
3118 if (audio_disable) {
3119 use_audio = 0;
3120 }
3121 if (video_disable) {
3122 use_video = 0;
3123 }
3124
3125 nb_streams = 0;
3126 if (use_video) {
3127 AVCodecContext *video_enc;
3128
3129 st = av_new_stream(oc, nb_streams++);
3130 if (!st) {
3131 fprintf(stderr, "Could not alloc stream\n");
3132 exit(1);
3133 }
3134 #if defined(HAVE_THREADS)
3135 if(thread_count>1)
3136 avcodec_thread_init(&st->codec, thread_count);
3137 #endif
3138
3139 video_enc = &st->codec;
3140
3141 if(video_codec_tag)
3142 video_enc->codec_tag= video_codec_tag;
3143
3144 if (file_oformat->flags & AVFMT_GLOBALHEADER)
3145 video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
3146 if (video_stream_copy) {
3147 st->stream_copy = 1;
3148 video_enc->codec_type = CODEC_TYPE_VIDEO;
3149 } else {
3150 char *p;
3151 int i;
3152 AVCodec *codec;
3153
3154 codec_id = av_guess_codec(file_oformat, NULL, filename, NULL, CODEC_TYPE_VIDEO);
3155 if (video_codec_id != CODEC_ID_NONE)
3156 codec_id = video_codec_id;
3157
3158 video_enc->codec_id = codec_id;
3159 codec = avcodec_find_encoder(codec_id);
3160
3161 video_enc->bit_rate = video_bit_rate;
3162 video_enc->bit_rate_tolerance = video_bit_rate_tolerance;
3163 video_enc->time_base.den = frame_rate;
3164 video_enc->time_base.num = frame_rate_base;
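/* if the encoder advertises a list of supported frame rates, pick the one
   closest to the requested rate and use its reciprocal as the stream
   time base */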
3165 if(codec && codec->supported_framerates){
3166 const AVRational *p= codec->supported_framerates;
3167 AVRational req= (AVRational){frame_rate, frame_rate_base};
3168 const AVRational *best=NULL;
3169 AVRational best_error= (AVRational){INT_MAX, 1};
3170 for(; p->den!=0; p++){
3171 AVRational error= av_sub_q(req, *p);
3172 if(error.num <0) error.num *= -1;
3173 if(av_cmp_q(error, best_error) < 0){
3174 best_error= error;
3175 best= p;
3176 }
3177 }
3178 video_enc->time_base.den= best->num;
3179 video_enc->time_base.num= best->den;
3180 }
3181
3182 video_enc->width = frame_width + frame_padright + frame_padleft;
3183 video_enc->height = frame_height + frame_padtop + frame_padbottom;
3184 video_enc->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255);
3185 video_enc->pix_fmt = frame_pix_fmt;
3186
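/* likewise, if the encoder advertises a list of supported pixel formats and
   the requested format is not among them, fall back to the first entry of
   that list */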
3187 if(codec && codec->pix_fmts){
3188 const enum PixelFormat *p= codec->pix_fmts;
3189 for(; *p!=-1; p++){
3190 if(*p == video_enc->pix_fmt)
3191 break;
3192 }
3193 if(*p == -1)
3194 video_enc->pix_fmt = codec->pix_fmts[0];
3195 }
3196
3197 if (!intra_only)
3198 video_enc->gop_size = gop_size;
3199 else
3200 video_enc->gop_size = 0;
3201 if (video_qscale || same_quality) {
3202 video_enc->flags |= CODEC_FLAG_QSCALE;
3203 video_enc->global_quality=
3204 st->quality = FF_QP2LAMBDA * video_qscale;
3205 }
3206
3207 if(intra_matrix)
3208 video_enc->intra_matrix = intra_matrix;
3209 if(inter_matrix)
3210 video_enc->inter_matrix = inter_matrix;
3211
3212 if(bitexact)
3213 video_enc->flags |= CODEC_FLAG_BITEXACT;
3214
3215 video_enc->mb_decision = mb_decision;
3216 video_enc->mb_cmp = mb_cmp;
3217 video_enc->ildct_cmp = ildct_cmp;
3218 video_enc->me_sub_cmp = sub_cmp;
3219 video_enc->me_cmp = cmp;
3220 video_enc->me_pre_cmp = pre_cmp;
3221 video_enc->pre_me = pre_me;
3222 video_enc->lumi_masking = lumi_mask;
3223 video_enc->dark_masking = dark_mask;
3224 video_enc->spatial_cplx_masking = scplx_mask;
3225 video_enc->temporal_cplx_masking = tcplx_mask;
3226 video_enc->p_masking = p_mask;
3227 video_enc->quantizer_noise_shaping= qns;
3228
3229 if (use_umv) {
3230 video_enc->flags |= CODEC_FLAG_H263P_UMV;
3231 }
3232 if (use_ss) {
3233 video_enc->flags |= CODEC_FLAG_H263P_SLICE_STRUCT;
3234 }
3235 if (use_aic) {
3236 video_enc->flags |= CODEC_FLAG_H263P_AIC;
3237 }
3238 if (use_aiv) {
3239 video_enc->flags |= CODEC_FLAG_H263P_AIV;
3240 }
3241 if (use_4mv) {
3242 video_enc->flags |= CODEC_FLAG_4MV;
3243 }
3244 if (use_obmc) {
3245 video_enc->flags |= CODEC_FLAG_OBMC;
3246 }
3247 if (use_loop) {
3248 video_enc->flags |= CODEC_FLAG_LOOP_FILTER;
3249 }
3250
3251 if(use_part) {
3252 video_enc->flags |= CODEC_FLAG_PART;
3253 }
3254 if (use_alt_scan) {
3255 video_enc->flags |= CODEC_FLAG_ALT_SCAN;
3256 }
3257 if (use_trell) {
3258 video_enc->flags |= CODEC_FLAG_TRELLIS_QUANT;
3259 }
3260 if (use_mv0) {
3261 video_enc->flags |= CODEC_FLAG_MV0;
3262 }
3263 if (do_normalize_aqp) {
3264 video_enc->flags |= CODEC_FLAG_NORMALIZE_AQP;
3265 }
3266 if (use_scan_offset) {
3267 video_enc->flags |= CODEC_FLAG_SVCD_SCAN_OFFSET;
3268 }
3269 if (closed_gop) {
3270 video_enc->flags |= CODEC_FLAG_CLOSED_GOP;
3271 }
3272 if (strict_gop) {
3273 video_enc->flags2 |= CODEC_FLAG2_STRICT_GOP;
3274 }
3275 if (use_qpel) {
3276 video_enc->flags |= CODEC_FLAG_QPEL;
3277 }
3278 if (use_qprd) {
3279 video_enc->flags |= CODEC_FLAG_QP_RD;
3280 }
3281 if (use_cbprd) {
3282 video_enc->flags |= CODEC_FLAG_CBP_RD;
3283 }
3284 if (b_frames) {
3285 video_enc->max_b_frames = b_frames;
3286 video_enc->b_frame_strategy = b_strategy;
3287 video_enc->b_quant_factor = 2.0;
3288 }
3289 if (do_interlace_dct) {
3290 video_enc->flags |= CODEC_FLAG_INTERLACED_DCT;
3291 }
3292 if (do_interlace_me) {
3293 video_enc->flags |= CODEC_FLAG_INTERLACED_ME;
3294 }
3295 if (no_output) {
3296 video_enc->flags2 |= CODEC_FLAG2_NO_OUTPUT;
3297