1 /*
2 * FFmpeg main
3 * Copyright (c) 2000-2003 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /* needed for usleep() */
23 #define _XOPEN_SOURCE 600
24
25 #include "config.h"
26 #include <ctype.h>
27 #include <string.h>
28 #include <math.h>
29 #include <stdlib.h>
30 #include <errno.h>
31 #include <signal.h>
32 #include <limits.h>
33 #include <unistd.h>
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/opt.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavcodec/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/pixdesc.h"
42 #include "libavutil/avstring.h"
43 #include "libavutil/libm.h"
44 #include "libavformat/os_support.h"
45
46 #if HAVE_SYS_RESOURCE_H
47 #include <sys/types.h>
48 #include <sys/time.h>
49 #include <sys/resource.h>
50 #elif HAVE_GETPROCESSTIMES
51 #include <windows.h>
52 #endif
53 #if HAVE_GETPROCESSMEMORYINFO
54 #include <windows.h>
55 #include <psapi.h>
56 #endif
57
58 #if HAVE_SYS_SELECT_H
59 #include <sys/select.h>
60 #endif
61
62 #if HAVE_TERMIOS_H
63 #include <fcntl.h>
64 #include <sys/ioctl.h>
65 #include <sys/time.h>
66 #include <termios.h>
67 #elif HAVE_CONIO_H
68 #include <conio.h>
69 #endif
70 #include <time.h>
71
72 #include "cmdutils.h"
73
74 #undef NDEBUG
75 #include <assert.h>
76
77 const char program_name[] = "FFmpeg";
78 const int program_birth_year = 2000;
79
80 /* select an input stream for an output stream */
81 typedef struct AVStreamMap {
82 int file_index;
83 int stream_index;
84 int sync_file_index;
85 int sync_stream_index;
86 } AVStreamMap;
87
88 /** select an input file for an output file */
89 typedef struct AVMetaDataMap {
90 int out_file;
91 int in_file;
92 } AVMetaDataMap;
93
94 static const OptionDef options[];
95
96 #define MAX_FILES 100
97
98 static const char *last_asked_format = NULL;
99 static AVFormatContext *input_files[MAX_FILES];
100 static int64_t input_files_ts_offset[MAX_FILES];
101 static double input_files_ts_scale[MAX_FILES][MAX_STREAMS];
102 static AVCodec *input_codecs[MAX_FILES*MAX_STREAMS];
103 static int nb_input_files = 0;
104 static int nb_icodecs;
105
106 static AVFormatContext *output_files[MAX_FILES];
107 static AVCodec *output_codecs[MAX_FILES*MAX_STREAMS];
108 static int nb_output_files = 0;
109 static int nb_ocodecs;
110
111 static AVStreamMap stream_maps[MAX_FILES*MAX_STREAMS];
112 static int nb_stream_maps;
113
114 static AVMetaDataMap meta_data_maps[MAX_FILES];
115 static int nb_meta_data_maps;
116
117 static int frame_width = 0;
118 static int frame_height = 0;
119 static float frame_aspect_ratio = 0;
120 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
121 static enum SampleFormat audio_sample_fmt = SAMPLE_FMT_NONE;
122 static int frame_padtop = 0;
123 static int frame_padbottom = 0;
124 static int frame_padleft = 0;
125 static int frame_padright = 0;
126 static int padcolor[3] = {16,128,128}; /* default to black */
127 static int frame_topBand = 0;
128 static int frame_bottomBand = 0;
129 static int frame_leftBand = 0;
130 static int frame_rightBand = 0;
131 static int max_frames[4] = {INT_MAX, INT_MAX, INT_MAX, INT_MAX};
132 static AVRational frame_rate;
133 static float video_qscale = 0;
134 static uint16_t *intra_matrix = NULL;
135 static uint16_t *inter_matrix = NULL;
136 static const char *video_rc_override_string=NULL;
137 static int video_disable = 0;
138 static int video_discard = 0;
139 static char *video_codec_name = NULL;
140 static int video_codec_tag = 0;
141 static char *video_language = NULL;
142 static int same_quality = 0;
143 static int do_deinterlace = 0;
144 static int top_field_first = -1;
145 static int me_threshold = 0;
146 static int intra_dc_precision = 8;
147 static int loop_input = 0;
148 static int loop_output = AVFMT_NOOUTPUTLOOP;
149 static int qp_hist = 0;
150
151 static int intra_only = 0;
152 static int audio_sample_rate = 44100;
153 static int64_t channel_layout = 0;
154 #define QSCALE_NONE -99999
155 static float audio_qscale = QSCALE_NONE;
156 static int audio_disable = 0;
157 static int audio_channels = 1;
158 static char *audio_codec_name = NULL;
159 static int audio_codec_tag = 0;
160 static char *audio_language = NULL;
161
162 static int subtitle_disable = 0;
163 static char *subtitle_codec_name = NULL;
164 static char *subtitle_language = NULL;
165 static int subtitle_codec_tag = 0;
166
167 static float mux_preload= 0.5;
168 static float mux_max_delay= 0.7;
169
170 static int64_t recording_time = INT64_MAX;
171 static int64_t start_time = 0;
172 static int64_t rec_timestamp = 0;
173 static int64_t input_ts_offset = 0;
174 static int file_overwrite = 0;
175 static int metadata_count;
176 static AVMetadataTag *metadata;
177 static int do_benchmark = 0;
178 static int do_hex_dump = 0;
179 static int do_pkt_dump = 0;
180 static int do_psnr = 0;
181 static int do_pass = 0;
182 static char *pass_logfilename_prefix = NULL;
183 static int audio_stream_copy = 0;
184 static int video_stream_copy = 0;
185 static int subtitle_stream_copy = 0;
186 static int video_sync_method= -1;
187 static int audio_sync_method= 0;
188 static float audio_drift_threshold= 0.1;
189 static int copy_ts= 0;
190 static int opt_shortest = 0;
191 static int video_global_header = 0;
192 static char *vstats_filename;
193 static FILE *vstats_file;
194 static int opt_programid = 0;
195 static int copy_initial_nonkeyframes = 0;
196
197 static int rate_emu = 0;
198
199 static int video_channel = 0;
200 static char *video_standard;
201
202 static int audio_volume = 256;
203
204 static int exit_on_error = 0;
205 static int using_stdin = 0;
206 static int verbose = 1;
207 static int thread_count= 1;
208 static int q_pressed = 0;
209 static int64_t video_size = 0;
210 static int64_t audio_size = 0;
211 static int64_t extra_size = 0;
212 static int nb_frames_dup = 0;
213 static int nb_frames_drop = 0;
214 static int input_sync;
215 static uint64_t limit_filesize = 0;
216 static int force_fps = 0;
217
218 static int pgmyuv_compatibility_hack=0;
219 static float dts_delta_threshold = 10;
220
221 static unsigned int sws_flags = SWS_BICUBIC;
222
223 static int64_t timer_start;
224
225 static uint8_t *audio_buf;
226 static uint8_t *audio_out;
227 unsigned int allocated_audio_out_size, allocated_audio_buf_size;
228
229 static short *samples;
230
231 static AVBitStreamFilterContext *video_bitstream_filters=NULL;
232 static AVBitStreamFilterContext *audio_bitstream_filters=NULL;
233 static AVBitStreamFilterContext *subtitle_bitstream_filters=NULL;
234 static AVBitStreamFilterContext *bitstream_filters[MAX_FILES][MAX_STREAMS];
235
236 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
237
238 struct AVInputStream;
239
240 typedef struct AVOutputStream {
241 int file_index; /* file index */
242 int index; /* stream index in the output file */
243 int source_index; /* AVInputStream index */
244 AVStream *st; /* stream in the output file */
245 int encoding_needed; /* true if encoding needed for this stream */
246 int frame_number;
247 /* input pts and corresponding output pts
248 for A/V sync */
249 //double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
250 struct AVInputStream *sync_ist; /* input stream to sync against */
251 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
252 /* video only */
253 int video_resample;
254 AVFrame pict_tmp; /* temporary image for resampling */
255 struct SwsContext *img_resample_ctx; /* for image resampling */
256 int resample_height;
257 int resample_width;
258 int resample_pix_fmt;
259
260 /* full frame size of first frame */
261 int original_height;
262 int original_width;
263
264 /* cropping area sizes */
265 int video_crop;
266 int topBand;
267 int bottomBand;
268 int leftBand;
269 int rightBand;
270
271 /* cropping area of first frame */
272 int original_topBand;
273 int original_bottomBand;
274 int original_leftBand;
275 int original_rightBand;
276
277 /* padding area sizes */
278 int video_pad;
279 int padtop;
280 int padbottom;
281 int padleft;
282 int padright;
283
284 /* audio only */
285 int audio_resample;
286 ReSampleContext *resample; /* for audio resampling */
287 int reformat_pair;
288 AVAudioConvert *reformat_ctx;
289 AVFifoBuffer *fifo; /* for compression: one audio fifo per codec */
290 FILE *logfile;
291 } AVOutputStream;
292
293 typedef struct AVInputStream {
294 int file_index;
295 int index;
296 AVStream *st;
297 int discard; /* true if stream data should be discarded */
298 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
299 int64_t sample_index; /* current sample */
300
301 int64_t start; /* time when read started */
302 int64_t next_pts; /* synthetic pts for cases where pkt.pts
303 is not defined */
304 int64_t pts; /* current pts */
305 int is_start; /* is 1 at the start and after a discontinuity */
306 int showed_multi_packet_warning;
307 int is_past_recording_time;
308 } AVInputStream;
309
310 typedef struct AVInputFile {
311 int eof_reached; /* true if eof reached */
312 int ist_index; /* index of first stream in ist_table */
313 int buffer_size; /* current total buffer size */
314 int nb_streams; /* nb streams we are aware of */
315 } AVInputFile;
316
317 #if HAVE_TERMIOS_H
318
319 /* init terminal so that we can grab keys */
320 static struct termios oldtty;
321 #endif
322
323 static void term_exit(void)
324 {
325 #if HAVE_TERMIOS_H
326 tcsetattr (0, TCSANOW, &oldtty);
327 #endif
328 }
329
330 static volatile int received_sigterm = 0;
331
332 static void
333 sigterm_handler(int sig)
334 {
335 received_sigterm = sig;
336 term_exit();
337 }
338
339 static void term_init(void)
340 {
341 #if HAVE_TERMIOS_H
342 struct termios tty;
343
344 tcgetattr (0, &tty);
345 oldtty = tty;
346 atexit(term_exit);
347
348 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
349 |INLCR|IGNCR|ICRNL|IXON);
350 tty.c_oflag |= OPOST;
351 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
352 tty.c_cflag &= ~(CSIZE|PARENB);
353 tty.c_cflag |= CS8;
354 tty.c_cc[VMIN] = 1;
355 tty.c_cc[VTIME] = 0;
356
357 tcsetattr (0, TCSANOW, &tty);
358 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
359 #endif
360
361 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
362 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
363 #ifdef SIGXCPU
364 signal(SIGXCPU, sigterm_handler);
365 #endif
366
367 #if CONFIG_BEOS_NETSERVER
368 fcntl(0, F_SETFL, fcntl(0, F_GETFL) | O_NONBLOCK);
369 #endif
370 }
371
372 /* read a key without blocking */
373 static int read_key(void)
374 {
375 #if HAVE_TERMIOS_H
376 int n = 1;
377 unsigned char ch;
378 #if !CONFIG_BEOS_NETSERVER
379 struct timeval tv;
380 fd_set rfds;
381
382 FD_ZERO(&rfds);
383 FD_SET(0, &rfds);
384 tv.tv_sec = 0;
385 tv.tv_usec = 0;
386 n = select(1, &rfds, NULL, NULL, &tv);
387 #endif
388 if (n > 0) {
389 n = read(0, &ch, 1);
390 if (n == 1)
391 return ch;
392
393 return n;
394 }
395 #elif HAVE_CONIO_H
396 if(kbhit())
397 return(getch());
398 #endif
399 return -1;
400 }
401
402 static int decode_interrupt_cb(void)
403 {
404 return q_pressed || (q_pressed = read_key() == 'q');
405 }
406
407 static int av_exit(int ret)
408 {
409 int i;
410
411 /* close files */
412 for(i=0;i<nb_output_files;i++) {
413 /* maybe av_close_output_file ??? */
414 AVFormatContext *s = output_files[i];
415 int j;
416 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
417 url_fclose(s->pb);
418 for(j=0;j<s->nb_streams;j++) {
419 av_metadata_free(&s->streams[j]->metadata);
420 av_free(s->streams[j]->codec);
421 av_free(s->streams[j]);
422 }
423 for(j=0;j<s->nb_programs;j++) {
424 av_metadata_free(&s->programs[j]->metadata);
425 }
426 for(j=0;j<s->nb_chapters;j++) {
427 av_metadata_free(&s->chapters[j]->metadata);
428 }
429 av_metadata_free(&s->metadata);
430 av_free(s);
431 }
432 for(i=0;i<nb_input_files;i++)
433 av_close_input_file(input_files[i]);
434
435 av_free(intra_matrix);
436 av_free(inter_matrix);
437
438 if (vstats_file)
439 fclose(vstats_file);
440 av_free(vstats_filename);
441
442 av_free(opt_names);
443
444 av_free(video_codec_name);
445 av_free(audio_codec_name);
446 av_free(subtitle_codec_name);
447
448 av_free(video_standard);
449
450 #if CONFIG_POWERPC_PERF
451 void powerpc_display_perf_report(void);
452 powerpc_display_perf_report();
453 #endif /* CONFIG_POWERPC_PERF */
454
455 for (i=0;i<CODEC_TYPE_NB;i++)
456 av_free(avcodec_opts[i]);
457 av_free(avformat_opts);
458 av_free(sws_opts);
459 av_free(audio_buf);
460 av_free(audio_out);
461 allocated_audio_buf_size= allocated_audio_out_size= 0;
462 av_free(samples);
463
464 if (received_sigterm) {
465 fprintf(stderr,
466 "Received signal %d: terminating.\n",
467 (int) received_sigterm);
468 exit (255);
469 }
470
471 exit(ret); /* not all OS-es handle main() return value */
472 return ret;
473 }
474
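/* If the encoder supports only a restricted set of sample formats and the
   requested one is not among them, fall back to the encoder's first
   supported format; choose_pixel_fmt below does the same for pixel
   formats, with an exception for MJPEG in non-strict mode. */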
475 static void choose_sample_fmt(AVStream *st, AVCodec *codec)
476 {
477 if(codec && codec->sample_fmts){
478 const enum SampleFormat *p= codec->sample_fmts;
479 for(; *p!=-1; p++){
480 if(*p == st->codec->sample_fmt)
481 break;
482 }
483 if(*p == -1)
484 st->codec->sample_fmt = codec->sample_fmts[0];
485 }
486 }
487
488 static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
489 {
490 if(codec && codec->pix_fmts){
491 const enum PixelFormat *p= codec->pix_fmts;
492 for(; *p!=-1; p++){
493 if(*p == st->codec->pix_fmt)
494 break;
495 }
496 if(*p == -1
497 && !( st->codec->codec_id==CODEC_ID_MJPEG
498 && st->codec->strict_std_compliance <= FF_COMPLIANCE_INOFFICIAL
499 && ( st->codec->pix_fmt == PIX_FMT_YUV420P
500 || st->codec->pix_fmt == PIX_FMT_YUV422P)))
501 st->codec->pix_fmt = codec->pix_fmts[0];
502 }
503 }
504
505 static int read_ffserver_streams(AVFormatContext *s, const char *filename)
506 {
507 int i, err;
508 AVFormatContext *ic;
509 int nopts = 0;
510
511 err = av_open_input_file(&ic, filename, NULL, FFM_PACKET_SIZE, NULL);
512 if (err < 0)
513 return err;
514 /* copy stream format */
515 s->nb_streams = ic->nb_streams;
516 for(i=0;i<ic->nb_streams;i++) {
517 AVStream *st;
518
519 // FIXME: a more elegant solution is needed
520 st = av_mallocz(sizeof(AVStream));
521 memcpy(st, ic->streams[i], sizeof(AVStream));
522 st->codec = avcodec_alloc_context();
523 if (!st->codec) {
524 print_error(filename, AVERROR(ENOMEM));
525 av_exit(1);
526 }
527 memcpy(st->codec, ic->streams[i]->codec, sizeof(AVCodecContext));
528 s->streams[i] = st;
529
530 if (st->codec->codec_type == CODEC_TYPE_AUDIO && audio_stream_copy)
531 st->stream_copy = 1;
532 else if (st->codec->codec_type == CODEC_TYPE_VIDEO && video_stream_copy)
533 st->stream_copy = 1;
534
535 if(!st->codec->thread_count)
536 st->codec->thread_count = 1;
537 if(st->codec->thread_count>1)
538 avcodec_thread_init(st->codec, st->codec->thread_count);
539
540 if(st->codec->flags & CODEC_FLAG_BITEXACT)
541 nopts = 1;
542 }
543
544 if (!nopts)
545 s->timestamp = av_gettime();
546
547 av_close_input_file(ic);
548 return 0;
549 }
550
551 static double
552 get_sync_ipts(const AVOutputStream *ost)
553 {
554 const AVInputStream *ist = ost->sync_ist;
555 return (double)(ist->pts - start_time)/AV_TIME_BASE;
556 }
557
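/* Run a packet through the chain of bitstream filters attached to its
   output stream (if any) and hand it to the muxer. A filter returning >0
   allocated a new payload, so the old packet is freed; a negative return
   is reported and, if exit_on_error is set, aborts the conversion. */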
558 static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){
559 int ret;
560
561 while(bsfc){
562 AVPacket new_pkt= *pkt;
563 int a= av_bitstream_filter_filter(bsfc, avctx, NULL,
564 &new_pkt.data, &new_pkt.size,
565 pkt->data, pkt->size,
566 pkt->flags & PKT_FLAG_KEY);
567 if(a>0){
568 av_free_packet(pkt);
569 new_pkt.destruct= av_destruct_packet;
570 } else if(a<0){
571 fprintf(stderr, "%s failed for stream %d, codec %s",
572 bsfc->filter->name, pkt->stream_index,
573 avctx->codec ? avctx->codec->name : "copy");
574 print_error("", a);
575 if (exit_on_error)
576 av_exit(1);
577 }
578 *pkt= new_pkt;
579
580 bsfc= bsfc->next;
581 }
582
583 ret= av_interleaved_write_frame(s, pkt);
584 if(ret < 0){
585 print_error("av_interleaved_write_frame()", ret);
586 av_exit(1);
587 }
588 }
589
590 #define MAX_AUDIO_PACKET_SIZE (128 * 1024)
591
592 static void do_audio_out(AVFormatContext *s,
593 AVOutputStream *ost,
594 AVInputStream *ist,
595 unsigned char *buf, int size)
596 {
597 uint8_t *buftmp;
598 int64_t audio_out_size, audio_buf_size;
599 int64_t allocated_for_size= size;
600
601 int size_out, frame_bytes, ret;
602 AVCodecContext *enc= ost->st->codec;
603 AVCodecContext *dec= ist->st->codec;
604 int osize= av_get_bits_per_sample_format(enc->sample_fmt)/8;
605 int isize= av_get_bits_per_sample_format(dec->sample_fmt)/8;
606 const int coded_bps = av_get_bits_per_sample(enc->codec->id);
607
608 need_realloc:
609 audio_buf_size= (allocated_for_size + isize*dec->channels - 1) / (isize*dec->channels);
610 audio_buf_size= (audio_buf_size*enc->sample_rate + dec->sample_rate) / dec->sample_rate;
611 audio_buf_size= audio_buf_size*2 + 10000; //safety factors for the deprecated resampling API
612 audio_buf_size*= osize*enc->channels;
613
614 audio_out_size= FFMAX(audio_buf_size, enc->frame_size * osize * enc->channels);
615 if(coded_bps > 8*osize)
616 audio_out_size= audio_out_size * coded_bps / (8*osize);
617 audio_out_size += FF_MIN_BUFFER_SIZE;
618
619 if(audio_out_size > INT_MAX || audio_buf_size > INT_MAX){
620 fprintf(stderr, "Buffer sizes too large\n");
621 av_exit(1);
622 }
623
624 av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
625 av_fast_malloc(&audio_out, &allocated_audio_out_size, audio_out_size);
626 if (!audio_buf || !audio_out){
627 fprintf(stderr, "Out of memory in do_audio_out\n");
628 av_exit(1);
629 }
630
631 if (enc->channels != dec->channels)
632 ost->audio_resample = 1;
633
634 if (ost->audio_resample && !ost->resample) {
635 if (dec->sample_fmt != SAMPLE_FMT_S16)
636 fprintf(stderr, "Warning, using s16 intermediate sample format for resampling\n");
637 ost->resample = av_audio_resample_init(enc->channels, dec->channels,
638 enc->sample_rate, dec->sample_rate,
639 enc->sample_fmt, dec->sample_fmt,
640 16, 10, 0, 0.8);
641 if (!ost->resample) {
642 fprintf(stderr, "Can not resample %d channels @ %d Hz to %d channels @ %d Hz\n",
643 dec->channels, dec->sample_rate,
644 enc->channels, enc->sample_rate);
645 av_exit(1);
646 }
647 }
648
649 #define MAKE_SFMT_PAIR(a,b) ((a)+SAMPLE_FMT_NB*(b))
650 if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt &&
651 MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt)!=ost->reformat_pair) {
652 if (ost->reformat_ctx)
653 av_audio_convert_free(ost->reformat_ctx);
654 ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1,
655 dec->sample_fmt, 1, NULL, 0);
656 if (!ost->reformat_ctx) {
657 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
658 avcodec_get_sample_fmt_name(dec->sample_fmt),
659 avcodec_get_sample_fmt_name(enc->sample_fmt));
660 av_exit(1);
661 }
662 ost->reformat_pair=MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);
663 }
664
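/* delta is the difference, in output samples, between where this chunk
   should start (input pts converted to an output sample count) and where
   the encoder actually is (samples already sent plus samples still queued
   in the FIFO). Large drifts are corrected by dropping samples or
   inserting silence; smaller ones can be absorbed by the resampler when
   audio_sync_method > 1. */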
665 if(audio_sync_method){
666 double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts
667 - av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2);
668 double idelta= delta*ist->st->codec->sample_rate / enc->sample_rate;
669 int byte_delta= ((int)idelta)*2*ist->st->codec->channels;
670
671 //FIXME resample delay
672 if(fabs(delta) > 50){
673 if(ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate){
674 if(byte_delta < 0){
675 byte_delta= FFMAX(byte_delta, -size);
676 size += byte_delta;
677 buf -= byte_delta;
678 if(verbose > 2)
679 fprintf(stderr, "discarding %d audio samples\n", (int)-delta);
680 if(!size)
681 return;
682 ist->is_start=0;
683 }else{
684 static uint8_t *input_tmp= NULL;
685 input_tmp= av_realloc(input_tmp, byte_delta + size);
686
687 if(byte_delta > allocated_for_size - size){
688 allocated_for_size= byte_delta + (int64_t)size;
689 goto need_realloc;
690 }
691 ist->is_start=0;
692
693 memset(input_tmp, 0, byte_delta);
694 memcpy(input_tmp + byte_delta, buf, size);
695 buf= input_tmp;
696 size += byte_delta;
697 if(verbose > 2)
698 fprintf(stderr, "adding %d audio samples of silence\n", (int)delta);
699 }
700 }else if(audio_sync_method>1){
701 int comp= av_clip(delta, -audio_sync_method, audio_sync_method);
702 assert(ost->audio_resample);
703 if(verbose > 2)
704 fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
705 // fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2));
706 av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
707 }
708 }
709 }else
710 ost->sync_opts= lrintf(get_sync_ipts(ost) * enc->sample_rate)
711 - av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2); //FIXME wrong
712
713 if (ost->audio_resample) {
714 buftmp = audio_buf;
715 size_out = audio_resample(ost->resample,
716 (short *)buftmp, (short *)buf,
717 size / (ist->st->codec->channels * isize));
718 size_out = size_out * enc->channels * osize;
719 } else {
720 buftmp = buf;
721 size_out = size;
722 }
723
724 if (!ost->audio_resample && dec->sample_fmt!=enc->sample_fmt) {
725 const void *ibuf[6]= {buftmp};
726 void *obuf[6]= {audio_buf};
727 int istride[6]= {isize};
728 int ostride[6]= {osize};
729 int len= size_out/istride[0];
730 if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
731 printf("av_audio_convert() failed\n");
732 if (exit_on_error)
733 av_exit(1);
734 return;
735 }
736 buftmp = audio_buf;
737 size_out = len*osize;
738 }
739
740 /* now encode as many frames as possible */
741 if (enc->frame_size > 1) {
742 /* output resampled raw samples */
743 if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
744 fprintf(stderr, "av_fifo_realloc2() failed\n");
745 av_exit(1);
746 }
747 av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);
748
749 frame_bytes = enc->frame_size * osize * enc->channels;
750
751 while (av_fifo_size(ost->fifo) >= frame_bytes) {
752 AVPacket pkt;
753 av_init_packet(&pkt);
754
755 av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
756
757 //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
758
759 ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
760 (short *)audio_buf);
761 if (ret < 0) {
762 fprintf(stderr, "Audio encoding failed\n");
763 av_exit(1);
764 }
765 audio_size += ret;
766 pkt.stream_index= ost->index;
767 pkt.data= audio_out;
768 pkt.size= ret;
769 if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
770 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
771 pkt.flags |= PKT_FLAG_KEY;
772 write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
773
774 ost->sync_opts += enc->frame_size;
775 }
776 } else {
777 AVPacket pkt;
778 av_init_packet(&pkt);
779
780 ost->sync_opts += size_out / (osize * enc->channels);
781
782 /* output a pcm frame */
783 /* determine the size of the coded buffer */
784 size_out /= osize;
785 if (coded_bps)
786 size_out = size_out*coded_bps/8;
787
788 if(size_out > audio_out_size){
789 fprintf(stderr, "Internal error, buffer size too small\n");
790 av_exit(1);
791 }
792
793 //FIXME pass ost->sync_opts as AVFrame.pts in avcodec_encode_audio()
794 ret = avcodec_encode_audio(enc, audio_out, size_out,
795 (short *)buftmp);
796 if (ret < 0) {
797 fprintf(stderr, "Audio encoding failed\n");
798 av_exit(1);
799 }
800 audio_size += ret;
801 pkt.stream_index= ost->index;
802 pkt.data= audio_out;
803 pkt.size= ret;
804 if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
805 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
806 pkt.flags |= PKT_FLAG_KEY;
807 write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
808 }
809 }
810
811 static void pre_process_video_frame(AVInputStream *ist, AVPicture *picture, void **bufp)
812 {
813 AVCodecContext *dec;
814 AVPicture *picture2;
815 AVPicture picture_tmp;
816 uint8_t *buf = 0;
817
818 dec = ist->st->codec;
819
820     /* deinterlace: must be done before any resize */
821 if (do_deinterlace) {
822 int size;
823
824 /* create temporary picture */
825 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
826 buf = av_malloc(size);
827 if (!buf)
828 return;
829
830 picture2 = &picture_tmp;
831 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
832
833 if(avpicture_deinterlace(picture2, picture,
834 dec->pix_fmt, dec->width, dec->height) < 0) {
835 /* if error, do not deinterlace */
836 fprintf(stderr, "Deinterlacing failed\n");
837 av_free(buf);
838 buf = NULL;
839 picture2 = picture;
840 }
841 } else {
842 picture2 = picture;
843 }
844
845 if (picture != picture2)
846 *picture = *picture2;
847 *bufp = buf;
848 }
849
850 /* we begin to correct av delay at this threshold */
851 #define AV_DELAY_MAX 0.100
852
853 static void do_subtitle_out(AVFormatContext *s,
854 AVOutputStream *ost,
855 AVInputStream *ist,
856 AVSubtitle *sub,
857 int64_t pts)
858 {
859 static uint8_t *subtitle_out = NULL;
860 int subtitle_out_max_size = 1024 * 1024;
861 int subtitle_out_size, nb, i;
862 AVCodecContext *enc;
863 AVPacket pkt;
864
865 if (pts == AV_NOPTS_VALUE) {
866 fprintf(stderr, "Subtitle packets must have a pts\n");
867 if (exit_on_error)
868 av_exit(1);
869 return;
870 }
871
872 enc = ost->st->codec;
873
874 if (!subtitle_out) {
875 subtitle_out = av_malloc(subtitle_out_max_size);
876 }
877
878     /* Note: DVB subtitles need one packet to draw them and another
879        packet to clear them. */
880 /* XXX: signal it in the codec context ? */
881 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
882 nb = 2;
883 else
884 nb = 1;
885
886 for(i = 0; i < nb; i++) {
887 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
888 // start_display_time is required to be 0
889 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){1, 1000}, AV_TIME_BASE_Q);
890 sub->end_display_time -= sub->start_display_time;
891 sub->start_display_time = 0;
892 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
893 subtitle_out_max_size, sub);
894 if (subtitle_out_size < 0) {
895 fprintf(stderr, "Subtitle encoding failed\n");
896 av_exit(1);
897 }
898
899 av_init_packet(&pkt);
900 pkt.stream_index = ost->index;
901 pkt.data = subtitle_out;
902 pkt.size = subtitle_out_size;
903 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
904 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
905 /* XXX: the pts correction is handled here. Maybe handling
906 it in the codec would be better */
907 if (i == 0)
908 pkt.pts += 90 * sub->start_display_time;
909 else
910 pkt.pts += 90 * sub->end_display_time;
911 }
912 write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
913 }
914 }
915
916 static int bit_buffer_size= 1024*256;
917 static uint8_t *bit_buffer= NULL;
918
919 static void do_video_out(AVFormatContext *s,
920 AVOutputStream *ost,
921 AVInputStream *ist,
922 AVFrame *in_picture,
923 int *frame_size)
924 {
925 int nb_frames, i, ret;
926 int64_t topBand, bottomBand, leftBand, rightBand;
927 AVFrame *final_picture, *formatted_picture, *resampling_dst, *padding_src;
928 AVFrame picture_crop_temp, picture_pad_temp;
929 AVCodecContext *enc, *dec;
930 double sync_ipts;
931
932 avcodec_get_frame_defaults(&picture_crop_temp);
933 avcodec_get_frame_defaults(&picture_pad_temp);
934
935 enc = ost->st->codec;
936 dec = ist->st->codec;
937
938 sync_ipts = get_sync_ipts(ost) / av_q2d(enc->time_base);
939
940 /* by default, we output a single frame */
941 nb_frames = 1;
942
943 *frame_size = 0;
944
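/* Frame duplication/dropping for A/V sync: compare the desired output
   position (sync_ipts) with the output frame counter (sync_opts). A frame
   arriving more than about one frame interval behind the counter is
   dropped; one arriving ahead is emitted several times to fill the gap.
   With video_sync_method 2, or automatically for variable-fps muxers, the
   counter follows the input timestamps instead. */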
945 if(video_sync_method){
946 double vdelta = sync_ipts - ost->sync_opts;
947 //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
948 if (vdelta < -1.1)
949 nb_frames = 0;
950 else if (video_sync_method == 2 || (video_sync_method<0 && (s->oformat->flags & AVFMT_VARIABLE_FPS))){
951 if(vdelta<=-0.6){
952 nb_frames=0;
953 }else if(vdelta>0.6)
954 ost->sync_opts= lrintf(sync_ipts);
955 }else if (vdelta > 1.1)
956 nb_frames = lrintf(vdelta);
957 //fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, get_sync_ipts(ost), nb_frames);
958 if (nb_frames == 0){
959 ++nb_frames_drop;
960 if (verbose>2)
961 fprintf(stderr, "*** drop!\n");
962 }else if (nb_frames > 1) {
963 nb_frames_dup += nb_frames - 1;
964 if (verbose>2)
965 fprintf(stderr, "*** %d dup!\n", nb_frames-1);
966 }
967 }else
968 ost->sync_opts= lrintf(sync_ipts);
969
970 nb_frames= FFMIN(nb_frames, max_frames[CODEC_TYPE_VIDEO] - ost->frame_number);
971 if (nb_frames <= 0)
972 return;
973
974 if (ost->video_crop) {
975 if (av_picture_crop((AVPicture *)&picture_crop_temp, (AVPicture *)in_picture, dec->pix_fmt, ost->topBand, ost->leftBand) < 0) {
976 fprintf(stderr, "error cropping picture\n");
977 if (exit_on_error)
978 av_exit(1);
979 return;
980 }
981 formatted_picture = &picture_crop_temp;
982 } else {
983 formatted_picture = in_picture;
984 }
985
986 final_picture = formatted_picture;
987 padding_src = formatted_picture;
988 resampling_dst = &ost->pict_tmp;
989 if (ost->video_pad) {
990 final_picture = &ost->pict_tmp;
991 if (ost->video_resample) {
992 if (av_picture_crop((AVPicture *)&picture_pad_temp, (AVPicture *)final_picture, enc->pix_fmt, ost->padtop, ost->padleft) < 0) {
993 fprintf(stderr, "error padding picture\n");
994 if (exit_on_error)
995 av_exit(1);
996 return;
997 }
998 resampling_dst = &picture_pad_temp;
999 }
1000 }
1001
1002 if( (ost->resample_height != (ist->st->codec->height - (ost->topBand + ost->bottomBand)))
1003 || (ost->resample_width != (ist->st->codec->width - (ost->leftBand + ost->rightBand)))
1004 || (ost->resample_pix_fmt!= ist->st->codec->pix_fmt) ) {
1005
1006 fprintf(stderr,"Input Stream #%d.%d frame size changed to %dx%d, %s\n", ist->file_index, ist->index, ist->st->codec->width, ist->st->codec->height,avcodec_get_pix_fmt_name(ist->st->codec->pix_fmt));
1007 if(!ost->video_resample)
1008 av_exit(1);
1009 }
1010
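/* If the input frame size or pixel format changed mid-stream, rescale the
   crop bands proportionally to the new dimensions and rebuild the swscale
   context; without a resampling context such a change is fatal, as
   checked above. */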
1011 if (ost->video_resample) {
1012 padding_src = NULL;
1013 final_picture = &ost->pict_tmp;
1014 if( (ost->resample_height != (ist->st->codec->height - (ost->topBand + ost->bottomBand)))
1015 || (ost->resample_width != (ist->st->codec->width - (ost->leftBand + ost->rightBand)))
1016 || (ost->resample_pix_fmt!= ist->st->codec->pix_fmt) ) {
1017
1018 /* keep bands proportional to the frame size */
1019 topBand = ((int64_t)ist->st->codec->height * ost->original_topBand / ost->original_height) & ~1;
1020 bottomBand = ((int64_t)ist->st->codec->height * ost->original_bottomBand / ost->original_height) & ~1;
1021 leftBand = ((int64_t)ist->st->codec->width * ost->original_leftBand / ost->original_width) & ~1;
1022 rightBand = ((int64_t)ist->st->codec->width * ost->original_rightBand / ost->original_width) & ~1;
1023
1024 /* sanity check to ensure no bad band sizes sneak in */
1025 assert(topBand <= INT_MAX && topBand >= 0);
1026 assert(bottomBand <= INT_MAX && bottomBand >= 0);
1027 assert(leftBand <= INT_MAX && leftBand >= 0);
1028 assert(rightBand <= INT_MAX && rightBand >= 0);
1029
1030 ost->topBand = topBand;
1031 ost->bottomBand = bottomBand;
1032 ost->leftBand = leftBand;
1033 ost->rightBand = rightBand;
1034
1035 ost->resample_height = ist->st->codec->height - (ost->topBand + ost->bottomBand);
1036 ost->resample_width = ist->st->codec->width - (ost->leftBand + ost->rightBand);
1037 ost->resample_pix_fmt= ist->st->codec->pix_fmt;
1038
1039 /* initialize a new scaler context */
1040 sws_freeContext(ost->img_resample_ctx);
1041 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1042 ost->img_resample_ctx = sws_getContext(
1043 ist->st->codec->width - (ost->leftBand + ost->rightBand),
1044 ist->st->codec->height - (ost->topBand + ost->bottomBand),
1045 ist->st->codec->pix_fmt,
1046 ost->st->codec->width - (ost->padleft + ost->padright),
1047 ost->st->codec->height - (ost->padtop + ost->padbottom),
1048 ost->st->codec->pix_fmt,
1049 sws_flags, NULL, NULL, NULL);
1050 if (ost->img_resample_ctx == NULL) {
1051 fprintf(stderr, "Cannot get resampling context\n");
1052 av_exit(1);
1053 }
1054 }
1055 sws_scale(ost->img_resample_ctx, formatted_picture->data, formatted_picture->linesize,
1056 0, ost->resample_height, resampling_dst->data, resampling_dst->linesize);
1057 }
1058
1059 if (ost->video_pad) {
1060 av_picture_pad((AVPicture*)final_picture, (AVPicture *)padding_src,
1061 enc->height, enc->width, enc->pix_fmt,
1062 ost->padtop, ost->padbottom, ost->padleft, ost->padright, padcolor);
1063 }
1064
1065 /* duplicates frame if needed */
1066 for(i=0;i<nb_frames;i++) {
1067 AVPacket pkt;
1068 av_init_packet(&pkt);
1069 pkt.stream_index= ost->index;
1070
1071 if (s->oformat->flags & AVFMT_RAWPICTURE) {
1072 /* raw pictures are written as AVPicture structure to
1073                avoid any copies. We temporarily support the older
1074 method. */
1075 AVFrame* old_frame = enc->coded_frame;
1076 enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
1077 pkt.data= (uint8_t *)final_picture;
1078 pkt.size= sizeof(AVPicture);
1079 pkt.pts= av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
1080 pkt.flags |= PKT_FLAG_KEY;
1081
1082 write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
1083 enc->coded_frame = old_frame;
1084 } else {
1085 AVFrame big_picture;
1086
1087 big_picture= *final_picture;
1088 /* better than nothing: use input picture interlaced
1089 settings */
1090 big_picture.interlaced_frame = in_picture->interlaced_frame;
1091 if(avcodec_opts[CODEC_TYPE_VIDEO]->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)){
1092 if(top_field_first == -1)
1093 big_picture.top_field_first = in_picture->top_field_first;
1094 else
1095 big_picture.top_field_first = top_field_first;
1096 }
1097
1098 /* handles sameq here. This is not correct because it may
1099 not be a global option */
1100 if (same_quality) {
1101 big_picture.quality = ist->st->quality;
1102 }else
1103 big_picture.quality = ost->st->quality;
1104 if(!me_threshold)
1105 big_picture.pict_type = 0;
1106 // big_picture.pts = AV_NOPTS_VALUE;
1107 big_picture.pts= ost->sync_opts;
1108 // big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
1109 //av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
1110 ret = avcodec_encode_video(enc,
1111 bit_buffer, bit_buffer_size,
1112 &big_picture);
1113 if (ret < 0) {
1114 fprintf(stderr, "Video encoding failed\n");
1115 av_exit(1);
1116 }
1117
1118 if(ret>0){
1119 pkt.data= bit_buffer;
1120 pkt.size= ret;
1121 if(enc->coded_frame->pts != AV_NOPTS_VALUE)
1122 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1123 /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n",
1124 pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
1125 pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
1126
1127 if(enc->coded_frame->key_frame)
1128 pkt.flags |= PKT_FLAG_KEY;
1129 write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
1130 *frame_size = ret;
1131 video_size += ret;
1132 //fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
1133 // enc->frame_number-1, ret, enc->pict_type);
1134 /* if two pass, output log */
1135 if (ost->logfile && enc->stats_out) {
1136 fprintf(ost->logfile, "%s", enc->stats_out);
1137 }
1138 }
1139 }
1140 ost->sync_opts++;
1141 ost->frame_number++;
1142 }
1143 }
1144
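/* Convert a mean squared error normalized to the 0..1 range into PSNR in
   decibels: psnr = -10 * log10(d); callers pass error/(width*height*255^2). */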
1145 static double psnr(double d){
1146 return -10.0*log(d)/log(10.0);
1147 }
1148
1149 static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
1150 int frame_size)
1151 {
1152 AVCodecContext *enc;
1153 int frame_number;
1154 double ti1, bitrate, avg_bitrate;
1155
1156 /* this is executed just the first time do_video_stats is called */
1157 if (!vstats_file) {
1158 vstats_file = fopen(vstats_filename, "w");
1159 if (!vstats_file) {
1160 perror("fopen");
1161 av_exit(1);
1162 }
1163 }
1164
1165 enc = ost->st->codec;
1166 if (enc->codec_type == CODEC_TYPE_VIDEO) {
1167 frame_number = ost->frame_number;
1168 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
1169 if (enc->flags&CODEC_FLAG_PSNR)
1170 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
1171
1172 fprintf(vstats_file,"f_size= %6d ", frame_size);
1173 /* compute pts value */
1174 ti1 = ost->sync_opts * av_q2d(enc->time_base);
1175 if (ti1 < 0.01)
1176 ti1 = 0.01;
1177
1178 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1179 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1180 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1181 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1182 fprintf(vstats_file,"type= %c\n", av_get_pict_type_char(enc->coded_frame->pict_type));
1183 }
1184 }
1185
1186 static void print_report(AVFormatContext **output_files,
1187 AVOutputStream **ost_table, int nb_ostreams,
1188 int is_last_report)
1189 {
1190 char buf[1024];
1191 AVOutputStream *ost;
1192 AVFormatContext *oc;
1193 int64_t total_size;
1194 AVCodecContext *enc;
1195 int frame_number, vid, i;
1196 double bitrate, ti1, pts;
1197 static int64_t last_time = -1;
1198 static int qp_histogram[52];
1199
1200 if (!is_last_report) {
1201 int64_t cur_time;
1202 /* display the report every 0.5 seconds */
1203 cur_time = av_gettime();
1204 if (last_time == -1) {
1205 last_time = cur_time;
1206 return;
1207 }
1208 if ((cur_time - last_time) < 500000)
1209 return;
1210 last_time = cur_time;
1211 }
1212
1213
1214 oc = output_files[0];
1215
1216 total_size = url_fsize(oc->pb);
1217 if(total_size<0) // FIXME improve url_fsize() so it works with non seekable output too
1218 total_size= url_ftell(oc->pb);
1219
1220 buf[0] = '\0';
1221 ti1 = 1e10;
1222 vid = 0;
1223 for(i=0;i<nb_ostreams;i++) {
1224 ost = ost_table[i];
1225 enc = ost->st->codec;
1226 if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1227 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ",
1228 !ost->st->stream_copy ?
1229 enc->coded_frame->quality/(float)FF_QP2LAMBDA : -1);
1230 }
1231 if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
1232 float t = (av_gettime()-timer_start) / 1000000.0;
1233
1234 frame_number = ost->frame_number;
1235 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1236 frame_number, (t>1)?(int)(frame_number/t+0.5) : 0,
1237 !ost->st->stream_copy ?
1238 enc->coded_frame->quality/(float)FF_QP2LAMBDA : -1);
1239 if(is_last_report)
1240 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1241 if(qp_hist){
1242 int j;
1243 int qp= lrintf(enc->coded_frame->quality/(float)FF_QP2LAMBDA);
1244 if(qp>=0 && qp<FF_ARRAY_ELEMS(qp_histogram))
1245 qp_histogram[qp]++;
1246 for(j=0; j<32; j++)
1247 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j]+1)/log(2)));
1248 }
1249 if (enc->flags&CODEC_FLAG_PSNR){
1250 int j;
1251 double error, error_sum=0;
1252 double scale, scale_sum=0;
1253 char type[3]= {'Y','U','V'};
1254 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1255 for(j=0; j<3; j++){
1256 if(is_last_report){
1257 error= enc->error[j];
1258 scale= enc->width*enc->height*255.0*255.0*frame_number;
1259 }else{
1260 error= enc->coded_frame->error[j];
1261 scale= enc->width*enc->height*255.0*255.0;
1262 }
1263 if(j) scale/=4;
1264 error_sum += error;
1265 scale_sum += scale;
1266 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
1267 }
1268 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
1269 }
1270 vid = 1;
1271 }
1272 /* compute min output value */
1273 pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1274 if ((pts < ti1) && (pts > 0))
1275 ti1 = pts;
1276 }
1277 if (ti1 < 0.01)
1278 ti1 = 0.01;
1279
1280 if (verbose || is_last_report) {
1281 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1282
1283 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1284 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1285 (double)total_size / 1024, ti1, bitrate);
1286
1287 if (nb_frames_dup || nb_frames_drop)
1288 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1289 nb_frames_dup, nb_frames_drop);
1290
1291 if (verbose >= 0)
1292 fprintf(stderr, "%s \r", buf);
1293
1294 fflush(stderr);
1295 }
1296
1297 if (is_last_report && verbose >= 0){
1298 int64_t raw= audio_size + video_size + extra_size;
1299 fprintf(stderr, "\n");
1300 fprintf(stderr, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1301 video_size/1024.0,
1302 audio_size/1024.0,
1303 extra_size/1024.0,
1304 100.0*(total_size - raw)/raw
1305 );
1306 }
1307 }
1308
1309 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1310 static int output_packet(AVInputStream *ist, int ist_index,
1311 AVOutputStream **ost_table, int nb_ostreams,
1312 const AVPacket *pkt)
1313 {
1314 AVFormatContext *os;
1315 AVOutputStream *ost;
1316 int ret, i;
1317 int got_picture;
1318 AVFrame picture;
1319 void *buffer_to_free;
1320 static unsigned int samples_size= 0;
1321 AVSubtitle subtitle, *subtitle_to_free;
1322 int got_subtitle;
1323 AVPacket avpkt;
1324 int bps = av_get_bits_per_sample_format(ist->st->codec->sample_fmt)>>3;
1325
1326 if(ist->next_pts == AV_NOPTS_VALUE)
1327 ist->next_pts= ist->pts;
1328
1329 if (pkt == NULL) {
1330 /* EOF handling */
1331 av_init_packet(&avpkt);
1332 avpkt.data = NULL;
1333 avpkt.size = 0;
1334 goto handle_eof;
1335 } else {
1336 avpkt = *pkt;
1337 }
1338
1339 if(pkt->dts != AV_NOPTS_VALUE)
1340 ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1341
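/* Decode loop: a single input packet may contain several frames, so keep
   decoding until its data is consumed. ist->pts tracks the current
   position in AV_TIME_BASE units and next_pts is advanced by the duration
   of each decoded frame, so streams without timestamps stay in sync. A
   NULL pkt (EOF) is turned into an empty packet to flush the decoder. */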
1342 //while we have more to decode or while the decoder did output something on EOF
1343 while (avpkt.size > 0 || (!pkt && ist->next_pts != ist->pts)) {
1344 uint8_t *data_buf, *decoded_data_buf;
1345 int data_size, decoded_data_size;
1346 handle_eof:
1347 ist->pts= ist->next_pts;
1348
1349 if(avpkt.size && avpkt.size != pkt->size &&
1350 ((!ist->showed_multi_packet_warning && verbose>0) || verbose>1)){
1351 fprintf(stderr, "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1352 ist->showed_multi_packet_warning=1;
1353 }
1354
1355 /* decode the packet if needed */
1356 decoded_data_buf = NULL; /* fail safe */
1357 decoded_data_size= 0;
1358 data_buf = avpkt.data;
1359 data_size = avpkt.size;
1360 subtitle_to_free = NULL;
1361 if (ist->decoding_needed) {
1362 switch(ist->st->codec->codec_type) {
1363 case CODEC_TYPE_AUDIO:{
1364 if(pkt && samples_size < FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
1365 samples_size = FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE);
1366 av_free(samples);
1367 samples= av_malloc(samples_size);
1368 }
1369 decoded_data_size= samples_size;
1370 /* XXX: could avoid copy if PCM 16 bits with same
1371 endianness as CPU */
1372 ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
1373 &avpkt);
1374 if (ret < 0)
1375 goto fail_decode;
1376 avpkt.data += ret;
1377 avpkt.size -= ret;
1378 data_size = ret;
1379                 /* Some bug in the mpeg audio decoder gives */
1380                 /* decoded_data_size < 0; it seems to be an overflow */
1381 if (decoded_data_size <= 0) {
1382 /* no audio frame */
1383 continue;
1384 }
1385 decoded_data_buf = (uint8_t *)samples;
1386 ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
1387 (ist->st->codec->sample_rate * ist->st->codec->channels);
1388 break;}
1389 case CODEC_TYPE_VIDEO:
1390 decoded_data_size = (ist->st->codec->width * ist->st->codec->height * 3) / 2;
1391 /* XXX: allocate picture correctly */
1392 avcodec_get_frame_defaults(&picture);
1393
1394 ret = avcodec_decode_video2(ist->st->codec,
1395 &picture, &got_picture, &avpkt);
1396 ist->st->quality= picture.quality;
1397 if (ret < 0)
1398 goto fail_decode;
1399 if (!got_picture) {
1400 /* no picture yet */
1401 goto discard_packet;
1402 }
1403 if (ist->st->codec->time_base.num != 0) {
1404 int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
1405 ist->next_pts += ((int64_t)AV_TIME_BASE *
1406 ist->st->codec->time_base.num * ticks) /
1407 ist->st->codec->time_base.den;
1408 }
1409 avpkt.size = 0;
1410 break;
1411 case CODEC_TYPE_SUBTITLE:
1412 ret = avcodec_decode_subtitle2(ist->st->codec,
1413 &subtitle, &got_subtitle, &avpkt);
1414 if (ret < 0)
1415 goto fail_decode;
1416 if (!got_subtitle) {
1417 goto discard_packet;
1418 }
1419 subtitle_to_free = &subtitle;
1420 avpkt.size = 0;
1421 break;
1422 default:
1423 goto fail_decode;
1424 }
1425 } else {
1426 switch(ist->st->codec->codec_type) {
1427 case CODEC_TYPE_AUDIO:
1428 ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
1429 ist->st->codec->sample_rate;
1430 break;
1431 case CODEC_TYPE_VIDEO:
1432 if (ist->st->codec->time_base.num != 0) {
1433 int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
1434 ist->next_pts += ((int64_t)AV_TIME_BASE *
1435 ist->st->codec->time_base.num * ticks) /
1436 ist->st->codec->time_base.den;
1437 }
1438 break;
1439 }
1440 ret = avpkt.size;
1441 avpkt.size = 0;
1442 }
1443
1444 buffer_to_free = NULL;
1445 if (ist->st->codec->codec_type == CODEC_TYPE_VIDEO) {
1446 pre_process_video_frame(ist, (AVPicture *)&picture,
1447 &buffer_to_free);
1448 }
1449
1450 // preprocess audio (volume)
1451 if (ist->st->codec->codec_type == CODEC_TYPE_AUDIO) {
1452 if (audio_volume != 256) {
1453 short *volp;
1454 volp = samples;
1455 for(i=0;i<(decoded_data_size / sizeof(short));i++) {
1456 int v = ((*volp) * audio_volume + 128) >> 8;
1457 if (v < -32768) v = -32768;
1458 if (v > 32767) v = 32767;
1459 *volp++ = v;
1460 }
1461 }
1462 }
1463
1464 /* frame rate emulation */
1465 if (rate_emu) {
1466 int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
1467 int64_t now = av_gettime() - ist->start;
1468 if (pts > now)
1469 usleep(pts - now);
1470 }
1471
1472         /* if the output start time has been reached, transcode the raw data,
1473            encode it into packets and output them */
1474 if (start_time == 0 || ist->pts >= start_time)
1475 for(i=0;i<nb_ostreams;i++) {
1476 int frame_size;
1477
1478 ost = ost_table[i];
1479 if (ost->source_index == ist_index) {
1480 os = output_files[ost->file_index];
1481
1482 /* set the input output pts pairs */
1483 //ost->sync_ipts = (double)(ist->pts + input_files_ts_offset[ist->file_index] - start_time)/ AV_TIME_BASE;
1484
1485 if (ost->encoding_needed) {
1486 assert(ist->decoding_needed);
1487 switch(ost->st->codec->codec_type) {
1488 case CODEC_TYPE_AUDIO:
1489 do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
1490 break;
1491 case CODEC_TYPE_VIDEO:
1492 do_video_out(os, ost, ist, &picture, &frame_size);
1493 if (vstats_filename && frame_size)
1494 do_video_stats(os, ost, frame_size);
1495 break;
1496 case CODEC_TYPE_SUBTITLE:
1497 do_subtitle_out(os, ost, ist, &subtitle,
1498 pkt->pts);
1499 break;
1500 default:
1501 abort();
1502 }
1503 } else {
1504 AVFrame avframe; //FIXME/XXX remove this
1505 AVPacket opkt;
1506 int64_t ost_tb_start_time= av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1507
1508 av_init_packet(&opkt);
1509
1510 if ((!ost->frame_number && !(pkt->flags & PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
1511 continue;
1512
1513                     /* no reencoding needed: output the packet directly */
1514 /* force the input stream PTS */
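/* Timestamps are rescaled from the input stream's time base to the output
   stream's and shifted by the output start time (ost_tb_start_time); if
   the packet has no dts, the current input pts is used instead. */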
1515
1516 avcodec_get_frame_defaults(&avframe);
1517 ost->st->codec->coded_frame= &avframe;
1518 avframe.key_frame = pkt->flags & PKT_FLAG_KEY;
1519
1520 if(ost->st->codec->codec_type == CODEC_TYPE_AUDIO)
1521 audio_size += data_size;
1522 else if (ost->st->codec->codec_type == CODEC_TYPE_VIDEO) {
1523 video_size += data_size;
1524 ost->sync_opts++;
1525 }
1526
1527 opkt.stream_index= ost->index;
1528 if(pkt->pts != AV_NOPTS_VALUE)
1529 opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1530 else
1531 opkt.pts= AV_NOPTS_VALUE;
1532
1533 if (pkt->dts == AV_NOPTS_VALUE)
1534 opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
1535 else
1536 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1537 opkt.dts -= ost_tb_start_time;
1538
1539 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1540 opkt.flags= pkt->flags;
1541
1542                     //FIXME remove the following 2 lines; they shall be replaced by the bitstream filters
1543 if( ost->st->codec->codec_id != CODEC_ID_H264
1544 && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
1545 && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
1546 ) {
1547 if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & PKT_FLAG_KEY))
1548 opkt.destruct= av_destruct_packet;
1549 } else {
1550 opkt.data = data_buf;
1551 opkt.size = data_size;
1552 }
1553
1554 write_frame(os, &opkt, ost->st->codec, bitstream_filters[ost->file_index][opkt.stream_index]);
1555 ost->st->codec->frame_number++;
1556 ost->frame_number++;
1557 av_free_packet(&opkt);
1558 }
1559 }
1560 }
1561 av_free(buffer_to_free);
1562 /* XXX: allocate the subtitles in the codec ? */
1563 if (subtitle_to_free) {
1564 if (subtitle_to_free->rects != NULL) {
1565 for (i = 0; i < subtitle_to_free->num_rects; i++) {
1566 av_freep(&subtitle_to_free->rects[i]->pict.data[0]);
1567 av_freep(&subtitle_to_free->rects[i]->pict.data[1]);
1568 av_freep(&subtitle_to_free->rects[i]);
1569 }
1570 av_freep(&subtitle_to_free->rects);
1571 }
1572 subtitle_to_free->num_rects = 0;
1573 subtitle_to_free = NULL;
1574 }
1575 }
1576 discard_packet:
1577 if (pkt == NULL) {
1578 /* EOF handling */
1579
1580 for(i=0;i<nb_ostreams;i++) {
1581 ost = ost_table[i];
1582 if (ost->source_index == ist_index) {
1583 AVCodecContext *enc= ost->st->codec;
1584 os = output_files[ost->file_index];
1585
1586 if(ost->st->codec->codec_type == CODEC_TYPE_AUDIO && enc->frame_size <=1)
1587 continue;
1588 if(ost->st->codec->codec_type == CODEC_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
1589 continue;
1590
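/* EOF: flush the encoders. Any samples left in the audio FIFO are encoded
   first (as a short last frame if the codec supports it, otherwise padded
   with silence), then each encoder is drained by calling it with a NULL
   input until it returns no more data. */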
1591 if (ost->encoding_needed) {
1592 for(;;) {
1593 AVPacket pkt;
1594 int fifo_bytes;
1595 av_init_packet(&pkt);
1596 pkt.stream_index= ost->index;
1597
1598 switch(ost->st->codec->codec_type) {
1599 case CODEC_TYPE_AUDIO:
1600 fifo_bytes = av_fifo_size(ost->fifo);
1601 ret = 0;
1602 /* encode any samples remaining in fifo */
1603 if (fifo_bytes > 0) {
1604 int osize = av_get_bits_per_sample_format(enc->sample_fmt) >> 3;
1605 int fs_tmp = enc->frame_size;
1606
1607 av_fifo_generic_read(ost->fifo, samples, fifo_bytes, NULL);
1608 if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
1609 enc->frame_size = fifo_bytes / (osize * enc->channels);
1610 } else { /* pad */
1611 int frame_bytes = enc->frame_size*osize*enc->channels;
1612 if (samples_size < frame_bytes)
1613 av_exit(1);
1614 memset((uint8_t*)samples+fifo_bytes, 0, frame_bytes - fifo_bytes);
1615 }
1616
1617 ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, samples);
1618 pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
1619 ost->st->time_base.num, enc->sample_rate);
1620 enc->frame_size = fs_tmp;
1621 }
1622 if(ret <= 0) {
1623 ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
1624 }
1625 if (ret < 0) {
1626 fprintf(stderr, "Audio encoding failed\n");
1627 av_exit(1);
1628 }
1629 audio_size += ret;
1630 pkt.flags |= PKT_FLAG_KEY;
1631 break;
1632 case CODEC_TYPE_VIDEO:
1633 ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
1634 if (ret < 0) {
1635 fprintf(stderr, "Video encoding failed\n");
1636 av_exit(1);
1637 }
1638 video_size += ret;
1639 if(enc->coded_frame && enc->coded_frame->key_frame)
1640 pkt.flags |= PKT_FLAG_KEY;
1641 if (ost->logfile && enc->stats_out) {
1642 fprintf(ost->logfile, "%s", enc->stats_out);
1643 }
1644 break;
1645 default:
1646 ret=-1;
1647 }
1648
1649 if(ret<=0)
1650 break;
1651 pkt.data= bit_buffer;
1652 pkt.size= ret;
1653 if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1654 pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1655 write_frame(os, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
1656 }
1657 }
1658 }
1659 }
1660 }
1661
1662 return 0;
1663 fail_decode:
1664 return -1;
1665 }
1666
1667 static void print_sdp(AVFormatContext **avc, int n)
1668 {
1669 char sdp[2048];
1670
1671 avf_sdp_create(avc, n, sdp, sizeof(sdp));
1672 printf("SDP:\n%s\n", sdp);
1673 fflush(stdout);
1674 }
1675
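/* Copy chapters from an input file to an output file: chapters that end
   before the requested start time are dropped, those beyond the recording
   time limit stop the loop, and start/end times and metadata are shifted
   and carried over. */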
1676 static int copy_chapters(int infile, int outfile)
1677 {
1678 AVFormatContext *is = input_files[infile];
1679 AVFormatContext *os = output_files[outfile];
1680 int i;
1681
1682 for (i = 0; i < is->nb_chapters; i++) {
1683 AVChapter *in_ch = is->chapters[i], *out_ch;
1684 AVMetadataTag *t = NULL;
1685 int64_t ts_off = av_rescale_q(start_time - input_files_ts_offset[infile],
1686 AV_TIME_BASE_Q, in_ch->time_base);
1687 int64_t rt = (recording_time == INT64_MAX) ? INT64_MAX :
1688 av_rescale_q(recording_time, AV_TIME_BASE_Q, in_ch->time_base);
1689
1690
1691 if (in_ch->end < ts_off)
1692 continue;
1693 if (rt != INT64_MAX && in_ch->start > rt + ts_off)
1694 break;
1695
1696 out_ch = av_mallocz(sizeof(AVChapter));
1697 if (!out_ch)
1698 return AVERROR(ENOMEM);
1699
1700 out_ch->id = in_ch->id;
1701 out_ch->time_base = in_ch->time_base;
1702 out_ch->start = FFMAX(0, in_ch->start - ts_off);
1703 out_ch->end = FFMIN(rt, in_ch->end - ts_off);
1704
1705 while ((t = av_metadata_get(in_ch->metadata, "", t, AV_METADATA_IGNORE_SUFFIX)))
1706 av_metadata_set2(&out_ch->metadata, t->key, t->value, 0);
1707
1708 os->nb_chapters++;
1709 os->chapters = av_realloc(os->chapters, sizeof(AVChapter)*os->nb_chapters);
1710 if (!os->chapters)
1711 return AVERROR(ENOMEM);
1712 os->chapters[os->nb_chapters - 1] = out_ch;
1713 }
1714 return 0;
1715 }
1716
1717 /*
1718 * The following code is the main loop of the file converter
1719 */
1720 static int av_encode(AVFormatContext **output_files,
1721 int nb_output_files,
1722 AVFormatContext **input_files,
1723 int nb_input_files,
1724 AVStreamMap *stream_maps, int nb_stream_maps)
1725 {
1726 int ret = 0, i, j, k, n, nb_istreams = 0, nb_ostreams = 0;
1727 AVFormatContext *is, *os;
1728 AVCodecContext *codec, *icodec;
1729 AVOutputStream *ost, **ost_table = NULL;
1730 AVInputStream *ist, **ist_table = NULL;
1731 AVInputFile *file_table;
1732 char error[1024];
1733 int key;
1734 int want_sdp = 1;
1735 uint8_t no_packet[MAX_FILES]={0};
1736 int no_packet_count=0;
1737
1738 file_table= av_mallocz(nb_input_files * sizeof(AVInputFile));
1739 if (!file_table)
1740 goto fail;
1741
1742 /* input stream init */
1743 j = 0;
1744 for(i=0;i<nb_input_files;i++) {
1745 is = input_files[i];
1746 file_table[i].ist_index = j;
1747 file_table[i].nb_streams = is->nb_streams;
1748 j += is->nb_streams;
1749 }
1750 nb_istreams = j;
1751
1752 ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *));
1753 if (!ist_table)
1754 goto fail;
1755
1756 for(i=0;i<nb_istreams;i++) {
1757 ist = av_mallocz(sizeof(AVInputStream));
1758 if (!ist)
1759 goto fail;
1760 ist_table[i] = ist;
1761 }
1762 j = 0;
1763 for(i=0;i<nb_input_files;i++) {
1764 is = input_files[i];
1765 for(k=0;k<is->nb_streams;k++) {
1766 ist = ist_table[j++];
1767 ist->st = is->streams[k];
1768 ist->file_index = i;
1769 ist->index = k;
1770 ist->discard = 1; /* the stream is discarded by default
1771 (changed later) */
1772
1773 if (rate_emu) {
1774 ist->start = av_gettime();
1775 }
1776 }
1777 }
1778
1779 /* output stream init */
1780 nb_ostreams = 0;
1781 for(i=0;i<nb_output_files;i++) {
1782 os = output_files[i];
1783 if (!os->nb_streams) {
1784 dump_format(output_files[i], i, output_files[i]->filename, 1);
1785 fprintf(stderr, "Output file #%d does not contain any stream\n", i);
1786 av_exit(1);
1787 }
1788 nb_ostreams += os->nb_streams;
1789 }
1790 if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) {
1791 fprintf(stderr, "Number of stream maps must match number of output streams\n");
1792 av_exit(1);
1793 }
1794
1795 /* Sanity check the mapping args -- do the input files & streams exist? */
1796 for(i=0;i<nb_stream_maps;i++) {
1797 int fi = stream_maps[i].file_index;
1798 int si = stream_maps[i].stream_index;
1799
1800 if (fi < 0 || fi > nb_input_files - 1 ||
1801 si < 0 || si > file_table[fi].nb_streams - 1) {
1802 fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si);
1803 av_exit(1);
1804 }
1805 fi = stream_maps[i].sync_file_index;
1806 si = stream_maps[i].sync_stream_index;
1807 if (fi < 0 || fi > nb_input_files - 1 ||
1808 si < 0 || si > file_table[fi].nb_streams - 1) {
1809 fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si);
1810 av_exit(1);
1811 }
1812 }
1813
1814 ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams);
1815 if (!ost_table)
1816 goto fail;
1817 for(i=0;i<nb_ostreams;i++) {
1818 ost = av_mallocz(sizeof(AVOutputStream));
1819 if (!ost)
1820 goto fail;
1821 ost_table[i] = ost;
1822 }
1823
1824 n = 0;
1825 for(k=0;k<nb_output_files;k++) {
1826 os = output_files[k];
1827 for(i=0;i<os->nb_streams;i++,n++) {
1828 int found;
1829 ost = ost_table[n];
1830 ost->file_index = k;
1831 ost->index = i;
1832 ost->st = os->streams[i];
1833 if (nb_stream_maps > 0) {
1834 ost->source_index = file_table[stream_maps[n].file_index].ist_index +
1835 stream_maps[n].stream_index;
1836
1837 /* Sanity check that the stream types match */
1838 if (ist_table[ost->source_index]->st->codec->codec_type != ost->st->codec->codec_type) {
1839 int i= ost->file_index;
1840 dump_format(output_files[i], i, output_files[i]->filename, 1);
1841 fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n",
1842 stream_maps[n].file_index, stream_maps[n].stream_index,
1843 ost->file_index, ost->index);
1844 av_exit(1);
1845 }
1846
1847 } else {
1848 int best_nb_frames=-1;
1849 /* get the corresponding input stream index: we select the first one with the right type */
1850 found = 0;
1851 for(j=0;j<nb_istreams;j++) {
1852 int skip=0;
1853 ist = ist_table[j];
1854 if(opt_programid){
1855 int pi,si;
1856 AVFormatContext *f= input_files[ ist->file_index ];
1857 skip=1;
1858 for(pi=0; pi<f->nb_programs; pi++){
1859 AVProgram *p= f->programs[pi];
1860 if(p->id == opt_programid)
1861 for(si=0; si<p->nb_stream_indexes; si++){
1862 if(f->streams[ p->stream_index[si] ] == ist->st)
1863 skip=0;
1864 }
1865 }
1866 }
1867 if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip &&
1868 ist->st->codec->codec_type == ost->st->codec->codec_type) {
1869 if(best_nb_frames < ist->st->codec_info_nb_frames){
1870 best_nb_frames= ist->st->codec_info_nb_frames;
1871 ost->source_index = j;
1872 found = 1;
1873 }
1874 }
1875 }
1876
1877 if (!found) {
1878 if(! opt_programid) {
1879 /* try again and reuse existing stream */
1880 for(j=0;j<nb_istreams;j++) {
1881 ist = ist_table[j];
1882 if ( ist->st->codec->codec_type == ost->st->codec->codec_type
1883 && ist->st->discard != AVDISCARD_ALL) {
1884 ost->source_index = j;
1885 found = 1;
1886 }
1887 }
1888 }
1889 if (!found) {
1890 int i= ost->file_index;
1891 dump_format(output_files[i], i, output_files[i]->filename, 1);
1892 fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n",
1893 ost->file_index, ost->index);
1894 av_exit(1);
1895 }
1896 }
1897 }
1898 ist = ist_table[ost->source_index];
1899 ist->discard = 0;
1900 ost->sync_ist = (nb_stream_maps > 0) ?
1901 ist_table[file_table[stream_maps[n].sync_file_index].ist_index +
1902 stream_maps[n].sync_stream_index] : ist;
1903 }
1904 }
1905
1906 /* for each output stream, we compute the right encoding parameters */
1907 for(i=0;i<nb_ostreams;i++) {
1908 AVMetadataTag *t = NULL, *lang = NULL;
1909 ost = ost_table[i];
1910 os = output_files[ost->file_index];
1911 ist = ist_table[ost->source_index];
1912
1913 codec = ost->st->codec;
1914 icodec = ist->st->codec;
1915
1916 if (av_metadata_get(ist->st->metadata, "language", NULL, 0))
1917 lang = av_metadata_get(ost->st->metadata, "language", NULL, 0);
1918 while ((t = av_metadata_get(ist->st->metadata, "", t, AV_METADATA_IGNORE_SUFFIX))) {
1919 if (lang && !strcmp(t->key, "language"))
1920 continue;
1921 av_metadata_set2(&ost->st->metadata, t->key, t->value, 0);
1922 }
1923
1924 ost->st->disposition = ist->st->disposition;
1925 codec->bits_per_raw_sample= icodec->bits_per_raw_sample;
1926 codec->chroma_sample_location = icodec->chroma_sample_location;
1927
1928 if (ost->st->stream_copy) {
1929 /* if stream_copy is selected, no need to decode or encode */
1930 codec->codec_id = icodec->codec_id;
1931 codec->codec_type = icodec->codec_type;
1932
1933 if(!codec->codec_tag){
1934 if( !os->oformat->codec_tag
1935 || av_codec_get_id (os->oformat->codec_tag, icodec->codec_tag) == codec->codec_id
1936 || av_codec_get_tag(os->oformat->codec_tag, icodec->codec_id) <= 0)
1937 codec->codec_tag = icodec->codec_tag;
1938 }
1939
1940 codec->bit_rate = icodec->bit_rate;
1941 codec->extradata= icodec->extradata;
1942 codec->extradata_size= icodec->extradata_size;
1943 if(av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/1000){
1944 codec->time_base = icodec->time_base;
1945 codec->time_base.num *= icodec->ticks_per_frame;
1946 }else
1947 codec->time_base = ist->st->time_base;
1948 switch(codec->codec_type) {
1949 case CODEC_TYPE_AUDIO:
1950 if(audio_volume != 256) {
1951 fprintf(stderr,"-acodec copy and -vol are incompatible (frames are not decoded)\n");
1952 av_exit(1);
1953 }
1954 codec->channel_layout = icodec->channel_layout;
1955 codec->sample_rate = icodec->sample_rate;
1956 codec->channels = icodec->channels;
1957 codec->frame_size = icodec->frame_size;
1958 codec->block_align= icodec->block_align;
1959 if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3)
1960 codec->block_align= 0;
1961 if(codec->codec_id == CODEC_ID_AC3)
1962 codec->block_align= 0;
1963 break;
1964 case CODEC_TYPE_VIDEO:
1965 codec->pix_fmt = icodec->pix_fmt;
1966 codec->width = icodec->width;
1967 codec->height = icodec->height;
1968 codec->has_b_frames = icodec->has_b_frames;
1969 break;
1970 case CODEC_TYPE_SUBTITLE:
1971 codec->width = icodec->width;
1972 codec->height = icodec->height;
1973 break;
1974 default:
1975 abort();
1976 }
1977 } else {
1978 switch(codec->codec_type) {
1979 case CODEC_TYPE_AUDIO:
1980 ost->fifo= av_fifo_alloc(1024);
1981 if(!ost->fifo)
1982 goto fail;
1983 ost->reformat_pair = MAKE_SFMT_PAIR(SAMPLE_FMT_NONE,SAMPLE_FMT_NONE);
1984 ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1;
1985 icodec->request_channels = codec->channels;
1986 ist->decoding_needed = 1;
1987 ost->encoding_needed = 1;
1988 break;
1989 case CODEC_TYPE_VIDEO:
1990 if (ost->st->codec->pix_fmt == PIX_FMT_NONE) {
1991 fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\n");
1992 av_exit(1);
1993 }
1994 ost->video_crop = ((frame_leftBand + frame_rightBand + frame_topBand + frame_bottomBand) != 0);
1995 ost->video_pad = ((frame_padleft + frame_padright + frame_padtop + frame_padbottom) != 0);
1996 ost->video_resample = ((codec->width != icodec->width -
1997 (frame_leftBand + frame_rightBand) +
1998 (frame_padleft + frame_padright)) ||
1999 (codec->height != icodec->height -
2000 (frame_topBand + frame_bottomBand) +
2001 (frame_padtop + frame_padbottom)) ||
2002 (codec->pix_fmt != icodec->pix_fmt));
2003 if (ost->video_crop) {
2004 ost->topBand = ost->original_topBand = frame_topBand;
2005 ost->bottomBand = ost->original_bottomBand = frame_bottomBand;
2006 ost->leftBand = ost->original_leftBand = frame_leftBand;
2007 ost->rightBand = ost->original_rightBand = frame_rightBand;
2008 }
2009 if (ost->video_pad) {
2010 ost->padtop = frame_padtop;
2011 ost->padleft = frame_padleft;
2012 ost->padbottom = frame_padbottom;
2013 ost->padright = frame_padright;
2014 if (!ost->video_resample) {
2015 avcodec_get_frame_defaults(&ost->pict_tmp);
2016 if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
2017 codec->width, codec->height))
2018 goto fail;
2019 }
2020 }
2021 if (ost->video_resample) {
2022 avcodec_get_frame_defaults(&ost->pict_tmp);
2023 if(avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
2024 codec->width, codec->height)) {
2025 fprintf(stderr, "Cannot allocate temp picture, check pix fmt\n");
2026 av_exit(1);
2027 }
2028 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
2029 ost->img_resample_ctx = sws_getContext(
2030 icodec->width - (frame_leftBand + frame_rightBand),
2031 icodec->height - (frame_topBand + frame_bottomBand),
2032 icodec->pix_fmt,
2033 codec->width - (frame_padleft + frame_padright),
2034 codec->height - (frame_padtop + frame_padbottom),
2035 codec->pix_fmt,
2036 sws_flags, NULL, NULL, NULL);
2037 if (ost->img_resample_ctx == NULL) {
2038 fprintf(stderr, "Cannot get resampling context\n");
2039 av_exit(1);
2040 }
2041
2042 ost->original_height = icodec->height;
2043 ost->original_width = icodec->width;
2044
2045 codec->bits_per_raw_sample= 0;
2046 }
2047 ost->resample_height = icodec->height - (frame_topBand + frame_bottomBand);
2048 ost->resample_width = icodec->width - (frame_leftBand + frame_rightBand);
2049 ost->resample_pix_fmt= icodec->pix_fmt;
2050 ost->encoding_needed = 1;
2051 ist->decoding_needed = 1;
2052 break;
2053 case CODEC_TYPE_SUBTITLE:
2054 ost->encoding_needed = 1;
2055 ist->decoding_needed = 1;
2056 break;
2057 default:
2058 abort();
2059 break;
2060 }
2061 /* two pass mode */
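            /* Pass 1 opens "<prefix>-<i>.log" for writing (prefix set with
               -passlogfile, e.g. "ffmpeg2pass-0.log" with the default prefix)
               and the encoding loop appends the codec's stats_out to it;
               pass 2 reads the whole file back into codec->stats_in so the
               rate control can reuse the first-pass statistics. */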
2062 if (ost->encoding_needed &&
2063 (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2064 char logfilename[1024];
2065 FILE *f;
2066 int size;
2067 char *logbuffer;
2068
2069 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2070 pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
2071 i);
2072 if (codec->flags & CODEC_FLAG_PASS1) {
2073 f = fopen(logfilename, "w");
2074 if (!f) {
2075 fprintf(stderr, "Cannot write log file '%s' for pass-1 encoding: %s\n", logfilename, strerror(errno));
2076 av_exit(1);
2077 }
2078 ost->logfile = f;
2079 } else {
2080 /* read the log file */
2081 f = fopen(logfilename, "r");
2082 if (!f) {
2083 fprintf(stderr, "Cannot read log file '%s' for pass-2 encoding: %s\n", logfilename, strerror(errno));
2084 av_exit(1);
2085 }
2086 fseek(f, 0, SEEK_END);
2087 size = ftell(f);
2088 fseek(f, 0, SEEK_SET);
2089 logbuffer = av_malloc(size + 1);
2090 if (!logbuffer) {
2091 fprintf(stderr, "Could not allocate log buffer\n");
2092 av_exit(1);
2093 }
2094 size = fread(logbuffer, 1, size, f);
2095 fclose(f);
2096 logbuffer[size] = '\0';
2097 codec->stats_in = logbuffer;
2098 }
2099 }
2100 }
2101 if(codec->codec_type == CODEC_TYPE_VIDEO){
2102 int size= codec->width * codec->height;
2103 bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 200);
2104 }
2105 }
2106
2107 if (!bit_buffer)
2108 bit_buffer = av_malloc(bit_buffer_size);
2109 if (!bit_buffer) {
2110 fprintf(stderr, "Cannot allocate %d bytes output buffer\n",
2111 bit_buffer_size);
2112 ret = AVERROR(ENOMEM);
2113 goto fail;
2114 }
2115
2116 /* open each encoder */
2117 for(i=0;i<nb_ostreams;i++) {
2118 ost = ost_table[i];
2119 if (ost->encoding_needed) {
2120 AVCodec *codec = output_codecs[i];
2121 if (!codec)
2122 codec = avcodec_find_encoder(ost->st->codec->codec_id);
2123 if (!codec) {
2124 snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d.%d",
2125 ost->st->codec->codec_id, ost->file_index, ost->index);
2126 ret = AVERROR(EINVAL);
2127 goto dump_format;
2128 }
2129 if (avcodec_open(ost->st->codec, codec) < 0) {
2130 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2131 ost->file_index, ost->index);
2132 ret = AVERROR(EINVAL);
2133 goto dump_format;
2134 }
2135 extra_size += ost->st->codec->extradata_size;
2136 }
2137 }
2138
2139 /* open each decoder */
2140 for(i=0;i<nb_istreams;i++) {
2141 ist = ist_table[i];
2142 if (ist->decoding_needed) {
2143 AVCodec *codec = input_codecs[i];
2144 if (!codec)
2145 codec = avcodec_find_decoder(ist->st->codec->codec_id);
2146 if (!codec) {
2147 snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d",
2148 ist->st->codec->codec_id, ist->file_index, ist->index);
2149 ret = AVERROR(EINVAL);
2150 goto dump_format;
2151 }
2152 if (avcodec_open(ist->st->codec, codec) < 0) {
2153 snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d",
2154 ist->file_index, ist->index);
2155 ret = AVERROR(EINVAL);
2156 goto dump_format;
2157 }
2158 //if (ist->st->codec->codec_type == CODEC_TYPE_VIDEO)
2159 // ist->st->codec->flags |= CODEC_FLAG_REPEAT_FIELD;
2160 }
2161 }
2162
2163 /* init pts */
2164 for(i=0;i<nb_istreams;i++) {
2165 AVStream *st;
2166 ist = ist_table[i];
2167 st= ist->st;
2168 ist->pts = st->avg_frame_rate.num ? - st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0;
2169 ist->next_pts = AV_NOPTS_VALUE;
2170 ist->is_start = 1;
2171 }
2172
2173 /* set meta data information from input file if required */
2174 for (i=0;i<nb_meta_data_maps;i++) {
2175 AVFormatContext *out_file;
2176 AVFormatContext *in_file;
2177 AVMetadataTag *mtag;
2178
2179 int out_file_index = meta_data_maps[i].out_file;
2180 int in_file_index = meta_data_maps[i].in_file;
2181 if (out_file_index < 0 || out_file_index >= nb_output_files) {
2182 snprintf(error, sizeof(error), "Invalid output file index %d map_meta_data(%d,%d)",
2183 out_file_index, out_file_index, in_file_index);
2184 ret = AVERROR(EINVAL);
2185 goto dump_format;
2186 }
2187 if (in_file_index < 0 || in_file_index >= nb_input_files) {
2188 snprintf(error, sizeof(error), "Invalid input file index %d map_meta_data(%d,%d)",
2189 in_file_index, out_file_index, in_file_index);
2190 ret = AVERROR(EINVAL);
2191 goto dump_format;
2192 }
2193
2194 out_file = output_files[out_file_index];
2195 in_file = input_files[in_file_index];
2196
2197
2198 mtag=NULL;
2199 while((mtag=av_metadata_get(in_file->metadata, "", mtag, AV_METADATA_IGNORE_SUFFIX)))
2200 av_metadata_set(&out_file->metadata, mtag->key, mtag->value);
2201 av_metadata_conv(out_file, out_file->oformat->metadata_conv,
2202 in_file->iformat->metadata_conv);
2203 }
2204
2205 /* copy chapters from the first input file that has them */
2206 for (i = 0; i < nb_input_files; i++) {
2207 if (!input_files[i]->nb_chapters)
2208 continue;
2209
2210 for (j = 0; j < nb_output_files; j++)
2211 if ((ret = copy_chapters(i, j)) < 0)
2212 goto dump_format;
2213 }
2214
2215 /* open files and write file headers */
2216 for(i=0;i<nb_output_files;i++) {
2217 os = output_files[i];
2218 if (av_write_header(os) < 0) {
2219 snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
2220 ret = AVERROR(EINVAL);
2221 goto dump_format;
2222 }
2223 if (strcmp(output_files[i]->oformat->name, "rtp")) {
2224 want_sdp = 0;
2225 }
2226 }
2227
2228 dump_format:
2229 /* dump the output file parameters - this cannot be done earlier,
2230 in case of stream copy */
2231 for(i=0;i<nb_output_files;i++) {
2232 dump_format(output_files[i], i, output_files[i]->filename, 1);
2233 }
2234
2235 /* dump the stream mapping */
2236 if (verbose >= 0) {
2237 fprintf(stderr, "Stream mapping:\n");
2238 for(i=0;i<nb_ostreams;i++) {
2239 ost = ost_table[i];
2240 fprintf(stderr, " Stream #%d.%d -> #%d.%d",
2241 ist_table[ost->source_index]->file_index,
2242 ist_table[ost->source_index]->index,
2243 ost->file_index,
2244 ost->index);
2245 if (ost->sync_ist != ist_table[ost->source_index])
2246 fprintf(stderr, " [sync #%d.%d]",
2247 ost->sync_ist->file_index,
2248 ost->sync_ist->index);
2249 fprintf(stderr, "\n");
2250 }
2251 }
2252
2253 if (ret) {
2254 fprintf(stderr, "%s\n", error);
2255 goto fail;
2256 }
2257
2258 if (want_sdp) {
2259 print_sdp(output_files, nb_output_files);
2260 }
2261
2262 if (!using_stdin && verbose >= 0) {
2263 fprintf(stderr, "Press [q] to stop encoding\n");
2264 url_set_interrupt_cb(decode_interrupt_cb);
2265 }
2266 term_init();
2267
2268 timer_start = av_gettime();
2269
2270 for(; received_sigterm == 0;) {
2271 int file_index, ist_index;
2272 AVPacket pkt;
2273 double ipts_min;
2274 double opts_min;
2275
2276 redo:
2277 ipts_min= 1e100;
2278 opts_min= 1e100;
2279 /* if 'q' was pressed, exit */
2280 if (!using_stdin) {
2281 if (q_pressed)
2282 break;
2283 /* read_key() returns 0 on EOF */
2284 key = read_key();
2285 if (key == 'q')
2286 break;
2287 }
2288
2289 /* select the stream that we must read now by looking at the
2290 smallest output pts */
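        /* Reading from the file whose output has advanced the least keeps
           the output files growing evenly and the muxer's interleaving
           buffers small; when input_sync is set the choice is based on the
           input pts instead. */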
2291 file_index = -1;
2292 for(i=0;i<nb_ostreams;i++) {
2293 double ipts, opts;
2294 ost = ost_table[i];
2295 os = output_files[ost->file_index];
2296 ist = ist_table[ost->source_index];
2297 if(ist->is_past_recording_time || no_packet[ist->file_index])
2298 continue;
2299 opts = ost->st->pts.val * av_q2d(ost->st->time_base);
2300 ipts = (double)ist->pts;
2301 if (!file_table[ist->file_index].eof_reached){
2302 if(ipts < ipts_min) {
2303 ipts_min = ipts;
2304 if(input_sync ) file_index = ist->file_index;
2305 }
2306 if(opts < opts_min) {
2307 opts_min = opts;
2308 if(!input_sync) file_index = ist->file_index;
2309 }
2310 }
2311 if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){
2312 file_index= -1;
2313 break;
2314 }
2315 }
2316 /* if none, we are finished */
2317 if (file_index < 0) {
2318 if(no_packet_count){
2319 no_packet_count=0;
2320 memset(no_packet, 0, sizeof(no_packet));
2321 usleep(10000);
2322 continue;
2323 }
2324 break;
2325 }
2326
2327 /* finish if limit size exhausted */
2328 if (limit_filesize != 0 && limit_filesize < url_ftell(output_files[0]->pb))
2329 break;
2330
2331 /* read a frame from it and output it in the fifo */
2332 is = input_files[file_index];
2333 ret= av_read_frame(is, &pkt);
2334 if(ret == AVERROR(EAGAIN)){
2335 no_packet[file_index]=1;
2336 no_packet_count++;
2337 continue;
2338 }
2339 if (ret < 0) {
2340 file_table[file_index].eof_reached = 1;
2341 if (opt_shortest)
2342 break;
2343 else
2344 continue;
2345 }
2346
2347 no_packet_count=0;
2348 memset(no_packet, 0, sizeof(no_packet));
2349
2350 if (do_pkt_dump) {
2351 av_pkt_dump_log(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump);
2352 }
2353 /* the following test is needed in case new streams appear
2354 dynamically in the stream: we ignore them */
2355 if (pkt.stream_index >= file_table[file_index].nb_streams)
2356 goto discard_packet;
2357 ist_index = file_table[file_index].ist_index + pkt.stream_index;
2358 ist = ist_table[ist_index];
2359 if (ist->discard)
2360 goto discard_packet;
2361
2362 if (pkt.dts != AV_NOPTS_VALUE)
2363 pkt.dts += av_rescale_q(input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ist->st->time_base);
2364 if (pkt.pts != AV_NOPTS_VALUE)
2365 pkt.pts += av_rescale_q(input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ist->st->time_base);
2366
2367 if(input_files_ts_scale[file_index][pkt.stream_index]){
2368 if(pkt.pts != AV_NOPTS_VALUE)
2369 pkt.pts *= input_files_ts_scale[file_index][pkt.stream_index];
2370 if(pkt.dts != AV_NOPTS_VALUE)
2371 pkt.dts *= input_files_ts_scale[file_index][pkt.stream_index];
2372 }
2373
2374 // fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files_ts_offset[ist->file_index], ist->st->codec->codec_type);
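        /* Discontinuity correction for formats flagged AVFMT_TS_DISCONT
           (e.g. MPEG-TS): if the packet dts jumps more than
           dts_delta_threshold seconds away from the predicted next_pts,
           the jump is folded into input_files_ts_offset and subtracted from
           the packet timestamps, so downstream timestamps stay continuous.
           Illustrative numbers: next_pts at 5s but pkt_dts at 95s gives
           delta = 90s, so the offset shrinks by 90s and this packet's
           dts/pts are shifted back by the same amount. */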
2375 if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE
2376 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
2377 int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2378 int64_t delta= pkt_dts - ist->next_pts;
2379 if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1<ist->pts)&& !copy_ts){
2380 input_files_ts_offset[ist->file_index]-= delta;
2381 if (verbose > 2)
2382 fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, input_files_ts_offset[ist->file_index]);
2383 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2384 if(pkt.pts != AV_NOPTS_VALUE)
2385 pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2386 }
2387 }
2388
2389 /* finish if recording time exhausted */
2390 if (recording_time != INT64_MAX &&
2391 av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) >= 0) {
2392 ist->is_past_recording_time = 1;
2393 goto discard_packet;
2394 }
2395
2396 //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->index, pkt.size);
2397 if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
2398
2399 if (verbose >= 0)
2400 fprintf(stderr, "Error while decoding stream #%d.%d\n",
2401 ist->file_index, ist->index);
2402 if (exit_on_error)
2403 av_exit(1);
2404 av_free_packet(&pkt);
2405 goto redo;
2406 }
2407
2408 discard_packet:
2409 av_free_packet(&pkt);
2410
2411 /* dump report using the first output video and audio streams */
2412 print_report(output_files, ost_table, nb_ostreams, 0);
2413 }
2414
2415 /* at the end of stream, we must flush the decoder buffers */
2416 for(i=0;i<nb_istreams;i++) {
2417 ist = ist_table[i];
2418 if (ist->decoding_needed) {
2419 output_packet(ist, i, ost_table, nb_ostreams, NULL);
2420 }
2421 }
2422
2423 term_exit();
2424
2425 /* write the trailer if needed and close file */
2426 for(i=0;i<nb_output_files;i++) {
2427 os = output_files[i];
2428 av_write_trailer(os);
2429 }
2430
2431 /* dump report by using the first video and audio streams */
2432 print_report(output_files, ost_table, nb_ostreams, 1);
2433
2434 /* close each encoder */
2435 for(i=0;i<nb_ostreams;i++) {
2436 ost = ost_table[i];
2437 if (ost->encoding_needed) {
2438 av_freep(&ost->st->codec->stats_in);
2439 avcodec_close(ost->st->codec);
2440 }
2441 }
2442
2443 /* close each decoder */
2444 for(i=0;i<nb_istreams;i++) {
2445 ist = ist_table[i];
2446 if (ist->decoding_needed) {
2447 avcodec_close(ist->st->codec);
2448 }
2449 }
2450
2451 /* finished ! */
2452 ret = 0;
2453
2454 fail:
2455 av_freep(&bit_buffer);
2456 av_free(file_table);
2457
2458 if (ist_table) {
2459 for(i=0;i<nb_istreams;i++) {
2460 ist = ist_table[i];
2461 av_free(ist);
2462 }
2463 av_free(ist_table);
2464 }
2465 if (ost_table) {
2466 for(i=0;i<nb_ostreams;i++) {
2467 ost = ost_table[i];
2468 if (ost) {
2469 if (ost->logfile) {
2470 fclose(ost->logfile);
2471 ost->logfile = NULL;
2472 }
2473 av_fifo_free(ost->fifo); /* works even if fifo is not
2474 initialized but set to zero */
2475 av_free(ost->pict_tmp.data[0]);
2476 if (ost->video_resample)
2477 sws_freeContext(ost->img_resample_ctx);
2478 if (ost->resample)
2479 audio_resample_close(ost->resample);
2480 if (ost->reformat_ctx)
2481 av_audio_convert_free(ost->reformat_ctx);
2482 av_free(ost);
2483 }
2484 }
2485 av_free(ost_table);
2486 }
2487 return ret;
2488 }
2489
2490 static void opt_format(const char *arg)
2491 {
2492 /* compatibility stuff for pgmyuv */
2493 if (!strcmp(arg, "pgmyuv")) {
2494 pgmyuv_compatibility_hack=1;
2495 // opt_image_format(arg);
2496 arg = "image2";
2497 fprintf(stderr, "pgmyuv format is deprecated, use image2\n");
2498 }
2499
2500 last_asked_format = arg;
2501 }
2502
2503 static void opt_video_rc_override_string(const char *arg)
2504 {
2505 video_rc_override_string = arg;
2506 }
2507
2508 static int opt_me_threshold(const char *opt, const char *arg)
2509 {
2510 me_threshold = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2511 return 0;
2512 }
2513
2514 static int opt_verbose(const char *opt, const char *arg)
2515 {
2516 verbose = parse_number_or_die(opt, arg, OPT_INT64, -10, 10);
2517 return 0;
2518 }
2519
2520 static int opt_frame_rate(const char *opt, const char *arg)
2521 {
2522 if (av_parse_video_frame_rate(&frame_rate, arg) < 0) {
2523 fprintf(stderr, "Incorrect value for %s: %s\n", opt, arg);
2524 av_exit(1);
2525 }
2526 return 0;
2527 }
2528
2529 static int opt_bitrate(const char *opt, const char *arg)
2530 {
2531 int codec_type = opt[0]=='a' ? CODEC_TYPE_AUDIO : CODEC_TYPE_VIDEO;
2532
2533 opt_default(opt, arg);
2534
2535 if (av_get_int(avcodec_opts[codec_type], "b", NULL) < 1000)
2536 fprintf(stderr, "WARNING: The bitrate parameter is set too low. It takes bits/s as argument, not kbits/s\n");
2537
2538 return 0;
2539 }
2540
2541 static void opt_frame_crop_top(const char *arg)
2542 {
2543 frame_topBand = atoi(arg);
2544 if (frame_topBand < 0) {
2545 fprintf(stderr, "Incorrect top crop size\n");
2546 av_exit(1);
2547 }
2548 if ((frame_topBand) >= frame_height){
2549 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2550 av_exit(1);
2551 }
2552 frame_height -= frame_topBand;
2553 }
2554
2555 static void opt_frame_crop_bottom(const char *arg)
2556 {
2557 frame_bottomBand = atoi(arg);
2558 if (frame_bottomBand < 0) {
2559 fprintf(stderr, "Incorrect bottom crop size\n");
2560 av_exit(1);
2561 }
2562 if ((frame_bottomBand) >= frame_height){
2563 fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2564 av_exit(1);
2565 }
2566 frame_height -= frame_bottomBand;
2567 }
2568
2569 static void opt_frame_crop_left(const char *arg)
2570 {
2571 frame_leftBand = atoi(arg);
2572 if (frame_leftBand < 0) {
2573 fprintf(stderr, "Incorrect left crop size\n");
2574 av_exit(1);
2575 }
2576 if ((frame_leftBand) >= frame_width){
2577 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2578 av_exit(1);
2579 }
2580 frame_width -= frame_leftBand;
2581 }
2582
2583 static void opt_frame_crop_right(const char *arg)
2584 {
2585 frame_rightBand = atoi(arg);
2586 if (frame_rightBand < 0) {
2587 fprintf(stderr, "Incorrect right crop size\n");
2588 av_exit(1);
2589 }
2590 if ((frame_rightBand) >= frame_width){
2591 fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
2592 av_exit(1);
2593 }
2594 frame_width -= frame_rightBand;
2595 }
2596
2597 static void opt_frame_size(const char *arg)
2598 {
2599 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2600 fprintf(stderr, "Incorrect frame size\n");
2601 av_exit(1);
2602 }
2603 }
2604
2605 static void opt_pad_color(const char *arg) {
2606 /* Input is expected to be six hex digits, similar to
2607 how colors are expressed in HTML tags (but without the #) */
2608 int rgb = strtol(arg, NULL, 16);
2609 int r,g,b;
2610
2611 r = (rgb >> 16);
2612 g = ((rgb >> 8) & 255);
2613 b = (rgb & 255);
2614
2615 padcolor[0] = RGB_TO_Y(r,g,b);
2616 padcolor[1] = RGB_TO_U(r,g,b,0);
2617 padcolor[2] = RGB_TO_V(r,g,b,0);
2618 }
2619
2620 static void opt_frame_pad_top(const char *arg)
2621 {
2622 frame_padtop = atoi(arg);
2623 if (frame_padtop < 0) {
2624 fprintf(stderr, "Incorrect top pad size\n");
2625 av_exit(1);
2626 }
2627 }
2628
2629 static void opt_frame_pad_bottom(const char *arg)
2630 {
2631 frame_padbottom = atoi(arg);
2632 if (frame_padbottom < 0) {
2633 fprintf(stderr, "Incorrect bottom pad size\n");
2634 av_exit(1);
2635 }
2636 }
2637
2638
2639 static void opt_frame_pad_left(const char *arg)
2640 {
2641 frame_padleft = atoi(arg);
2642 if (frame_padleft < 0) {
2643 fprintf(stderr, "Incorrect left pad size\n");
2644 av_exit(1);
2645 }
2646 }
2647
2648
2649 static void opt_frame_pad_right(const char *arg)
2650 {
2651 frame_padright = atoi(arg);
2652 if (frame_padright < 0) {
2653 fprintf(stderr, "Incorrect right pad size\n");
2654 av_exit(1);
2655 }
2656 }
2657
2658 static void opt_frame_pix_fmt(const char *arg)
2659 {
2660 if (strcmp(arg, "list")) {
2661 frame_pix_fmt = av_get_pix_fmt(arg);
2662 if (frame_pix_fmt == PIX_FMT_NONE) {
2663 fprintf(stderr, "Unknown pixel format requested: %s\n", arg);
2664 av_exit(1);
2665 }
2666 } else {
2667 show_pix_fmts();
2668 av_exit(0);
2669 }
2670 }
2671
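/* Accepts either "x:y" (e.g. "16:9") or a single decimal value (e.g.
   "1.7778"); both forms are reduced to one double stored in
   frame_aspect_ratio. */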
2672 static void opt_frame_aspect_ratio(const char *arg)
2673 {
2674 int x = 0, y = 0;
2675 double ar = 0;
2676 const char *p;
2677 char *end;
2678
2679 p = strchr(arg, ':');
2680 if (p) {
2681 x = strtol(arg, &end, 10);
2682 if (end == p)
2683 y = strtol(end+1, &end, 10);
2684 if (x > 0 && y > 0)
2685 ar = (double)x / (double)y;
2686 } else
2687 ar = strtod(arg, NULL);
2688
2689 if (!ar) {
2690 fprintf(stderr, "Incorrect aspect ratio specification.\n");
2691 av_exit(1);
2692 }
2693 frame_aspect_ratio = ar;
2694 }
2695
2696 static int opt_metadata(const char *opt, const char *arg)
2697 {
2698 char *mid= strchr(arg, '=');
2699
2700 if(!mid){
2701 fprintf(stderr, "Missing =\n");
2702 av_exit(1);
2703 }
2704 *mid++= 0;
2705
2706 metadata_count++;
2707 metadata= av_realloc(metadata, sizeof(*metadata)*metadata_count);
2708 metadata[metadata_count-1].key = av_strdup(arg);
2709 metadata[metadata_count-1].value= av_strdup(mid);
2710
2711 return 0;
2712 }
2713
2714 static void opt_qscale(const char *arg)
2715 {
2716 video_qscale = atof(arg);
2717 if (video_qscale <= 0 ||
2718 video_qscale > 255) {
2719 fprintf(stderr, "qscale must be > 0.0 and <= 255\n");
2720 av_exit(1);
2721 }
2722 }
2723
2724 static void opt_top_field_first(const char *arg)
2725 {
2726 top_field_first= atoi(arg);
2727 }
2728
2729 static int opt_thread_count(const char *opt, const char *arg)
2730 {
2731 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2732 #if !HAVE_THREADS
2733 if (verbose >= 0)
2734 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2735 #endif
2736 return 0;
2737 }
2738
2739 static void opt_audio_sample_fmt(const char *arg)
2740 {
2741 if (strcmp(arg, "list"))
2742 audio_sample_fmt = avcodec_get_sample_fmt(arg);
2743 else {
2744 list_fmts(avcodec_sample_fmt_string, SAMPLE_FMT_NB);
2745 av_exit(0);
2746 }
2747 }
2748
2749 static int opt_audio_rate(const char *opt, const char *arg)
2750 {
2751 audio_sample_rate = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2752 return 0;
2753 }
2754
2755 static int opt_audio_channels(const char *opt, const char *arg)
2756 {
2757 audio_channels = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2758 return 0;
2759 }
2760
2761 static void opt_video_channel(const char *arg)
2762 {
2763 video_channel = strtol(arg, NULL, 0);
2764 }
2765
2766 static void opt_video_standard(const char *arg)
2767 {
2768 video_standard = av_strdup(arg);
2769 }
2770
2771 static void opt_codec(int *pstream_copy, char **pcodec_name,
2772 int codec_type, const char *arg)
2773 {
2774 av_freep(pcodec_name);
2775 if (!strcmp(arg, "copy")) {
2776 *pstream_copy = 1;
2777 } else {
2778 *pcodec_name = av_strdup(arg);
2779 }
2780 }
2781
2782 static void opt_audio_codec(const char *arg)
2783 {
2784 opt_codec(&audio_stream_copy, &audio_codec_name, CODEC_TYPE_AUDIO, arg);
2785 }
2786
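/* The codec tag parsers below accept either a number (strtol with base 0,
   so decimal or 0x... hex) or a four-character code; in the latter case the
   characters are packed into a little-endian fourcc, e.g. "avc1" becomes
   'a' | 'v'<<8 | 'c'<<16 | '1'<<24 == 0x31637661. */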
2787 static void opt_audio_tag(const char *arg)
2788 {
2789 char *tail;
2790 audio_codec_tag= strtol(arg, &tail, 0);
2791
2792 if(!tail || *tail)
2793 audio_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
2794 }
2795
2796 static void opt_video_tag(const char *arg)
2797 {
2798 char *tail;
2799 video_codec_tag= strtol(arg, &tail, 0);
2800
2801 if(!tail || *tail)
2802 video_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
2803 }
2804
2805 static void opt_video_codec(const char *arg)
2806 {
2807 opt_codec(&video_stream_copy, &video_codec_name, CODEC_TYPE_VIDEO, arg);
2808 }
2809
2810 static void opt_subtitle_codec(const char *arg)
2811 {
2812 opt_codec(&subtitle_stream_copy, &subtitle_codec_name, CODEC_TYPE_SUBTITLE, arg);
2813 }
2814
2815 static void opt_subtitle_tag(const char *arg)
2816 {
2817 char *tail;
2818 subtitle_codec_tag= strtol(arg, &tail, 0);
2819
2820 if(!tail || *tail)
2821 subtitle_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
2822 }
2823
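/* Parser for the -map argument. Example invocations matching the logic
   below (any single non-digit separator is accepted):
       -map 0.1        select stream 1 of input file 0
       -map 0.1:1.0    same, but sync it against stream 0 of input file 1
   When no sync pair is given the mapped stream is its own sync reference. */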
2824 static void opt_map(const char *arg)
2825 {
2826 AVStreamMap *m;
2827 char *p;
2828
2829 m = &stream_maps[nb_stream_maps++];
2830
2831 m->file_index = strtol(arg, &p, 0);
2832 if (*p)
2833 p++;
2834
2835 m->stream_index = strtol(p, &p, 0);
2836 if (*p) {
2837 p++;
2838 m->sync_file_index = strtol(p, &p, 0);
2839 if (*p)
2840 p++;
2841 m->sync_stream_index = strtol(p, &p, 0);
2842 } else {
2843 m->sync_file_index = m->file_index;
2844 m->sync_stream_index = m->stream_index;
2845 }
2846 }
2847
2848 static void opt_map_meta_data(const char *arg)
2849 {
2850 AVMetaDataMap *m;
2851 char *p;
2852
2853 m = &meta_data_maps[nb_meta_data_maps++];
2854
2855 m->out_file = strtol(arg, &p, 0);
2856 if (*p)
2857 p++;
2858
2859 m->in_file = strtol(p, &p, 0);
2860 }
2861
2862 static void opt_input_ts_scale(const char *arg)
2863 {
2864 unsigned int stream;
2865 double scale;
2866 char *p;
2867
2868 stream = strtol(arg, &p, 0);
2869 if (*p)
2870 p++;
2871 scale= strtod(p, &p);
2872
2873 if(stream >= MAX_STREAMS)
2874 av_exit(1);
2875
2876 input_files_ts_scale[nb_input_files][stream]= scale;
2877 }
2878
2879 static int opt_recording_time(const char *opt, const char *arg)
2880 {
2881 recording_time = parse_time_or_die(opt, arg, 1);
2882 return 0;
2883 }
2884
2885 static int opt_start_time(const char *opt, const char *arg)
2886 {
2887 start_time = parse_time_or_die(opt, arg, 1);
2888 return 0;
2889 }
2890
2891 static int opt_rec_timestamp(const char *opt, const char *arg)
2892 {
2893 rec_timestamp = parse_time_or_die(opt, arg, 0) / 1000000;
2894 return 0;
2895 }
2896
2897 static int opt_input_ts_offset(const char *opt, const char *arg)
2898 {
2899 input_ts_offset = parse_time_or_die(opt, arg, 1);
2900 return 0;
2901 }
2902
2903 static enum CodecID find_codec_or_die(const char *name, int type, int encoder)
2904 {
2905 const char *codec_string = encoder ? "encoder" : "decoder";
2906 AVCodec *codec;
2907
2908 if(!name)
2909 return CODEC_ID_NONE;
2910 codec = encoder ?
2911 avcodec_find_encoder_by_name(name) :
2912 avcodec_find_decoder_by_name(name);
2913 if(!codec) {
2914 fprintf(stderr, "Unknown %s '%s'\n", codec_string, name);
2915 av_exit(1);
2916 }
2917 if(codec->type != type) {
2918 fprintf(stderr, "Invalid %s type '%s'\n", codec_string, name);
2919 av_exit(1);
2920 }
2921 return codec->id;
2922 }
2923
2924 static void opt_input_file(const char *filename)
2925 {
2926 AVFormatContext *ic;
2927 AVFormatParameters params, *ap = &params;
2928 AVInputFormat *file_iformat = NULL;
2929 int err, i, ret, rfps, rfps_base;
2930 int64_t timestamp;
2931
2932 if (last_asked_format) {
2933 if (!(file_iformat = av_find_input_format(last_asked_format))) {
2934 fprintf(stderr, "Unknown input format: '%s'\n", last_asked_format);
2935 av_exit(1);
2936 }
2937 last_asked_format = NULL;
2938 }
2939
2940 if (!strcmp(filename, "-"))
2941 filename = "pipe:";
2942
2943 using_stdin |= !strncmp(filename, "pipe:", 5) ||
2944 !strcmp(filename, "/dev/stdin");
2945
2946 /* get default parameters from command line */
2947 ic = avformat_alloc_context();
2948 if (!ic) {
2949 print_error(filename, AVERROR(ENOMEM));
2950 av_exit(1);
2951 }
2952
2953 memset(ap, 0, sizeof(*ap));
2954 ap->prealloced_context = 1;
2955 ap->sample_rate = audio_sample_rate;
2956 ap->channels = audio_channels;
2957 ap->time_base.den = frame_rate.num;
2958 ap->time_base.num = frame_rate.den;
2959 ap->width = frame_width + frame_padleft + frame_padright;
2960 ap->height = frame_height + frame_padtop + frame_padbottom;
2961 ap->pix_fmt = frame_pix_fmt;
2962 // ap->sample_fmt = audio_sample_fmt; //FIXME:not implemented in libavformat
2963 ap->channel = video_channel;
2964 ap->standard = video_standard;
2965
2966 set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
2967
2968 ic->video_codec_id = find_codec_or_die(video_codec_name , CODEC_TYPE_VIDEO , 0);
2969 ic->audio_codec_id = find_codec_or_die(audio_codec_name , CODEC_TYPE_AUDIO , 0);
2970 ic->subtitle_codec_id= find_codec_or_die(subtitle_codec_name, CODEC_TYPE_SUBTITLE, 0);
2971 ic->flags |= AVFMT_FLAG_NONBLOCK;
2972
2973 if(pgmyuv_compatibility_hack)
2974 ic->video_codec_id= CODEC_ID_PGMYUV;
2975
2976 /* open the input file with generic libav function */
2977 err = av_open_input_file(&ic, filename, file_iformat, 0, ap);
2978 if (err < 0) {
2979 print_error(filename, err);
2980 av_exit(1);
2981 }
2982 if(opt_programid) {
2983 int i, j;
2984 int found=0;
2985 for(i=0; i<ic->nb_streams; i++){
2986 ic->streams[i]->discard= AVDISCARD_ALL;
2987 }
2988 for(i=0; i<ic->nb_programs; i++){
2989 AVProgram *p= ic->programs[i];
2990 if(p->id != opt_programid){
2991 p->discard = AVDISCARD_ALL;
2992 }else{
2993 found=1;
2994 for(j=0; j<p->nb_stream_indexes; j++){
2995 ic->streams[p->stream_index[j]]->discard= 0;
2996 }
2997 }
2998 }
2999 if(!found){
3000 fprintf(stderr, "Specified program id not found\n");
3001 av_exit(1);
3002 }
3003 opt_programid=0;
3004 }
3005
3006 ic->loop_input = loop_input;
3007
3008 /* If there is not enough info to get the stream parameters, we decode the
3009 first frames to get them (used in the MPEG case, for example) */
3010 ret = av_find_stream_info(ic);
3011 if (ret < 0 && verbose >= 0) {
3012 fprintf(stderr, "%s: could not find codec parameters\n", filename);
3013 av_exit(1);
3014 }
3015
3016 timestamp = start_time;
3017 /* add the stream start time */
3018 if (ic->start_time != AV_NOPTS_VALUE)
3019 timestamp += ic->start_time;
3020
3021 /* if seeking requested, we execute it */
3022 if (start_time != 0) {
3023 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
3024 if (ret < 0) {
3025 fprintf(stderr, "%s: could not seek to position %0.3f\n",
3026 filename, (double)timestamp / AV_TIME_BASE);
3027 }
3028 /* reset seek info */
3029 start_time = 0;
3030 }
3031
3032 /* update the current parameters so that they match those of the input stream */
3033 for(i=0;i<ic->nb_streams;i++) {
3034 AVStream *st = ic->streams[i];
3035 AVCodecContext *enc = st->codec;
3036 avcodec_thread_init(enc, thread_count);
3037 switch(enc->codec_type) {
3038 case CODEC_TYPE_AUDIO:
3039 set_context_opts(enc, avcodec_opts[CODEC_TYPE_AUDIO], AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM);
3040 //fprintf(stderr, "\nInput Audio channels: %d", enc->channels);
3041 channel_layout = enc->channel_layout;
3042 audio_channels = enc->channels;
3043 audio_sample_rate = enc->sample_rate;
3044 audio_sample_fmt = enc->sample_fmt;
3045 input_codecs[nb_icodecs++] = avcodec_find_decoder_by_name(audio_codec_name);
3046 if(audio_disable)
3047 st->discard= AVDISCARD_ALL;
3048 break;
3049 case CODEC_TYPE_VIDEO:
3050 set_context_opts(enc, avcodec_opts[CODEC_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM);
3051 frame_height = enc->height;
3052 frame_width = enc->width;
3053 if(ic->streams[i]->sample_aspect_ratio.num)
3054 frame_aspect_ratio=av_q2d(ic->streams[i]->sample_aspect_ratio);
3055 else
3056 frame_aspect_ratio=av_q2d(enc->sample_aspect_ratio);
3057 frame_aspect_ratio *= (float) enc->width / enc->height;
3058 frame_pix_fmt = enc->pix_fmt;
3059 rfps = ic->streams[i]->r_frame_rate.num;
3060 rfps_base = ic->streams[i]->r_frame_rate.den;
3061 if(enc->lowres) {
3062 enc->flags |= CODEC_FLAG_EMU_EDGE;
3063 frame_height >>= enc->lowres;
3064 frame_width >>= enc->lowres;
3065 }
3066 if(me_threshold)
3067 enc->debug |= FF_DEBUG_MV;
3068
3069 if (enc->time_base.den != rfps || enc->time_base.num != rfps_base) {
3071 if (verbose >= 0)
3072 fprintf(stderr,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
3073 i, (float)enc->time_base.den / enc->time_base.num, enc->time_base.den, enc->time_base.num,
3075 (float)rfps / rfps_base, rfps, rfps_base);
3076 }
3077 /* update the current frame rate to match the stream frame rate */
3078 frame_rate.num = rfps;
3079 frame_rate.den = rfps_base;
3080
3081 input_codecs[nb_icodecs++] = avcodec_find_decoder_by_name(video_codec_name);
3082 if(video_disable)
3083 st->discard= AVDISCARD_ALL;
3084 else if(video_discard)
3085 st->discard= video_discard;
3086 break;
3087 case CODEC_TYPE_DATA:
3088 break;
3089 case CODEC_TYPE_SUBTITLE:
3090 input_codecs[nb_icodecs++] = avcodec_find_decoder_by_name(subtitle_codec_name);
3091 if(subtitle_disable)
3092 st->discard = AVDISCARD_ALL;
3093 break;
3094 case CODEC_TYPE_ATTACHMENT:
3095 case CODEC_TYPE_UNKNOWN:
3096 nb_icodecs++;
3097 break;
3098 default:
3099 abort();
3100 }
3101 }
3102
3103 input_files[nb_input_files] = ic;
3104 input_files_ts_offset[nb_input_files] = input_ts_offset - (copy_ts ? 0 : timestamp);
3105 /* dump the file content */
3106 if (verbose >= 0)
3107 dump_format(ic, nb_input_files, filename, 0);
3108
3109 nb_input_files++;
3110
3111 video_channel = 0;
3112
3113 av_freep(&video_codec_name);
3114 av_freep(&audio_codec_name);
3115 av_freep(&subtitle_codec_name);
3116 }
3117
3118 static void check_audio_video_sub_inputs(int *has_video_ptr, int *has_audio_ptr,
3119 int *has_subtitle_ptr)
3120 {
3121 int has_video, has_audio, has_subtitle, i, j;
3122 AVFormatContext *ic;
3123
3124 has_video = 0;
3125 has_audio = 0;
3126 has_subtitle = 0;
3127 for(j=0;j<nb_input_files;j++) {
3128 ic = input_files[j];
3129 for(i=0;i<ic->nb_streams;i++) {
3130 AVCodecContext *enc = ic->streams[i]->codec;
3131 switch(enc->codec_type) {
3132 case CODEC_TYPE_AUDIO:
3133 has_audio = 1;
3134 break;
3135 case CODEC_TYPE_VIDEO:
3136 has_video = 1;
3137 break;
3138 case CODEC_TYPE_SUBTITLE:
3139 has_subtitle = 1;
3140 break;
3141 case CODEC_TYPE_DATA:
3142 case CODEC_TYPE_ATTACHMENT:
3143 case CODEC_TYPE_UNKNOWN:
3144 break;
3145 default:
3146 abort();
3147 }
3148 }
3149 }
3150 *has_video_ptr = has_video;
3151 *has_audio_ptr = has_audio;
3152 *has_subtitle_ptr = has_subtitle;
3153 }
3154
3155 static void new_video_stream(AVFormatContext *oc)
3156 {
3157 AVStream *st;
3158 AVCodecContext *video_enc;
3159 enum CodecID codec_id;
3160
3161 st = av_new_stream(oc, oc->nb_streams);
3162 if (!st) {
3163 fprintf(stderr, "Could not alloc stream\n");
3164 av_exit(1);
3165 }
3166 avcodec_get_context_defaults2(st->codec, CODEC_TYPE_VIDEO);
3167 bitstream_filters[nb_output_files][oc->nb_streams - 1]= video_bitstream_filters;
3168 video_bitstream_filters= NULL;
3169
3170 avcodec_thread_init(st->codec, thread_count);
3171
3172 video_enc = st->codec;
3173
3174 if(video_codec_tag)
3175 video_enc->codec_tag= video_codec_tag;
3176
3177 if( (video_global_header&1)
3178 || (video_global_header==0 && (oc->oformat->flags & AVFMT_GLOBALHEADER))){
3179 video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
3180 avcodec_opts[CODEC_TYPE_VIDEO]->flags|= CODEC_FLAG_GLOBAL_HEADER;
3181 }
3182 if(video_global_header&2){
3183 video_enc->flags2 |= CODEC_FLAG2_LOCAL_HEADER;
3184 avcodec_opts[CODEC_TYPE_VIDEO]->flags2|= CODEC_FLAG2_LOCAL_HEADER;
3185 }
3186
3187 if (video_stream_copy) {
3188 st->stream_copy = 1;
3189 video_enc->codec_type = CODEC_TYPE_VIDEO;
3190 video_enc->sample_aspect_ratio =
3191 st->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255);
3192 } else {
3193 const char *p;
3194 int i;
3195 AVCodec *codec;
3196 AVRational fps= frame_rate.num ? frame_rate : (AVRational){25,1};
3197
3198 if (video_codec_name) {
3199 codec_id = find_codec_or_die(video_codec_name, CODEC_TYPE_VIDEO, 1);
3200 codec = avcodec_find_encoder_by_name(video_codec_name);
3201 output_codecs[nb_ocodecs] = codec;
3202 } else {
3203 codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, CODEC_TYPE_VIDEO);
3204 codec = avcodec_find_encoder(codec_id);
3205 }
3206
3207 video_enc->codec_id = codec_id;
3208
3209 set_context_opts(video_enc, avcodec_opts[CODEC_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM);
3210
3211 if (codec && codec->supported_framerates && !force_fps)
3212 fps = codec->supported_framerates[av_find_nearest_q_idx(fps, codec->supported_framerates)];
3213 video_enc->time_base.den = fps.num;
3214 video_enc->time_base.num = fps.den;
3215
3216 video_enc->width = frame_width + frame_padright + frame_padleft;
3217 video_enc->height = frame_height + frame_padtop + frame_padbottom;
3218 video_enc->sample_aspect_ratio = av_d2q(frame_aspect_ratio*video_enc->height/video_enc->width, 255);
3219 video_enc->pix_fmt = frame_pix_fmt;
3220 st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
3221
3222 choose_pixel_fmt(st, codec);
3223
3224 if (intra_only)
3225 video_enc->gop_size = 0;
3226 if (video_qscale || same_quality) {
3227 video_enc->flags |= CODEC_FLAG_QSCALE;
3228 video_enc->global_quality=
3229 st->quality = FF_QP2LAMBDA * video_qscale;
3230 }
3231
3232 if(intra_matrix)
3233 video_enc->intra_matrix = intra_matrix;
3234 if(inter_matrix)
3235 video_enc->inter_matrix = inter_matrix;
3236
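            /* -rc_override takes a '/'-separated list of "start,end,q"
               triples, e.g. "0,25,15/26,200,-50": frames 0-25 get a fixed
               qscale of 15, while for frames 26-200 the negative value is
               interpreted as a relative quality factor of -(-50)/100.0 = 0.5. */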
3237 p= video_rc_override_string;
3238 for(i=0; p; i++){
3239 int start, end, q;
3240 int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
3241 if(e!=3){
3242 fprintf(stderr, "error parsing rc_override\n");
3243 av_exit(1);
3244 }
3245 video_enc->rc_override=
3246 av_realloc(video_enc->rc_override,
3247 sizeof(RcOverride)*(i+1));
3248 video_enc->rc_override[i].start_frame= start;
3249 video_enc->rc_override[i].end_frame = end;
3250 if(q>0){
3251 video_enc->rc_override[i].qscale= q;
3252 video_enc->rc_override[i].quality_factor= 1.0;
3253 }
3254 else{
3255 video_enc->rc_override[i].qscale= 0;
3256 video_enc->rc_override[i].quality_factor= -q/100.0;
3257 }
3258 p= strchr(p, '/');
3259 if(p) p++;
3260 }
3261 video_enc->rc_override_count=i;
3262 if (!video_enc->rc_initial_buffer_occupancy)
3263 video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size*3/4;
3264 video_enc->me_threshold= me_threshold;
3265 video_enc->intra_dc_precision= intra_dc_precision - 8;
3266
3267 if (do_psnr)
3268 video_enc->flags|= CODEC_FLAG_PSNR;
3269
3270 /* two pass mode */
3271 if (do_pass) {
3272 if (do_pass == 1) {
3273 video_enc->flags |= CODEC_FLAG_PASS1;
3274 } else {
3275 video_enc->flags |= CODEC_FLAG_PASS2;
3276 }
3277 }
3278 }
3279 nb_ocodecs++;
3280 if (video_language) {
3281 av_metadata_set(&st->metadata, "language", video_language);
3282 av_freep(&video_language);
3283 }
3284
3285 /* reset some key parameters */
3286 video_disable = 0;
3287 av_freep(&video_codec_name);
3288 video_stream_copy = 0;
3289 frame_pix_fmt = PIX_FMT_NONE;
3290 }
3291
3292 static void new_audio_stream(AVFormatContext *oc)
3293 {
3294 AVStream *st;
3295 AVCodecContext *audio_enc;
3296 enum CodecID codec_id;
3297
3298 st = av_new_stream(oc, oc->nb_streams);
3299 if (!st) {
3300 fprintf(stderr, "Could not alloc stream\n");
3301 av_exit(1);
3302 }
3303 avcodec_get_context_defaults2(st->codec, CODEC_TYPE_AUDIO);
3304
3305 bitstream_filters[nb_output_files][oc->nb_streams - 1]= audio_bitstream_filters;
3306 audio_bitstream_filters= NULL;
3307
3308 avcodec_thread_init(st->codec, thread_count);
3309
3310 audio_enc = st->codec;
3311 audio_enc->codec_type = CODEC_TYPE_AUDIO;
3312
3313 if(audio_codec_tag)
3314 audio_enc->codec_tag= audio_codec_tag;
3315
3316 if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
3317 audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
3318 avcodec_opts[CODEC_TYPE_AUDIO]->flags|= CODEC_FLAG_GLOBAL_HEADER;
3319 }
3320 if (audio_stream_copy) {
3321 st->stream_copy = 1;
3322 audio_enc->channels = audio_channels;
3323 } else {
3324 AVCodec *codec;
3325
3326 set_context_opts(audio_enc, avcodec_opts[CODEC_TYPE_AUDIO], AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM);
3327
3328 if (audio_codec_name) {
3329 codec_id = find_codec_or_die(audio_codec_name, CODEC_TYPE_AUDIO, 1);
3330 codec = avcodec_find_encoder_by_name(audio_codec_name);
3331 output_codecs[nb_ocodecs] = codec;
3332 } else {
3333 codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, CODEC_TYPE_AUDIO);
3334 codec = avcodec_find_encoder(codec_id);
3335 }
3336 audio_enc->codec_id = codec_id;
3337
3338 if (audio_qscale > QSCALE_NONE) {
3339 audio_enc->flags |= CODEC_FLAG_QSCALE;
3340 audio_enc->global_quality = st->quality = FF_QP2LAMBDA * audio_qscale;
3341 }
3342 audio_enc->channels = audio_channels;
3343 audio_enc->sample_fmt = audio_sample_fmt;
3344 audio_enc->channel_layout = channel_layout;
3345 if (avcodec_channel_layout_num_channels(channel_layout) != audio_channels)
3346 audio_enc->channel_layout = 0;
3347 choose_sample_fmt(st, codec);
3348 }
3349 nb_ocodecs++;
3350 audio_enc->sample_rate = audio_sample_rate;
3351 audio_enc->time_base= (AVRational){1, audio_sample_rate};
3352 if (audio_language) {
3353 av_metadata_set(&st->metadata, "language", audio_language);
3354 av_freep(&audio_language);
3355 }
3356
3357 /* reset some key parameters */
3358 audio_disable = 0;
3359 av_freep(&audio_codec_name);
3360 audio_stream_copy = 0;
3361 }
3362
3363 static void new_subtitle_stream(AVFormatContext *oc)
3364 {
3365 AVStream *st;
3366 AVCodecContext *subtitle_enc;
3367
3368 st = av_new_stream(oc, oc->nb_streams);
3369 if (!st) {
3370 fprintf(stderr, "Could not alloc stream\n");
3371 av_exit(1);
3372 }
3373 avcodec_get_context_defaults2(st->codec, CODEC_TYPE_SUBTITLE);
3374
3375 bitstream_filters[nb_output_files][oc->nb_streams - 1]= subtitle_bitstream_filters;
3376 subtitle_bitstream_filters= NULL;
3377
3378 subtitle_enc = st->codec;
3379 subtitle_enc->codec_type = CODEC_TYPE_SUBTITLE;
3380
3381 if(subtitle_codec_tag)
3382 subtitle_enc->codec_tag= subtitle_codec_tag;
3383
3384 if (subtitle_stream_copy) {
3385 st->stream_copy = 1;
3386 } else {
3387 set_context_opts(subtitle_enc, avcodec_opts[CODEC_TYPE_SUBTITLE], AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_ENCODING_PARAM);
3388 subtitle_enc->codec_id = find_codec_or_die(subtitle_codec_name, CODEC_TYPE_SUBTITLE, 1);
3389 output_codecs[nb_ocodecs] = avcodec_find_encoder_by_name(subtitle_codec_name);
3390 }
3391 nb_ocodecs++;
3392
3393 if (subtitle_language) {
3394 av_metadata_set(&st->metadata, "language", subtitle_language);
3395 av_freep(&subtitle_language);
3396 }
3397
3398 subtitle_disable = 0;
3399 av_freep(&subtitle_codec_name);
3400 subtitle_stream_copy = 0;
3401 }
3402
3403 static void opt_new_audio_stream(void)
3404 {
3405 AVFormatContext *oc;
3406 if (nb_output_files <= 0) {
3407 fprintf(stderr, "At least one output file must be specified\n");
3408 av_exit(1);
3409 }
3410 oc = output_files[nb_output_files - 1];
3411 new_audio_stream(oc);
3412 }
3413
3414 static void opt_new_video_stream(void)
3415 {
3416 AVFormatContext *oc;
3417 if (nb_output_files <= 0) {
3418 fprintf(stderr, "At least one output file must be specified\n");
3419 av_exit(1);
3420 }
3421 oc = output_files[nb_output_files - 1];
3422 new_video_stream(oc);
3423 }
3424
3425 static void opt_new_subtitle_stream(void)
3426 {
3427 AVFormatContext *oc;
3428 if (nb_output_files <= 0) {
3429 fprintf(stderr, "At least one output file must be specified\n");
3430 av_exit(1);
3431 }
3432 oc = output_files[nb_output_files - 1];
3433 new_subtitle_stream(oc);
3434 }
3435
3436 static void opt_output_file(const char *filename)
3437 {
3438 AVFormatContext *oc;
3439 int use_video, use_audio, use_subtitle;
3440 int input_has_video, input_has_audio, input_has_subtitle;
3441 AVFormatParameters params, *ap = &params;
3442 AVOutputFormat *file_oformat;
3443
3444 if (!strcmp(filename, "-"))
3445 filename = "pipe:";
3446
3447 oc = avformat_alloc_context();
3448 if (!oc) {
3449 print_error(filename, AVERROR(ENOMEM));
3450 av_exit(1);
3451 }
3452
3453 if (last_asked_format) {
3454 file_oformat = av_guess_format(last_asked_format, NULL, NULL);
3455 if (!file_oformat) {
3456 fprintf(stderr, "Requested output format '%s' is not a suitable output format\n", last_asked_format);
3457 av_exit(1);
3458 }
3459 last_asked_format = NULL;
3460 } else {
3461 file_oformat = av_guess_format(NULL, filename, NULL);
3462 if (!file_oformat) {
3463 fprintf(stderr, "Unable to find a suitable output format for '%s'\n",
3464 filename);
3465 av_exit(1);
3466 }
3467 }
3468
3469 oc->oformat = file_oformat;
3470 av_strlcpy(oc->filename, filename, sizeof(oc->filename));
3471
3472 if (!strcmp(file_oformat->name, "ffm") &&
3473 av_strstart(filename, "http:", NULL)) {
3474 /* special case for files sent to ffserver: we get the stream
3475 parameters from ffserver */
3476 int err = read_ffserver_streams(oc, filename);
3477 if (err < 0) {
3478 print_error(filename, err);
3479 av_exit(1);
3480 }
3481 } else {
3482 use_video = file_oformat->video_codec != CODEC_ID_NONE || video_stream_copy || video_codec_name;
3483 use_audio = file_oformat->audio_codec != CODEC_ID_NONE || audio_stream_copy || audio_codec_name;
3484 use_subtitle = file_oformat->subtitle_codec != CODEC_ID_NONE || subtitle_stream_copy || subtitle_codec_name;
3485
3486 /* disable if no corresponding type found and at least one
3487 input file */
3488 if (nb_input_files > 0) {
3489 check_audio_video_sub_inputs(&input_has_video, &input_has_audio,
3490 &input_has_subtitle);
3491 if (!input_has_video)
3492 use_video = 0;
3493 if (!input_has_audio)
3494 use_audio = 0;
3495 if (!input_has_subtitle)
3496 use_subtitle = 0;
3497 }
3498
3499 /* manual disable */
3500 if (audio_disable) {
3501 use_audio = 0;
3502 }
3503 if (video_disable) {
3504 use_video = 0;
3505 }
3506 if (subtitle_disable) {
3507 use_subtitle = 0;
3508 }
3509
3510 if (use_video) {
3511 new_video_stream(oc);
3512 }
3513
3514 if (use_audio) {
3515 new_audio_stream(oc);
3516 }
3517
3518 if (use_subtitle) {
3519 new_subtitle_stream(oc);
3520 }
3521
3522 oc->timestamp = rec_timestamp;
3523
3524 for(; metadata_count>0; metadata_count--){
3525 av_metadata_set(&oc->metadata, metadata[metadata_count-1].key,
3526 metadata[metadata_count-1].value);
3527 }
3528 av_metadata_conv(oc, oc->oformat->metadata_conv, NULL);
3529 }
3530
3531 output_files[nb_output_files++] = oc;
3532
3533 /* check filename in case an image number is expected */
3534 if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
3535 if (!av_filename_number_test(oc->filename)) {
3536 print_error(oc->filename, AVERROR_NUMEXPECTED);
3537 av_exit(1);
3538 }
3539 }
3540
3541 if (!(oc->oformat->flags & AVFMT_NOFILE)) {
3542 /* test if it already exists to avoid losing precious files */
3543 if (!file_overwrite &&
3544 (strchr(filename, ':') == NULL ||
3545 filename[1] == ':' ||
3546 av_strstart(filename, "file:", NULL))) {
3547 if (url_exist(filename)) {
3548 if (!using_stdin) {
3549 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3550 fflush(stderr);
3551 if (!read_yesno()) {
3552 fprintf(stderr, "Not overwriting - exiting\n");
3553 av_exit(1);
3554 }
3555 }
3556 else {
3557 fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
3558 av_exit(1);
3559 }
3560 }
3561 }
3562
3563 /* open the file */
3564 if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
3565 fprintf(stderr, "Could not open '%s'\n", filename);
3566 av_exit(1);
3567 }
3568 }
3569
3570 memset(ap, 0, sizeof(*ap));
3571 if (av_set_parameters(oc, ap) < 0) {
3572 fprintf(stderr, "%s: Invalid encoding parameters\n",
3573 oc->filename);
3574 av_exit(1);
3575 }
3576
3577 oc->preload= (int)(mux_preload*AV_TIME_BASE);
3578 oc->max_delay= (int)(mux_max_delay*AV_TIME_BASE);
3579 oc->loop_output = loop_output;
3580 oc->flags |= AVFMT_FLAG_NONBLOCK;
3581
3582 set_context_opts(oc, avformat_opts, AV_OPT_FLAG_ENCODING_PARAM);
3583 }
3584
3585 /* same option as mencoder */
3586 static void opt_pass(const char *pass_str)
3587 {
3588 int pass;
3589 pass = atoi(pass_str);
3590 if (pass != 1 && pass != 2) {
3591 fprintf(stderr, "pass number can be only 1 or 2\n");
3592 av_exit(1);
3593 }
3594 do_pass = pass;
3595 }
3596
3597 static int64_t getutime(void)
3598 {
3599 #if HAVE_GETRUSAGE
3600 struct rusage rusage;
3601
3602 getrusage(RUSAGE_SELF, &rusage);
3603 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
3604 #elif HAVE_GETPROCESSTIMES
3605 HANDLE proc;
3606 FILETIME c, e, k, u;
3607 proc = GetCurrentProcess();
3608 GetProcessTimes(proc, &c, &e, &k, &u);
3609 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
3610 #else
3611 return av_gettime();
3612 #endif
3613 }
3614
3615 static int64_t getmaxrss(void)
3616 {
3617 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
3618 struct rusage rusage;
3619 getrusage(RUSAGE_SELF, &rusage);
3620 return (int64_t)rusage.ru_maxrss * 1024;
3621 #elif HAVE_GETPROCESSMEMORYINFO
3622 HANDLE proc;
3623 PROCESS_MEMORY_COUNTERS memcounters;
3624 proc = GetCurrentProcess();
3625 memcounters.cb = sizeof(memcounters);
3626 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
3627 return memcounters.PeakPagefileUsage;
3628 #else
3629 return 0;
3630 #endif
3631 }
3632
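/* Parses a comma-separated list of exactly 64 integer coefficients (one
   8x8 quantizer matrix) as used by -intra_matrix and -inter_matrix;
   running out of commas before the 64th value is a syntax error. */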
3633 static void parse_matrix_coeffs(uint16_t *dest, const char *str)
3634 {
3635 int i;
3636 const char *p = str;
3637 for(i = 0;; i++) {
3638 dest[i] = atoi(p);
3639 if(i == 63)
3640 break;
3641 p = strchr(p, ',');
3642 if(!p) {
3643 fprintf(stderr, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
3644 av_exit(1);
3645 }
3646 p++;
3647 }
3648 }
3649
3650 static void opt_inter_matrix(const char *arg)
3651 {
3652 inter_matrix = av_mallocz(sizeof(uint16_t) * 64);
3653 parse_matrix_coeffs(inter_matrix, arg);
3654 }
3655
3656 static void opt_intra_matrix(const char *arg)
3657 {
3658 intra_matrix = av_mallocz(sizeof(uint16_t) * 64);
3659 parse_matrix_coeffs(intra_matrix, arg);
3660 }
3661
3662 /**
3663 * Trivial log callback.
3664 * Only suitable for show_help and similar since it lacks prefix handling.
3665 */
3666 static void log_callback_help(void* ptr, int level, const char* fmt, va_list vl)
3667 {
3668 vfprintf(stdout, fmt, vl);
3669 }
3670
3671 static void show_usage(void)
3672 {
3673 printf("Hyper fast Audio and Video encoder\n");
3674 printf("usage: ffmpeg [options] [[infile options] -i infile]... {[outfile options] outfile}...\n");
3675 printf("\n");
3676 }
3677
3678 static void show_help(void)
3679 {
3680 av_log_set_callback(log_callback_help);
3681 show_usage();
3682 show_help_options(options, "Main options:\n",
3683 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
3684 show_help_options(options, "\nAdvanced options:\n",
3685 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
3686 OPT_EXPERT);
3687 show_help_options(options, "\nVideo options:\n",
3688 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
3689 OPT_VIDEO);
3690 show_help_options(options, "\nAdvanced Video options:\n",
3691 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
3692 OPT_VIDEO | OPT_EXPERT);
3693 show_help_options(options, "\nAudio options:\n",
3694 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
3695 OPT_AUDIO);
3696 show_help_options(options, "\nAdvanced Audio options:\n",
3697 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
3698 OPT_AUDIO | OPT_EXPERT);
3699 show_help_options(options, "\nSubtitle options:\n",
3700 OPT_SUBTITLE | OPT_GRAB,
3701 OPT_SUBTITLE);
3702 show_help_options(options, "\nAudio/Video grab options:\n",
3703 OPT_GRAB,
3704 OPT_GRAB);
3705 printf("\n");
3706 av_opt_show(avcodec_opts[0], NULL);
3707 print