avconv: Use av_gettime_relative
1 /*
2 * avconv main
3 * Copyright (c) 2000-2011 The libav developers.
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <ctype.h>
24 #include <string.h>
25 #include <math.h>
26 #include <stdlib.h>
27 #include <errno.h>
28 #include <signal.h>
29 #include <limits.h>
30 #include <stdint.h>
31
32 #include "libavformat/avformat.h"
33 #include "libavdevice/avdevice.h"
34 #include "libswscale/swscale.h"
35 #include "libavresample/avresample.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/channel_layout.h"
38 #include "libavutil/parseutils.h"
39 #include "libavutil/samplefmt.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavutil/time.h"
49 #include "libavformat/os_support.h"
50
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
54
55 #if HAVE_SYS_RESOURCE_H
56 #include <sys/time.h>
57 #include <sys/types.h>
58 #include <sys/resource.h>
59 #elif HAVE_GETPROCESSTIMES
60 #include <windows.h>
61 #endif
62 #if HAVE_GETPROCESSMEMORYINFO
63 #include <windows.h>
64 #include <psapi.h>
65 #endif
66
67 #if HAVE_SYS_SELECT_H
68 #include <sys/select.h>
69 #endif
70
71 #if HAVE_PTHREADS
72 #include <pthread.h>
73 #endif
74
75 #include <time.h>
76
77 #include "avconv.h"
78 #include "cmdutils.h"
79
80 #include "libavutil/avassert.h"
81
82 const char program_name[] = "avconv";
83 const int program_birth_year = 2000;
84
85 static FILE *vstats_file;
86
87 static int nb_frames_drop = 0;
88
89
90
91 #if HAVE_PTHREADS
92 /* signal to input threads that they should exit; set by the main thread */
93 static int transcoding_finished;
94 #endif
95
96 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
97
98 InputStream **input_streams = NULL;
99 int nb_input_streams = 0;
100 InputFile **input_files = NULL;
101 int nb_input_files = 0;
102
103 OutputStream **output_streams = NULL;
104 int nb_output_streams = 0;
105 OutputFile **output_files = NULL;
106 int nb_output_files = 0;
107
108 FilterGraph **filtergraphs;
109 int nb_filtergraphs;
110
111 static void term_exit(void)
112 {
113 av_log(NULL, AV_LOG_QUIET, "");
114 }
115
116 static volatile int received_sigterm = 0;
117 static volatile int received_nb_signals = 0;
118
119 static void
120 sigterm_handler(int sig)
121 {
122 received_sigterm = sig;
123 received_nb_signals++;
124 term_exit();
125 }
126
127 static void term_init(void)
128 {
129 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
130 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
131 #ifdef SIGXCPU
132 signal(SIGXCPU, sigterm_handler);
133 #endif
134 }
135
136 static int decode_interrupt_cb(void *ctx)
137 {
138 return received_nb_signals > 1;
139 }
140
141 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
142
143 static void avconv_cleanup(int ret)
144 {
145 int i, j;
146
147 for (i = 0; i < nb_filtergraphs; i++) {
148 FilterGraph *fg = filtergraphs[i];
149 avfilter_graph_free(&fg->graph);
150 for (j = 0; j < fg->nb_inputs; j++) {
151 av_freep(&fg->inputs[j]->name);
152 av_freep(&fg->inputs[j]);
153 }
154 av_freep(&fg->inputs);
155 for (j = 0; j < fg->nb_outputs; j++) {
156 av_freep(&fg->outputs[j]->name);
157 av_freep(&fg->outputs[j]);
158 }
159 av_freep(&fg->outputs);
160 av_freep(&fg->graph_desc);
161
162 av_freep(&filtergraphs[i]);
163 }
164 av_freep(&filtergraphs);
165
166 /* close files */
167 for (i = 0; i < nb_output_files; i++) {
168 OutputFile *of = output_files[i];
169 AVFormatContext *s = of->ctx;
170 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
171 avio_close(s->pb);
172 avformat_free_context(s);
173 av_dict_free(&of->opts);
174
175 av_freep(&output_files[i]);
176 }
177 for (i = 0; i < nb_output_streams; i++) {
178 OutputStream *ost = output_streams[i];
179 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
180 while (bsfc) {
181 AVBitStreamFilterContext *next = bsfc->next;
182 av_bitstream_filter_close(bsfc);
183 bsfc = next;
184 }
185 ost->bitstream_filters = NULL;
186 av_frame_free(&ost->filtered_frame);
187
188 av_parser_close(ost->parser);
189
190 av_freep(&ost->forced_keyframes);
191 av_freep(&ost->avfilter);
192 av_freep(&ost->logfile_prefix);
193
194 avcodec_free_context(&ost->enc_ctx);
195
196 av_freep(&output_streams[i]);
197 }
198 for (i = 0; i < nb_input_files; i++) {
199 avformat_close_input(&input_files[i]->ctx);
200 av_freep(&input_files[i]);
201 }
202 for (i = 0; i < nb_input_streams; i++) {
203 InputStream *ist = input_streams[i];
204
205 av_frame_free(&ist->decoded_frame);
206 av_frame_free(&ist->filter_frame);
207 av_dict_free(&ist->decoder_opts);
208 av_freep(&ist->filters);
209 av_freep(&ist->hwaccel_device);
210
211 avcodec_free_context(&ist->dec_ctx);
212
213 av_freep(&input_streams[i]);
214 }
215
216 if (vstats_file)
217 fclose(vstats_file);
218 av_free(vstats_filename);
219
220 av_freep(&input_streams);
221 av_freep(&input_files);
222 av_freep(&output_streams);
223 av_freep(&output_files);
224
225 uninit_opts();
226
227 avformat_network_deinit();
228
229 if (received_sigterm) {
230 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
231 (int) received_sigterm);
232 exit (255);
233 }
234 }
235
236 void assert_avoptions(AVDictionary *m)
237 {
238 AVDictionaryEntry *t;
239 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
240 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
241 exit_program(1);
242 }
243 }
244
245 static void abort_codec_experimental(AVCodec *c, int encoder)
246 {
247 const char *codec_string = encoder ? "encoder" : "decoder";
248 AVCodec *codec;
249 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
250 "results.\nAdd '-strict experimental' if you want to use it.\n",
251 codec_string, c->name);
252 codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id);
253 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
254 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
255 codec_string, codec->name);
256 exit_program(1);
257 }
258
259 /*
260 * Update the requested input sample format based on the output sample format.
261 * This is currently only used to request float output from decoders which
262 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
263 * Ideally this will be removed in the future when decoders do not do format
264 * conversion and only output in their native format.
265 */
266 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
267 AVCodecContext *enc)
268 {
269 /* if sample formats match or a decoder sample format has already been
270 requested, just return */
271 if (enc->sample_fmt == dec->sample_fmt ||
272 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
273 return;
274
275 /* if decoder supports more than one output format */
276 if (dec_codec && dec_codec->sample_fmts &&
277 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
278 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
279 const enum AVSampleFormat *p;
280 int min_dec = INT_MAX, min_inc = INT_MAX;
281 enum AVSampleFormat dec_fmt = AV_SAMPLE_FMT_NONE;
282 enum AVSampleFormat inc_fmt = AV_SAMPLE_FMT_NONE;
283
284 /* find a matching sample format in the encoder */
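/* Heuristic: av_get_packed_sample_fmt() maps each format to its packed
 * variant, and the packed AVSampleFormat values are ordered roughly by
 * increasing precision, so abs(dfmt - efmt) measures how far apart two
 * formats are in quality. The factor of 32 makes the planar/packed mismatch
 * penalty (+1) a mere tie-breaker. Formats at or above the encoder's quality
 * (min_inc) are preferred over lower-quality ones (min_dec); see the
 * fallback below the loop. */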
285 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
286 if (*p == enc->sample_fmt) {
287 dec->request_sample_fmt = *p;
288 return;
289 } else {
290 enum AVSampleFormat dfmt = av_get_packed_sample_fmt(*p);
291 enum AVSampleFormat efmt = av_get_packed_sample_fmt(enc->sample_fmt);
292 int fmt_diff = 32 * abs(dfmt - efmt);
293 if (av_sample_fmt_is_planar(*p) !=
294 av_sample_fmt_is_planar(enc->sample_fmt))
295 fmt_diff++;
296 if (dfmt == efmt) {
297 min_inc = fmt_diff;
298 inc_fmt = *p;
299 } else if (dfmt > efmt) {
300 if (fmt_diff < min_inc) {
301 min_inc = fmt_diff;
302 inc_fmt = *p;
303 }
304 } else {
305 if (fmt_diff < min_dec) {
306 min_dec = fmt_diff;
307 dec_fmt = *p;
308 }
309 }
310 }
311 }
312
313 /* if none match exactly, pick the one closest in quality */
314 dec->request_sample_fmt = min_inc != INT_MAX ? inc_fmt : dec_fmt;
315 }
316 }
317
318 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
319 {
320 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
321 AVCodecContext *avctx = ost->enc_ctx;
322 int ret;
323
324 /*
325 * Audio encoders may split the packets -- #frames in != #packets out.
326 * But there is no reordering, so we can limit the number of output packets
327 * by simply dropping them here.
328 * Counting encoded video frames needs to be done separately because of
329 * reordering, see do_video_out()
330 */
331 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
332 if (ost->frame_number >= ost->max_frames) {
333 av_free_packet(pkt);
334 return;
335 }
336 ost->frame_number++;
337 }
338
339 while (bsfc) {
340 AVPacket new_pkt = *pkt;
341 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
342 &new_pkt.data, &new_pkt.size,
343 pkt->data, pkt->size,
344 pkt->flags & AV_PKT_FLAG_KEY);
345 if (a > 0) {
346 av_free_packet(pkt);
347 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
348 av_buffer_default_free, NULL, 0);
349 if (!new_pkt.buf)
350 exit_program(1);
351 } else if (a < 0) {
352 av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
353 bsfc->filter->name, pkt->stream_index,
354 avctx->codec ? avctx->codec->name : "copy");
355 print_error("", a);
356 if (exit_on_error)
357 exit_program(1);
358 }
359 *pkt = new_pkt;
360
361 bsfc = bsfc->next;
362 }
363
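/* Enforce increasing DTS at the muxer: unless the output format sets
 * AVFMT_TS_NONSTRICT, the negated flag below adds 1 so the new DTS must be
 * strictly greater than the last muxed DTS; non-strict formats tolerate
 * equal timestamps. */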
364 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
365 ost->last_mux_dts != AV_NOPTS_VALUE &&
366 pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
367 av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
368 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
369 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
370 if (exit_on_error) {
371 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
372 exit_program(1);
373 }
374 av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
375 "in incorrect timestamps in the output file.\n",
376 ost->last_mux_dts + 1);
377 pkt->dts = ost->last_mux_dts + 1;
378 if (pkt->pts != AV_NOPTS_VALUE)
379 pkt->pts = FFMAX(pkt->pts, pkt->dts);
380 }
381 ost->last_mux_dts = pkt->dts;
382
383 ost->data_size += pkt->size;
384 ost->packets_written++;
385
386 pkt->stream_index = ost->index;
387 ret = av_interleaved_write_frame(s, pkt);
388 if (ret < 0) {
389 print_error("av_interleaved_write_frame()", ret);
390 exit_program(1);
391 }
392 }
393
394 static int check_recording_time(OutputStream *ost)
395 {
396 OutputFile *of = output_files[ost->file_index];
397
398 if (of->recording_time != INT64_MAX &&
399 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
400 AV_TIME_BASE_Q) >= 0) {
401 ost->finished = 1;
402 return 0;
403 }
404 return 1;
405 }
406
407 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
408 AVFrame *frame)
409 {
410 AVCodecContext *enc = ost->enc_ctx;
411 AVPacket pkt;
412 int got_packet = 0;
413
414 av_init_packet(&pkt);
415 pkt.data = NULL;
416 pkt.size = 0;
417
418 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
419 frame->pts = ost->sync_opts;
420 ost->sync_opts = frame->pts + frame->nb_samples;
421
422 ost->samples_encoded += frame->nb_samples;
423 ost->frames_encoded++;
424
425 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
426 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
427 exit_program(1);
428 }
429
430 if (got_packet) {
431 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
432 write_frame(s, &pkt, ost);
433 }
434 }
435
436 static void do_subtitle_out(AVFormatContext *s,
437 OutputStream *ost,
438 InputStream *ist,
439 AVSubtitle *sub,
440 int64_t pts)
441 {
442 static uint8_t *subtitle_out = NULL;
443 int subtitle_out_max_size = 1024 * 1024;
444 int subtitle_out_size, nb, i;
445 AVCodecContext *enc;
446 AVPacket pkt;
447
448 if (pts == AV_NOPTS_VALUE) {
449 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
450 if (exit_on_error)
451 exit_program(1);
452 return;
453 }
454
455 enc = ost->enc_ctx;
456
457 if (!subtitle_out) {
458 subtitle_out = av_malloc(subtitle_out_max_size);
459 }
460
461 /* Note: DVB subtitles need one packet to draw them and another
462 packet to clear them */
463 /* XXX: signal it in the codec context ? */
464 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
465 nb = 2;
466 else
467 nb = 1;
468
469 for (i = 0; i < nb; i++) {
470 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
471 if (!check_recording_time(ost))
472 return;
473
474 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
475 // start_display_time is required to be 0
476 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
477 sub->end_display_time -= sub->start_display_time;
478 sub->start_display_time = 0;
479
480 ost->frames_encoded++;
481
482 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
483 subtitle_out_max_size, sub);
484 if (subtitle_out_size < 0) {
485 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
486 exit_program(1);
487 }
488
489 av_init_packet(&pkt);
490 pkt.data = subtitle_out;
491 pkt.size = subtitle_out_size;
492 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
493 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
494 /* XXX: the pts correction is handled here. Maybe handling
495 it in the codec would be better */
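/* start/end_display_time are in milliseconds; the factor of 90 assumes the
 * usual 90 kHz MPEG transport stream time base (90 ticks per ms). */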
496 if (i == 0)
497 pkt.pts += 90 * sub->start_display_time;
498 else
499 pkt.pts += 90 * sub->end_display_time;
500 }
501 write_frame(s, &pkt, ost);
502 }
503 }
504
505 static void do_video_out(AVFormatContext *s,
506 OutputStream *ost,
507 AVFrame *in_picture,
508 int *frame_size)
509 {
510 int ret, format_video_sync;
511 AVPacket pkt;
512 AVCodecContext *enc = ost->enc_ctx;
513
514 *frame_size = 0;
515
516 format_video_sync = video_sync_method;
517 if (format_video_sync == VSYNC_AUTO)
518 format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
519 (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
520 if (format_video_sync != VSYNC_PASSTHROUGH &&
521 ost->frame_number &&
522 in_picture->pts != AV_NOPTS_VALUE &&
523 in_picture->pts < ost->sync_opts) {
524 nb_frames_drop++;
525 av_log(NULL, AV_LOG_WARNING,
526 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
527 ost->frame_number, ost->st->index, in_picture->pts);
528 return;
529 }
530
531 if (in_picture->pts == AV_NOPTS_VALUE)
532 in_picture->pts = ost->sync_opts;
533 ost->sync_opts = in_picture->pts;
534
535
536 if (!ost->frame_number)
537 ost->first_pts = in_picture->pts;
538
539 av_init_packet(&pkt);
540 pkt.data = NULL;
541 pkt.size = 0;
542
543 if (ost->frame_number >= ost->max_frames)
544 return;
545
546 if (s->oformat->flags & AVFMT_RAWPICTURE &&
547 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
548 /* raw pictures are written as an AVPicture structure to
549 avoid any copies. We temporarily support this older
550 method. */
551 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
552 enc->coded_frame->top_field_first = in_picture->top_field_first;
553 pkt.data = (uint8_t *)in_picture;
554 pkt.size = sizeof(AVPicture);
555 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
556 pkt.flags |= AV_PKT_FLAG_KEY;
557
558 write_frame(s, &pkt, ost);
559 } else {
560 int got_packet;
561
562 if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
563 ost->top_field_first >= 0)
564 in_picture->top_field_first = !!ost->top_field_first;
565
566 in_picture->quality = enc->global_quality;
567 in_picture->pict_type = 0;
568 if (ost->forced_kf_index < ost->forced_kf_count &&
569 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
570 in_picture->pict_type = AV_PICTURE_TYPE_I;
571 ost->forced_kf_index++;
572 }
573
574 ost->frames_encoded++;
575
576 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
577 if (ret < 0) {
578 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
579 exit_program(1);
580 }
581
582 if (got_packet) {
583 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
584 write_frame(s, &pkt, ost);
585 *frame_size = pkt.size;
586
587 /* if two pass, output log */
588 if (ost->logfile && enc->stats_out) {
589 fprintf(ost->logfile, "%s", enc->stats_out);
590 }
591 }
592 }
593 ost->sync_opts++;
594 /*
595 * For video, number of frames in == number of packets out.
596 * But there may be reordering, so we can't throw away frames on encoder
597 * flush; we need to limit them here, before they go into the encoder.
598 */
599 ost->frame_number++;
600 }
601
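/* The argument is a mean squared error already normalized by the peak value
 * squared (255 * 255, see the callers), so this returns the usual PSNR in
 * dB: -10 * log10(MSE / MAX^2). */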
602 static double psnr(double d)
603 {
604 return -10.0 * log(d) / log(10.0);
605 }
606
607 static void do_video_stats(OutputStream *ost, int frame_size)
608 {
609 AVCodecContext *enc;
610 int frame_number;
611 double ti1, bitrate, avg_bitrate;
612
613 /* this is executed only the first time do_video_stats is called */
614 if (!vstats_file) {
615 vstats_file = fopen(vstats_filename, "w");
616 if (!vstats_file) {
617 perror("fopen");
618 exit_program(1);
619 }
620 }
621
622 enc = ost->enc_ctx;
623 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
624 frame_number = ost->frame_number;
625 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
626 if (enc->flags&CODEC_FLAG_PSNR)
627 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
628
629 fprintf(vstats_file,"f_size= %6d ", frame_size);
630 /* compute pts value */
631 ti1 = ost->sync_opts * av_q2d(enc->time_base);
632 if (ti1 < 0.01)
633 ti1 = 0.01;
634
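/* per-frame bitrate in kbit/s, taking one time_base tick as the frame
 * duration, plus the running average over the elapsed stream time */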
635 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
636 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
637 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
638 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
639 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
640 }
641 }
642
643 /*
644 * Read one frame of lavfi output for ost and encode it.
645 */
646 static int poll_filter(OutputStream *ost)
647 {
648 OutputFile *of = output_files[ost->file_index];
649 AVFrame *filtered_frame = NULL;
650 int frame_size, ret;
651
652 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
653 return AVERROR(ENOMEM);
654 }
655 filtered_frame = ost->filtered_frame;
656
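/* encoders without CODEC_CAP_VARIABLE_FRAME_SIZE must be fed exactly
 * frame_size samples at a time, so pull that many from the buffersink;
 * otherwise just take the next complete frame */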
657 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
658 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
659 ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame,
660 ost->enc_ctx->frame_size);
661 else
662 ret = av_buffersink_get_frame(ost->filter->filter, filtered_frame);
663
664 if (ret < 0)
665 return ret;
666
667 if (filtered_frame->pts != AV_NOPTS_VALUE) {
668 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
669 filtered_frame->pts = av_rescale_q(filtered_frame->pts,
670 ost->filter->filter->inputs[0]->time_base,
671 ost->enc_ctx->time_base) -
672 av_rescale_q(start_time,
673 AV_TIME_BASE_Q,
674 ost->enc_ctx->time_base);
675 }
676
677 switch (ost->filter->filter->inputs[0]->type) {
678 case AVMEDIA_TYPE_VIDEO:
679 if (!ost->frame_aspect_ratio)
680 ost->enc_ctx->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
681
682 do_video_out(of->ctx, ost, filtered_frame, &frame_size);
683 if (vstats_filename && frame_size)
684 do_video_stats(ost, frame_size);
685 break;
686 case AVMEDIA_TYPE_AUDIO:
687 do_audio_out(of->ctx, ost, filtered_frame);
688 break;
689 default:
690 // TODO support subtitle filters
691 av_assert0(0);
692 }
693
694 av_frame_unref(filtered_frame);
695
696 return 0;
697 }
698
699 static void finish_output_stream(OutputStream *ost)
700 {
701 OutputFile *of = output_files[ost->file_index];
702 int i;
703
704 ost->finished = 1;
705
706 if (of->shortest) {
707 for (i = 0; i < of->ctx->nb_streams; i++)
708 output_streams[of->ost_index + i]->finished = 1;
709 }
710 }
711
712 /*
713 * Read as many frames as possible from lavfi and encode them.
714 *
715 * Always read from the active stream with the lowest timestamp. If no frames
716 * are available for it, return EAGAIN and wait for more input. This way we
717 * can use lavfi sources that generate an unlimited number of frames without
718 * memory usage exploding.
719 */
720 static int poll_filters(void)
721 {
722 int i, ret = 0;
723
724 while (ret >= 0 && !received_sigterm) {
725 OutputStream *ost = NULL;
726 int64_t min_pts = INT64_MAX;
727
728 /* choose output stream with the lowest timestamp */
729 for (i = 0; i < nb_output_streams; i++) {
730 int64_t pts = output_streams[i]->sync_opts;
731
732 if (!output_streams[i]->filter || output_streams[i]->finished)
733 continue;
734
735 pts = av_rescale_q(pts, output_streams[i]->enc_ctx->time_base,
736 AV_TIME_BASE_Q);
737 if (pts < min_pts) {
738 min_pts = pts;
739 ost = output_streams[i];
740 }
741 }
742
743 if (!ost)
744 break;
745
746 ret = poll_filter(ost);
747
748 if (ret == AVERROR_EOF) {
749 finish_output_stream(ost);
750 ret = 0;
751 } else if (ret == AVERROR(EAGAIN))
752 return 0;
753 }
754
755 return ret;
756 }
757
758 static void print_final_stats(int64_t total_size)
759 {
760 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
761 uint64_t data_size = 0;
762 float percent = -1.0;
763 int i, j;
764
765 for (i = 0; i < nb_output_streams; i++) {
766 OutputStream *ost = output_streams[i];
767 switch (ost->enc_ctx->codec_type) {
768 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
769 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
770 default: other_size += ost->data_size; break;
771 }
772 extra_size += ost->enc_ctx->extradata_size;
773 data_size += ost->data_size;
774 }
775
776 if (data_size && total_size >= data_size)
777 percent = 100.0 * (total_size - data_size) / data_size;
778
779 av_log(NULL, AV_LOG_INFO, "\n");
780 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
781 video_size / 1024.0,
782 audio_size / 1024.0,
783 other_size / 1024.0,
784 extra_size / 1024.0);
785 if (percent >= 0.0)
786 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
787 else
788 av_log(NULL, AV_LOG_INFO, "unknown");
789 av_log(NULL, AV_LOG_INFO, "\n");
790
791 /* print verbose per-stream stats */
792 for (i = 0; i < nb_input_files; i++) {
793 InputFile *f = input_files[i];
794 uint64_t total_packets = 0, total_size = 0;
795
796 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
797 i, f->ctx->filename);
798
799 for (j = 0; j < f->nb_streams; j++) {
800 InputStream *ist = input_streams[f->ist_index + j];
801 enum AVMediaType type = ist->dec_ctx->codec_type;
802
803 total_size += ist->data_size;
804 total_packets += ist->nb_packets;
805
806 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
807 i, j, media_type_string(type));
808 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
809 ist->nb_packets, ist->data_size);
810
811 if (ist->decoding_needed) {
812 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
813 ist->frames_decoded);
814 if (type == AVMEDIA_TYPE_AUDIO)
815 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
816 av_log(NULL, AV_LOG_VERBOSE, "; ");
817 }
818
819 av_log(NULL, AV_LOG_VERBOSE, "\n");
820 }
821
822 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
823 total_packets, total_size);
824 }
825
826 for (i = 0; i < nb_output_files; i++) {
827 OutputFile *of = output_files[i];
828 uint64_t total_packets = 0, total_size = 0;
829
830 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
831 i, of->ctx->filename);
832
833 for (j = 0; j < of->ctx->nb_streams; j++) {
834 OutputStream *ost = output_streams[of->ost_index + j];
835 enum AVMediaType type = ost->enc_ctx->codec_type;
836
837 total_size += ost->data_size;
838 total_packets += ost->packets_written;
839
840 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
841 i, j, media_type_string(type));
842 if (ost->encoding_needed) {
843 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
844 ost->frames_encoded);
845 if (type == AVMEDIA_TYPE_AUDIO)
846 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
847 av_log(NULL, AV_LOG_VERBOSE, "; ");
848 }
849
850 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
851 ost->packets_written, ost->data_size);
852
853 av_log(NULL, AV_LOG_VERBOSE, "\n");
854 }
855
856 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
857 total_packets, total_size);
858 }
859 }
860
861 static void print_report(int is_last_report, int64_t timer_start)
862 {
863 char buf[1024];
864 OutputStream *ost;
865 AVFormatContext *oc;
866 int64_t total_size;
867 AVCodecContext *enc;
868 int frame_number, vid, i;
869 double bitrate, ti1, pts;
870 static int64_t last_time = -1;
871 static int qp_histogram[52];
872
873 if (!print_stats && !is_last_report)
874 return;
875
876 if (!is_last_report) {
877 int64_t cur_time;
878 /* display the report every 0.5 seconds */
879 cur_time = av_gettime_relative();
880 if (last_time == -1) {
881 last_time = cur_time;
882 return;
883 }
884 if ((cur_time - last_time) < 500000)
885 return;
886 last_time = cur_time;
887 }
888
889
890 oc = output_files[0]->ctx;
891
892 total_size = avio_size(oc->pb);
893 if (total_size <= 0) // FIXME improve avio_size() so it works with non-seekable output too
894 total_size = avio_tell(oc->pb);
895 if (total_size < 0) {
896 char errbuf[128];
897 av_strerror(total_size, errbuf, sizeof(errbuf));
898 av_log(NULL, AV_LOG_VERBOSE, "Bitrate not available, "
899 "avio_tell() failed: %s\n", errbuf);
900 total_size = 0;
901 }
902
903 buf[0] = '\0';
904 ti1 = 1e10;
905 vid = 0;
906 for (i = 0; i < nb_output_streams; i++) {
907 float q = -1;
908 ost = output_streams[i];
909 enc = ost->enc_ctx;
910 if (!ost->stream_copy && enc->coded_frame)
911 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
912 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
913 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
914 }
915 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
916 float t = (av_gettime_relative() - timer_start) / 1000000.0;
917
918 frame_number = ost->frame_number;
919 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
920 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
921 if (is_last_report)
922 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
923 if (qp_hist) {
924 int j;
925 int qp = lrintf(q);
926 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
927 qp_histogram[qp]++;
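/* print the histogram as one hex digit per QP value, log2-scaled so
 * large counts still fit in a single digit */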
928 for (j = 0; j < 32; j++)
929 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
930 }
931 if (enc->flags&CODEC_FLAG_PSNR) {
932 int j;
933 double error, error_sum = 0;
934 double scale, scale_sum = 0;
935 char type[3] = { 'Y','U','V' };
936 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
937 for (j = 0; j < 3; j++) {
938 if (is_last_report) {
939 error = enc->error[j];
940 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
941 } else {
942 error = enc->coded_frame->error[j];
943 scale = enc->width * enc->height * 255.0 * 255.0;
944 }
945 if (j)
946 scale /= 4;
947 error_sum += error;
948 scale_sum += scale;
949 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
950 }
951 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
952 }
953 vid = 1;
954 }
955 /* compute min output value */
956 pts = (double)ost->last_mux_dts * av_q2d(ost->st->time_base);
957 if ((pts < ti1) && (pts > 0))
958 ti1 = pts;
959 }
960 if (ti1 < 0.01)
961 ti1 = 0.01;
962
963 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
964
965 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
966 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
967 (double)total_size / 1024, ti1, bitrate);
968
969 if (nb_frames_drop)
970 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " drop=%d",
971 nb_frames_drop);
972
973 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
974
975 fflush(stderr);
976
977 if (is_last_report)
978 print_final_stats(total_size);
979
980 }
981
982 static void flush_encoders(void)
983 {
984 int i, ret;
985
986 for (i = 0; i < nb_output_streams; i++) {
987 OutputStream *ost = output_streams[i];
988 AVCodecContext *enc = ost->enc_ctx;
989 AVFormatContext *os = output_files[ost->file_index]->ctx;
990 int stop_encoding = 0;
991
992 if (!ost->encoding_needed)
993 continue;
994
995 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
996 continue;
997 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
998 continue;
999
1000 for (;;) {
1001 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1002 const char *desc;
1003
1004 switch (enc->codec_type) {
1005 case AVMEDIA_TYPE_AUDIO:
1006 encode = avcodec_encode_audio2;
1007 desc = "Audio";
1008 break;
1009 case AVMEDIA_TYPE_VIDEO:
1010 encode = avcodec_encode_video2;
1011 desc = "Video";
1012 break;
1013 default:
1014 stop_encoding = 1;
1015 }
1016
1017 if (encode) {
1018 AVPacket pkt;
1019 int got_packet;
1020 av_init_packet(&pkt);
1021 pkt.data = NULL;
1022 pkt.size = 0;
1023
1024 ret = encode(enc, &pkt, NULL, &got_packet);
1025 if (ret < 0) {
1026 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1027 exit_program(1);
1028 }
1029 if (ost->logfile && enc->stats_out) {
1030 fprintf(ost->logfile, "%s", enc->stats_out);
1031 }
1032 if (!got_packet) {
1033 stop_encoding = 1;
1034 break;
1035 }
1036 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1037 write_frame(os, &pkt, ost);
1038 }
1039
1040 if (stop_encoding)
1041 break;
1042 }
1043 }
1044 }
1045
1046 /*
1047 * Check whether a packet from ist should be written into ost at this time
1048 */
1049 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1050 {
1051 OutputFile *of = output_files[ost->file_index];
1052 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1053
1054 if (ost->source_index != ist_index)
1055 return 0;
1056
1057 if (of->start_time != AV_NOPTS_VALUE && ist->last_dts < of->start_time)
1058 return 0;
1059
1060 return 1;
1061 }
1062
1063 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1064 {
1065 OutputFile *of = output_files[ost->file_index];
1066 InputFile *f = input_files [ist->file_index];
1067 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1068 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1069 AVPacket opkt;
1070
1071 av_init_packet(&opkt);
1072
1073 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1074 !ost->copy_initial_nonkeyframes)
1075 return;
1076
1077 if (of->recording_time != INT64_MAX &&
1078 ist->last_dts >= of->recording_time + start_time) {
1079 ost->finished = 1;
1080 return;
1081 }
1082
1083 if (f->recording_time != INT64_MAX) {
1084 start_time = f->ctx->start_time;
1085 if (f->start_time != AV_NOPTS_VALUE)
1086 start_time += f->start_time;
1087 if (ist->last_dts >= f->recording_time + start_time) {
1088 ost->finished = 1;
1089 return;
1090 }
1091 }
1092
1093 /* force the input stream PTS */
1094 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1095 ost->sync_opts++;
1096
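/* rescale the packet timestamps from the input stream time base to the
 * output stream time base and shift them by the output start time */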
1097 if (pkt->pts != AV_NOPTS_VALUE)
1098 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1099 else
1100 opkt.pts = AV_NOPTS_VALUE;
1101
1102 if (pkt->dts == AV_NOPTS_VALUE)
1103 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
1104 else
1105 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1106 opkt.dts -= ost_tb_start_time;
1107
1108 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1109 opkt.flags = pkt->flags;
1110
1111 // FIXME remove the following 2 lines; they should be replaced by the bitstream filters
1112 if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1113 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1114 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1115 && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
1116 ) {
1117 if (av_parser_change(ost->parser, ost->st->codec,
1118 &opkt.data, &opkt.size,
1119 pkt->data, pkt->size,
1120 pkt->flags & AV_PKT_FLAG_KEY)) {
1121 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1122 if (!opkt.buf)
1123 exit_program(1);
1124 }
1125 } else {
1126 opkt.data = pkt->data;
1127 opkt.size = pkt->size;
1128 }
1129
1130 write_frame(of->ctx, &opkt, ost);
1131 }
1132
1133 int guess_input_channel_layout(InputStream *ist)
1134 {
1135 AVCodecContext *dec = ist->dec_ctx;
1136
1137 if (!dec->channel_layout) {
1138 char layout_name[256];
1139
1140 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1141 if (!dec->channel_layout)
1142 return 0;
1143 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1144 dec->channels, dec->channel_layout);
1145 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1146 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1147 }
1148 return 1;
1149 }
1150
1151 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1152 {
1153 AVFrame *decoded_frame, *f;
1154 AVCodecContext *avctx = ist->dec_ctx;
1155 int i, ret, err = 0, resample_changed;
1156
1157 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1158 return AVERROR(ENOMEM);
1159 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1160 return AVERROR(ENOMEM);
1161 decoded_frame = ist->decoded_frame;
1162
1163 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1164 if (!*got_output || ret < 0) {
1165 if (!pkt->size) {
1166 for (i = 0; i < ist->nb_filters; i++)
1167 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
1168 }
1169 return ret;
1170 }
1171
1172 ist->samples_decoded += decoded_frame->nb_samples;
1173 ist->frames_decoded++;
1174
1175 /* if the decoder provides a pts, use it instead of the last packet pts.
1176 the decoder could be delaying output by a packet or more. */
1177 if (decoded_frame->pts != AV_NOPTS_VALUE)
1178 ist->next_dts = decoded_frame->pts;
1179 else if (pkt->pts != AV_NOPTS_VALUE)
1180 decoded_frame->pts = pkt->pts;
1181 pkt->pts = AV_NOPTS_VALUE;
1182
1183 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1184 ist->resample_channels != avctx->channels ||
1185 ist->resample_channel_layout != decoded_frame->channel_layout ||
1186 ist->resample_sample_rate != decoded_frame->sample_rate;
1187 if (resample_changed) {
1188 char layout1[64], layout2[64];
1189
1190 if (!guess_input_channel_layout(ist)) {
1191 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1192 "layout for Input Stream #%d.%d\n", ist->file_index,
1193 ist->st->index);
1194 exit_program(1);
1195 }
1196 decoded_frame->channel_layout = avctx->channel_layout;
1197
1198 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1199 ist->resample_channel_layout);
1200 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1201 decoded_frame->channel_layout);
1202
1203 av_log(NULL, AV_LOG_INFO,
1204 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1205 ist->file_index, ist->st->index,
1206 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1207 ist->resample_channels, layout1,
1208 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1209 avctx->channels, layout2);
1210
1211 ist->resample_sample_fmt = decoded_frame->format;
1212 ist->resample_sample_rate = decoded_frame->sample_rate;
1213 ist->resample_channel_layout = decoded_frame->channel_layout;
1214 ist->resample_channels = avctx->channels;
1215
1216 for (i = 0; i < nb_filtergraphs; i++)
1217 if (ist_in_filtergraph(filtergraphs[i], ist) &&
1218 configure_filtergraph(filtergraphs[i]) < 0) {
1219 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1220 exit_program(1);
1221 }
1222 }
1223
1224 if (decoded_frame->pts != AV_NOPTS_VALUE)
1225 decoded_frame->pts = av_rescale_q(decoded_frame->pts,
1226 ist->st->time_base,
1227 (AVRational){1, avctx->sample_rate});
1228 for (i = 0; i < ist->nb_filters; i++) {
1229 if (i < ist->nb_filters - 1) {
1230 f = ist->filter_frame;
1231 err = av_frame_ref(f, decoded_frame);
1232 if (err < 0)
1233 break;
1234 } else
1235 f = decoded_frame;
1236
1237 err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
1238 if (err < 0)
1239 break;
1240 }
1241
1242 av_frame_unref(ist->filter_frame);
1243 av_frame_unref(decoded_frame);
1244 return err < 0 ? err : ret;
1245 }
1246
1247 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1248 {
1249 AVFrame *decoded_frame, *f;
1250 int i, ret = 0, err = 0, resample_changed;
1251
1252 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1253 return AVERROR(ENOMEM);
1254 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1255 return AVERROR(ENOMEM);
1256 decoded_frame = ist->decoded_frame;
1257
1258 ret = avcodec_decode_video2(ist->dec_ctx,
1259 decoded_frame, got_output, pkt);
1260 if (!*got_output || ret < 0) {
1261 if (!pkt->size) {
1262 for (i = 0; i < ist->nb_filters; i++)
1263 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
1264 }
1265 return ret;
1266 }
1267
1268 ist->frames_decoded++;
1269
1270 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
1271 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
1272 if (err < 0)
1273 goto fail;
1274 }
1275 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
1276
1277 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
1278 decoded_frame->pkt_dts);
1279 pkt->size = 0;
1280
1281 if (ist->st->sample_aspect_ratio.num)
1282 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
1283
1284 resample_changed = ist->resample_width != decoded_frame->width ||
1285 ist->resample_height != decoded_frame->height ||
1286 ist->resample_pix_fmt != decoded_frame->format;
1287 if (resample_changed) {
1288 av_log(NULL, AV_LOG_INFO,
1289 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1290 ist->file_index, ist->st->index,
1291 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
1292 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
1293
1294 ret = poll_filters();
1295 if (ret < 0 && (ret != AVERROR_EOF && ret != AVERROR(EAGAIN)))
1296 av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
1297
1298 ist->resample_width = decoded_frame->width;
1299 ist->resample_height = decoded_frame->height;
1300 ist->resample_pix_fmt = decoded_frame->format;
1301
1302 for (i = 0; i < nb_filtergraphs; i++)
1303 if (ist_in_filtergraph(filtergraphs[i], ist) &&
1304 configure_filtergraph(filtergraphs[i]) < 0) {
1305 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1306 exit_program(1);
1307 }
1308 }
1309
1310 for (i = 0; i < ist->nb_filters; i++) {
1311 if (i < ist->nb_filters - 1) {
1312 f = ist->filter_frame;
1313 err = av_frame_ref(f, decoded_frame);
1314 if (err < 0)
1315 break;
1316 } else
1317 f = decoded_frame;
1318
1319 err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
1320 if (err < 0)
1321 break;
1322 }
1323
1324 fail:
1325 av_frame_unref(ist->filter_frame);
1326 av_frame_unref(decoded_frame);
1327 return err < 0 ? err : ret;
1328 }
1329
1330 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
1331 {
1332 AVSubtitle subtitle;
1333 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
1334 &subtitle, got_output, pkt);
1335 if (ret < 0)
1336 return ret;
1337 if (!*got_output)
1338 return ret;
1339
1340 ist->frames_decoded++;
1341
1342 for (i = 0; i < nb_output_streams; i++) {
1343 OutputStream *ost = output_streams[i];
1344
1345 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1346 continue;
1347
1348 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
1349 }
1350
1351 avsubtitle_free(&subtitle);
1352 return ret;
1353 }
1354
1355 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1356 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
1357 {
1358 int i;
1359 int got_output;
1360 AVPacket avpkt;
1361
1362 if (ist->next_dts == AV_NOPTS_VALUE)
1363 ist->next_dts = ist->last_dts;
1364
1365 if (!pkt) {
1366 /* EOF handling */
1367 av_init_packet(&avpkt);
1368 avpkt.data = NULL;
1369 avpkt.size = 0;
1370 goto handle_eof;
1371 } else {
1372 avpkt = *pkt;
1373 }
1374
1375 if (pkt->dts != AV_NOPTS_VALUE)
1376 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1377
1378 // while we have more to decode or while the decoder did output something on EOF
1379 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
1380 int ret = 0;
1381 handle_eof:
1382
1383 ist->last_dts = ist->next_dts;
1384
1385 if (avpkt.size && avpkt.size != pkt->size &&
1386 !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
1387 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
1388 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1389 ist->showed_multi_packet_warning = 1;
1390 }
1391
1392 switch (ist->dec_ctx->codec_type) {
1393 case AVMEDIA_TYPE_AUDIO:
1394 ret = decode_audio (ist, &avpkt, &got_output);
1395 break;
1396 case AVMEDIA_TYPE_VIDEO:
1397 ret = decode_video (ist, &avpkt, &got_output);
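/* estimate the dts of the next frame: prefer the packet duration, then
 * the stream's average frame rate, then the codec frame rate (with
 * repeated fields accounted for via the parser's repeat_pict) */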
1398 if (avpkt.duration)
1399 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
1400 else if (ist->st->avg_frame_rate.num)
1401 ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
1402 AV_TIME_BASE_Q);
1403 else if (ist->dec_ctx->framerate.num != 0) {
1404 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
1405 ist->dec_ctx->ticks_per_frame;
1406 ist->next_dts += av_rescale_q(ticks, ist->dec_ctx->framerate, AV_TIME_BASE_Q);
1407 }
1408 break;
1409 case AVMEDIA_TYPE_SUBTITLE:
1410 ret = transcode_subtitles(ist, &avpkt, &got_output);
1411 break;
1412 default:
1413 return -1;
1414 }
1415
1416 if (ret < 0)
1417 return ret;
1418 // touch data and size only if not EOF
1419 if (pkt) {
1420 avpkt.data += ret;
1421 avpkt.size -= ret;
1422 }
1423 if (!got_output) {
1424 continue;
1425 }
1426 }
1427
1428 /* handle stream copy */
1429 if (!ist->decoding_needed) {
1430 ist->last_dts = ist->next_dts;
1431 switch (ist->dec_ctx->codec_type) {
1432 case AVMEDIA_TYPE_AUDIO:
1433 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
1434 ist->dec_ctx->sample_rate;
1435 break;
1436 case AVMEDIA_TYPE_VIDEO:
1437 if (ist->dec_ctx->framerate.num != 0) {
1438 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
1439 ist->next_dts += ((int64_t)AV_TIME_BASE *
1440 ist->dec_ctx->framerate.den * ticks) /
1441 ist->dec_ctx->framerate.num;
1442 }
1443 break;
1444 }
1445 }
1446 for (i = 0; pkt && i < nb_output_streams; i++) {
1447 OutputStream *ost = output_streams[i];
1448
1449 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1450 continue;
1451
1452 do_streamcopy(ist, ost, pkt);
1453 }
1454
1455 return 0;
1456 }
1457
1458 static void print_sdp(void)
1459 {
1460 char sdp[16384];
1461 int i;
1462 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
1463
1464 if (!avc)
1465 exit_program(1);
1466 for (i = 0; i < nb_output_files; i++)
1467 avc[i] = output_files[i]->ctx;
1468
1469 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1470 printf("SDP:\n%s\n", sdp);
1471 fflush(stdout);
1472 av_freep(&avc);
1473 }
1474
1475 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
1476 {
1477 int i;
1478 for (i = 0; hwaccels[i].name; i++)
1479 if (hwaccels[i].pix_fmt == pix_fmt)
1480 return &hwaccels[i];
1481 return NULL;
1482 }
1483
1484 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
1485 {
1486 InputStream *ist = s->opaque;
1487 const enum AVPixelFormat *p;
1488 int ret;
1489
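/* pick the first hardware pixel format for which a usable hwaccel can be
 * initialized; if none can, fall through to the first software format in
 * the list offered by the decoder */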
1490 for (p = pix_fmts; *p != -1; p++) {
1491 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
1492 const HWAccel *hwaccel;
1493
1494 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1495 break;
1496
1497 hwaccel = get_hwaccel(*p);
1498 if (!hwaccel ||
1499 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
1500 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
1501 continue;
1502
1503 ret = hwaccel->init(s);
1504 if (ret < 0) {
1505 if (ist->hwaccel_id == hwaccel->id) {
1506 av_log(NULL, AV_LOG_FATAL,
1507 "%s hwaccel requested for input stream #%d:%d, "
1508 "but cannot be initialized.\n", hwaccel->name,
1509 ist->file_index, ist->st->index);
1510 exit_program(1);
1511 }
1512 continue;
1513 }
1514 ist->active_hwaccel_id = hwaccel->id;
1515 ist->hwaccel_pix_fmt = *p;
1516 break;
1517 }
1518
1519 return *p;
1520 }
1521
1522 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
1523 {
1524 InputStream *ist = s->opaque;
1525
1526 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
1527 return ist->hwaccel_get_buffer(s, frame, flags);
1528
1529 return avcodec_default_get_buffer2(s, frame, flags);
1530 }
1531
1532 static int init_input_stream(int ist_index, char *error, int error_len)
1533 {
1534 int i, ret;
1535 InputStream *ist = input_streams[ist_index];
1536 if (ist->decoding_needed) {
1537 AVCodec *codec = ist->dec;
1538 if (!codec) {
1539 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
1540 ist->dec_ctx->codec_id, ist->file_index, ist->st->index);
1541 return AVERROR(EINVAL);
1542 }
1543
1544 /* update requested sample format for the decoder based on the
1545 corresponding encoder sample format */
1546 for (i = 0; i < nb_output_streams; i++) {
1547 OutputStream *ost = output_streams[i];
1548 if (ost->source_index == ist_index) {
1549 update_sample_fmt(ist->dec_ctx, codec, ost->enc_ctx);
1550 break;
1551 }
1552 }
1553
1554 ist->dec_ctx->opaque = ist;
1555 ist->dec_ctx->get_format = get_format;
1556 ist->dec_ctx->get_buffer2 = get_buffer;
1557 ist->dec_ctx->thread_safe_callbacks = 1;
1558
1559 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
1560
1561 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
1562 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
1563 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
1564 char errbuf[128];
1565 if (ret == AVERROR_EXPERIMENTAL)
1566 abort_codec_experimental(codec, 0);
1567
1568 av_strerror(ret, errbuf, sizeof(errbuf));
1569
1570 snprintf(error, error_len,
1571 "Error while opening decoder for input stream "
1572 "#%d:%d : %s",
1573 ist->file_index, ist->st->index, errbuf);
1574 return ret;
1575 }
1576 assert_avoptions(ist->decoder_opts);
1577 }
1578
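/* start the dts counter one decoder delay (has_b_frames frames) before zero
 * so that, after reordering, the first output timestamps land near zero */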
1579 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1580 ist->next_dts = AV_NOPTS_VALUE;
1581 init_pts_correction(&ist->pts_ctx);
1582
1583 return 0;
1584 }
1585
1586 static InputStream *get_input_stream(OutputStream *ost)
1587 {
1588 if (ost->source_index >= 0)
1589 return input_streams[ost->source_index];
1590
1591 if (ost->filter) {
1592 FilterGraph *fg = ost->filter->graph;
1593 int i;
1594
1595 for (i = 0; i < fg->nb_inputs; i++)
1596 if (fg->inputs[i]->ist->dec_ctx->codec_type == ost->enc_ctx->codec_type)
1597 return fg->inputs[i]->ist;
1598 }
1599
1600 return NULL;
1601 }
1602
1603 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1604 AVCodecContext *avctx)
1605 {
1606 char *p;
1607 int n = 1, i;
1608 int64_t t;
1609
1610 for (p = kf; *p; p++)
1611 if (*p == ',')
1612 n++;
1613 ost->forced_kf_count = n;
1614 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
1615 if (!ost->forced_kf_pts) {
1616 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
1617 exit_program(1);
1618 }
1619
1620 p = kf;
1621 for (i = 0; i < n; i++) {
1622 char *next = strchr(p, ',');
1623
1624 if (next)
1625 *next++ = 0;
1626
1627 t = parse_time_or_die("force_key_frames", p, 1);
1628 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
1629
1630 p = next;
1631 }
1632 }
1633
1634 static void set_encoder_id(OutputFile *of, OutputStream *ost)
1635 {
1636 AVDictionaryEntry *e;
1637
1638 uint8_t *encoder_string;
1639 int encoder_string_len;
1640 int format_flags = 0;
1641
1642 e = av_dict_get(of->opts, "fflags", NULL, 0);
1643 if (e) {
1644 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
1645 if (!o)
1646 return;
1647 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
1648 }
1649
1650 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
1651 encoder_string = av_mallocz(encoder_string_len);
1652 if (!encoder_string)
1653 exit_program(1);
1654
1655 if (!(format_flags & AVFMT_FLAG_BITEXACT))
1656 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
1657 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
1658 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
1659 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
1660 }
1661
1662 static int transcode_init(void)
1663 {
1664 int ret = 0, i, j, k;
1665 AVFormatContext *oc;
1666 OutputStream *ost;
1667 InputStream *ist;
1668 char error[1024];
1669 int want_sdp = 1;
1670
1671 /* init framerate emulation */
1672 for (i = 0; i < nb_input_files; i++) {
1673 InputFile *ifile = input_files[i];
1674 if (ifile->rate_emu)
1675 for (j = 0; j < ifile->nb_streams; j++)
1676 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
1677 }
1678
1679 /* output stream init */
1680 for (i = 0; i < nb_output_files; i++) {
1681 oc = output_files[i]->ctx;
1682 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
1683 av_dump_format(oc, i, oc->filename, 1);
1684 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
1685 return AVERROR(EINVAL);
1686 }
1687 }
1688
1689 /* init complex filtergraphs */
1690 for (i = 0; i < nb_filtergraphs; i++)
1691 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
1692 return ret;
1693
1694 /* for each output stream, we compute the right encoding parameters */
1695 for (i = 0; i < nb_output_streams; i++) {
1696 AVCodecContext *enc_ctx;
1697 AVCodecContext *dec_ctx = NULL;
1698 ost = output_streams[i];
1699 oc = output_files[ost->file_index]->ctx;
1700 ist = get_input_stream(ost);
1701
1702 if (ost->attachment_filename)
1703 continue;
1704
1705 enc_ctx = ost->enc_ctx;
1706
1707 if (ist) {
1708 dec_ctx = ist->dec_ctx;
1709
1710 ost->st->disposition = ist->st->disposition;
1711 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
1712 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
1713 }
1714
1715 if (ost->stream_copy) {
1716 AVRational sar;
1717 uint64_t extra_size;
1718
1719 av_assert0(ist && !ost->filter);
1720
1721 extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
1722
1723 if (extra_size > INT_MAX) {
1724 return AVERROR(EINVAL);
1725 }
1726
1727 /* if stream_copy is selected, no need to decode or encode */
1728 enc_ctx->codec_id = dec_ctx->codec_id;
1729 enc_ctx->codec_type = dec_ctx->codec_type;
1730
1731 if (!enc_ctx->codec_tag) {
1732 if (!oc->oformat->codec_tag ||
1733 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
1734 av_codec_get_tag(oc->oformat->codec_tag, dec_ctx->codec_id) <= 0)
1735 enc_ctx->codec_tag = dec_ctx->codec_tag;
1736 }
1737
1738 enc_ctx->bit_rate = dec_ctx->bit_rate;
1739 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
1740 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
1741 enc_ctx->field_order = dec_ctx->field_order;
1742 enc_ctx->extradata = av_mallocz(extra_size);
1743 if (!enc_ctx->extradata) {
1744 return AVERROR(ENOMEM);
1745 }
1746 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
1747 enc_ctx->extradata_size = dec_ctx->extradata_size;
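/* without -copytb, derive the muxing time base from the codec time base
 * scaled by ticks_per_frame (so field-based codecs keep the right frame
 * duration); with -copytb, reuse the input stream time base unchanged */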
1748 if (!copy_tb) {
1749 enc_ctx->time_base = dec_ctx->time_base;
1750 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
1751 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
1752 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
1753 } else
1754 enc_ctx->time_base = ist->st->time_base;
1755
1756 ost->parser = av_parser_init(enc_ctx->codec_id);
1757
1758 switch (enc_ctx->codec_type) {
1759 case AVMEDIA_TYPE_AUDIO:
1760 if (audio_volume != 256) {
1761 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
1762 exit_program(1);
1763 }
1764 enc_ctx->channel_layout = dec_ctx->channel_layout;
1765 enc_ctx->sample_rate = dec_ctx->sample_rate;
1766 enc_ctx->channels = dec_ctx->channels;
1767 enc_ctx->frame_size = dec_ctx->frame_size;
1768 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
1769 enc_ctx->block_align = dec_ctx->block_align;
1770 break;
1771 case AVMEDIA_TYPE_VIDEO:
1772 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
1773 enc_ctx->width = dec_ctx->width;
1774 enc_ctx->height = dec_ctx->height;
1775 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
1776 if (ost->frame_aspect_ratio)
1777 sar = av_d2q(ost->frame_aspect_ratio * enc_ctx->height / enc_ctx->width, 255);
1778 else if (ist->st->sample_aspect_ratio.num)
1779 sar = ist->st->sample_aspect_ratio;
1780 else
1781 sar = dec_ctx->sample_aspect_ratio;
1782 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
1783 break;
1784 case AVMEDIA_TYPE_SUBTITLE:
1785 enc_ctx->width = dec_ctx->width;
1786 enc_ctx->height = dec_ctx->height;
1787 break;
1788 case AVMEDIA_TYPE_DATA:
1789 case AVMEDIA_TYPE_ATTACHMENT:
1790 break;
1791 default:
1792 abort();
1793 }
1794 } else {
1795 if (!ost->enc) {
1796 /* should only happen when a default codec is not present. */
1797 snprintf(error, sizeof(error), "Automatic encoder selection "
1798 "failed for output stream #%d:%d. Default encoder for "
1799 "format %s is probably disabled. Please choose an "
1800 "encoder manually.\n", ost->file_index, ost->index,
1801 oc->oformat->name);
1802 ret = AVERROR(EINVAL);
1803 goto dump_format;
1804 }
1805
1806 if (ist)
1807 ist->decoding_needed = 1;
1808 ost->encoding_needed = 1;
1809
1810 set_encoder_id(output_files[ost->file_index], ost);
1811
1812 /*
1813 * We want CFR output if and only if one of these is true:
1814 * 1) the user specified an output framerate with -r
1815 * 2) the user specified -vsync cfr
1816 * 3) the output format is CFR and the user didn't force vsync to
1817 * something other than CFR
1818 *
1819 * In such a case, set ost->frame_rate.
1820 */
1821 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO &&
1822 !ost->frame_rate.num && ist &&
1823 (video_sync_method == VSYNC_CFR ||
1824 (video_sync_method == VSYNC_AUTO &&
1825 !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
1826 if (ist->framerate.num)
1827 ost->frame_rate = ist->framerate;
1828 else if (ist->st->avg_frame_rate.num)
1829 ost->frame_rate = ist->st->avg_frame_rate;
1830 else {
1831 av_log(NULL, AV_LOG_WARNING, "Constant framerate requested "
1832 "for the output stream #%d:%d, but no information "
1833 "about the input framerate is available. Falling "
1834 "back to a default value of 25fps. Use the -r option "
1835 "if you want a different framerate.\n",
1836 ost->file_index, ost->index);
1837 ost->frame_rate = (AVRational){ 25, 1 };
1838 }
1839
1840 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
1841 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
1842 ost->frame_rate = ost->enc->supported_framerates[idx];
1843 }
1844 }
1845
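             /* audio and video encoders are always fed through a filtergraph;
              * create and configure a simple one now unless the stream is
              * already attached to a complex -filter_complex graph */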
1846 if (!ost->filter &&
1847 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
1848 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
1849 FilterGraph *fg;
1850 fg = init_simple_filtergraph(ist, ost);
1851 if (configure_filtergraph(fg)) {
1852 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
1853 exit_program(1);
1854 }
1855 }
1856
1857 switch (enc_ctx->codec_type) {
1858 case AVMEDIA_TYPE_AUDIO:
1859 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
1860 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
1861 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
1862 enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
1863 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
1864 break;
1865 case AVMEDIA_TYPE_VIDEO:
1866 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
1867
1868 enc_ctx->width = ost->filter->filter->inputs[0]->w;
1869 enc_ctx->height = ost->filter->filter->inputs[0]->h;
1870 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
1871 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
1872 av_d2q(ost->frame_aspect_ratio * enc_ctx->height/enc_ctx->width, 255) :
1873 ost->filter->filter->inputs[0]->sample_aspect_ratio;
1874 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
1875
1876 ost->st->avg_frame_rate = ost->frame_rate;
1877
1878 if (dec_ctx &&
1879 (enc_ctx->width != dec_ctx->width ||
1880 enc_ctx->height != dec_ctx->height ||
1881 enc_ctx->pix_fmt != dec_ctx->pix_fmt)) {
1882 enc_ctx->bits_per_raw_sample = 0;
1883 }
1884
1885 if (ost->forced_keyframes)
1886 parse_forced_key_frames(ost->forced_keyframes, ost,
1887 ost->enc_ctx);
1888 break;
1889 case AVMEDIA_TYPE_SUBTITLE:
1890 enc_ctx->time_base = (AVRational){1, 1000};
1891 break;
1892 default:
1893 abort();
1894 break;
1895 }
1896 /* two pass mode */
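             /* the pass log is named <prefix>-<output stream number>.log
              * ("av2pass-0.log" with the default prefix); libx264 gets the
              * name through its private "stats" option, other encoders use
              * ost->logfile on pass 1 and enc_ctx->stats_in on pass 2 */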
1897 if ((enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1898 char logfilename[1024];
1899 FILE *f;
1900
1901 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1902 ost->logfile_prefix ? ost->logfile_prefix :
1903 DEFAULT_PASS_LOGFILENAME_PREFIX,
1904 i);
1905 if (!strcmp(ost->enc->name, "libx264")) {
1906 av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
1907 } else {
1908 if (enc_ctx->flags & CODEC_FLAG_PASS1) {
1909 f = fopen(logfilename, "wb");
1910 if (!f) {
1911 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
1912 logfilename, strerror(errno));
1913 exit_program(1);
1914 }
1915 ost->logfile = f;
1916 } else {
1917 char *logbuffer;
1918 size_t logbuffer_size;
1919 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
1920 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
1921 logfilename);
1922 exit_program(1);
1923 }
1924 enc_ctx->stats_in = logbuffer;
1925 }
1926 }
1927 }
1928 }
1929 }
1930
1931 /* open each encoder */
1932 for (i = 0; i < nb_output_streams; i++) {
1933 ost = output_streams[i];
1934 if (ost->encoding_needed) {
1935 AVCodec *codec = ost->enc;
1936 AVCodecContext *dec = NULL;
1937
1938 if ((ist = get_input_stream(ost)))
1939 dec = ist->dec_ctx;
1940 if (dec && dec->subtitle_header) {
1941 ost->enc_ctx->subtitle_header = av_malloc(dec->subtitle_header_size);
1942 if (!ost->enc_ctx->subtitle_header) {
1943 ret = AVERROR(ENOMEM);
1944 goto dump_format;
1945 }
1946 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
1947 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
1948 }
1949 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
1950 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
1951 av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
1952
1953 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
1954 if (ret == AVERROR_EXPERIMENTAL)
1955 abort_codec_experimental(codec, 1);
1956 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
1957 ost->file_index, ost->index);
1958 goto dump_format;
1959 }
1960 assert_avoptions(ost->encoder_opts);
1961 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
1962                 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low. "
1963                                              "It takes bits/s as argument, not kbits/s\n");
1964 } else {
1965 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
1966 if (ret < 0)
1967 return ret;
1968 }
1969
1970 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
1971 if (ret < 0) {
1972 av_log(NULL, AV_LOG_FATAL,
1973 "Error initializing the output stream codec context.\n");
1974 exit_program(1);
1975 }
1976
1977 ost->st->time_base = ost->enc_ctx->time_base;
1978 }
1979
1980 /* init input streams */
1981 for (i = 0; i < nb_input_streams; i++)
1982 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
1983 goto dump_format;
1984
1985 /* discard unused programs */
1986 for (i = 0; i < nb_input_files; i++) {
1987 InputFile *ifile = input_files[i];
1988 for (j = 0; j < ifile->ctx->nb_programs; j++) {
1989 AVProgram *p = ifile->ctx->programs[j];
1990 int discard = AVDISCARD_ALL;
1991
1992 for (k = 0; k < p->nb_stream_indexes; k++)
1993 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
1994 discard = AVDISCARD_DEFAULT;
1995 break;
1996 }
1997 p->discard = discard;
1998 }
1999 }
2000
2001 /* open files and write file headers */
2002 for (i = 0; i < nb_output_files; i++) {
2003 oc = output_files[i]->ctx;
2004 oc->interrupt_callback = int_cb;
2005 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
2006 char errbuf[128];
2007 av_strerror(ret, errbuf, sizeof(errbuf));
2008 snprintf(error, sizeof(error),
2009 "Could not write header for output file #%d "
2010                      "(incorrect codec parameters?): %s",
2011 i, errbuf);
2012 ret = AVERROR(EINVAL);
2013 goto dump_format;
2014 }
2015 assert_avoptions(output_files[i]->opts);
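             /* an SDP is printed at the end only if every output file uses
              * the RTP muxer */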
2016 if (strcmp(oc->oformat->name, "rtp")) {
2017 want_sdp = 0;
2018 }
2019 }
2020
2021 dump_format:
2022     /* dump the output file parameters - this cannot be done earlier in
2023        case of stream copy */
2024 for (i = 0; i < nb_output_files; i++) {
2025 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2026 }
2027
2028 /* dump the stream mapping */
2029 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2030 for (i = 0; i < nb_input_streams; i++) {
2031 ist = input_streams[i];
2032
2033 for (j = 0; j < ist->nb_filters; j++) {
2034 if (ist->filters[j]->graph->graph_desc) {
2035 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2036 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2037 ist->filters[j]->name);
2038 if (nb_filtergraphs > 1)
2039 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2040 av_log(NULL, AV_LOG_INFO, "\n");
2041 }
2042 }
2043 }
2044
2045 for (i = 0; i < nb_output_streams; i++) {
2046 ost = output_streams[i];
2047
2048 if (ost->attachment_filename) {
2049 /* an attached file */
2050 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2051 ost->attachment_filename, ost->file_index, ost->index);
2052 continue;
2053 }
2054
2055 if (ost->filter && ost->filter->graph->graph_desc) {
2056 /* output from a complex graph */
2057 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
2058 if (nb_filtergraphs > 1)
2059 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2060
2061 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2062 ost->index, ost->enc ? ost->enc->name : "?");
2063 continue;
2064 }
2065
2066 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2067 input_streams[ost->source_index]->file_index,
2068 input_streams[ost->source_index]->st->index,
2069 ost->file_index,
2070 ost->index);
2071 if (ost->sync_ist != input_streams[ost->source_index])
2072 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2073 ost->sync_ist->file_index,
2074 ost->sync_ist->st->index);
2075 if (ost->stream_copy)
2076 av_log(NULL, AV_LOG_INFO, " (copy)");
2077 else {
2078 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
2079 const AVCodec *out_codec = ost->enc;
2080 const char *decoder_name = "?";
2081 const char *in_codec_name = "?";
2082 const char *encoder_name = "?";
2083 const char *out_codec_name = "?";
2084
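                 /* if the implementation name matches the generic codec name,
                  * this is the built-in codec and is reported as "native";
                  * external libraries such as libx264 keep their own name */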
2085 if (in_codec) {
2086 decoder_name = in_codec->name;
2087 in_codec_name = avcodec_descriptor_get(in_codec->id)->name;
2088 if (!strcmp(decoder_name, in_codec_name))
2089 decoder_name = "native";
2090 }
2091
2092 if (out_codec) {
2093 encoder_name = out_codec->name;
2094 out_codec_name = avcodec_descriptor_get(out_codec->id)->name;
2095 if (!strcmp(encoder_name, out_codec_name))
2096 encoder_name = "native";
2097 }
2098
2099 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
2100 in_codec_name, decoder_name,
2101 out_codec_name, encoder_name);
2102 }
2103 av_log(NULL, AV_LOG_INFO, "\n");
2104 }
2105
2106 if (ret) {
2107 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2108 return ret;
2109 }
2110
2111 if (want_sdp) {
2112 print_sdp();
2113 }
2114
2115 return 0;
2116 }
2117
2118 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
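     /* A stream no longer needs output once it is finished or its output file
      * has reached the -fs size limit; hitting the -frames limit on one stream
      * marks every stream of that output file as finished. */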
2119 static int need_output(void)
2120 {
2121 int i;
2122
2123 for (i = 0; i < nb_output_streams; i++) {
2124 OutputStream *ost = output_streams[i];
2125 OutputFile *of = output_files[ost->file_index];
2126 AVFormatContext *os = output_files[ost->file_index]->ctx;
2127
2128 if (ost->finished ||
2129 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
2130 continue;
2131 if (ost->frame_number >= ost->max_frames) {
2132 int j;
2133 for (j = 0; j < of->ctx->nb_streams; j++)
2134 output_streams[of->ost_index + j]->finished = 1;
2135 continue;
2136 }
2137
2138 return 1;
2139 }
2140
2141 return 0;
2142 }
2143
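     /* Pick the input file to read from next: the one containing the
      * non-discarded stream with the lowest last decoded DTS. Returns NULL
      * when every usable input has reached EOF or is flagged EAGAIN. */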
2144 static InputFile *select_input_file(void)
2145 {
2146 InputFile *ifile = NULL;
2147 int64_t ipts_min = INT64_MAX;
2148 int i;
2149
2150 for (i = 0; i < nb_input_streams; i++) {
2151 InputStream *ist = input_streams[i];
2152 int64_t ipts = ist->last_dts;
2153
2154 if (ist->discard || input_files[ist->file_index]->eagain)
2155 continue;
2156 if (!input_files[ist->file_index]->eof_reached) {
2157 if (ipts < ipts_min) {
2158 ipts_min = ipts;
2159 ifile = input_files[ist->file_index];
2160 }
2161 }
2162 }
2163
2164 return ifile;
2165 }
2166
2167 #if HAVE_PTHREADS
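     /* Reader thread, one per input file (used only when more than one input
      * is open): keeps pulling packets with av_read_frame() and queues them in
      * the file's FIFO, waiting on the condition variable while it is full. */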
2168 static void *input_thread(void *arg)
2169 {
2170 InputFile *f = arg;
2171 int ret = 0;
2172
2173 while (!transcoding_finished && ret >= 0) {
2174 AVPacket pkt;
2175 ret = av_read_frame(f->ctx, &pkt);
2176
2177 if (ret == AVERROR(EAGAIN)) {
2178 av_usleep(10000);
2179 ret = 0;
2180 continue;
2181 } else if (ret < 0)
2182 break;
2183
2184 pthread_mutex_lock(&f->fifo_lock);
2185 while (!av_fifo_space(f->fifo))
2186 pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
2187
2188 av_dup_packet(&pkt);
2189 av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
2190
2191 pthread_mutex_unlock(&f->fifo_lock);
2192 }
2193
2194 f->finished = 1;
2195 return NULL;
2196 }
2197
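     /* Tell all reader threads to stop, drain their packet FIFOs, join the
      * threads and free the FIFOs. */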
2198 static void free_input_threads(void)
2199 {
2200 int i;
2201
2202 if (nb_input_files == 1)
2203 return;
2204
2205 transcoding_finished = 1;
2206
2207 for (i = 0; i < nb_input_files; i++) {
2208 InputFile *f = input_files[i];
2209 AVPacket pkt;
2210
2211 if (!f->fifo || f->joined)
2212 continue;
2213
2214 pthread_mutex_lock(&f->fifo_lock);
2215 while (av_fifo_size(f->fifo)) {
2216 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2217 av_free_packet(&pkt);
2218 }
2219 pthread_cond_signal(&f->fifo_cond);
2220 pthread_mutex_unlock(&f->fifo_lock);
2221
2222 pthread_join(f->thread, NULL);
2223 f->joined = 1;
2224
2225 while (av_fifo_size(f->fifo)) {
2226 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2227 av_free_packet(&pkt);
2228 }
2229 av_fifo_free(f->fifo);
2230 }
2231 }
2232
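     /* Start one reader thread per input file (only when at least two inputs
      * are open), each with an 8-packet FIFO guarded by a mutex/condition pair. */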
2233 static int init_input_threads(void)
2234 {
2235 int i, ret;
2236
2237 if (nb_input_files == 1)
2238 return 0;
2239
2240 for (i = 0; i < nb_input_files; i++) {
2241 InputFile *f = input_files[i];
2242
2243 if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
2244 return AVERROR(ENOMEM);
2245
2246 pthread_mutex_init(&f->fifo_lock, NULL);
2247 pthread_cond_init (&f->fifo_cond, NULL);
2248
2249 if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
2250 return AVERROR(ret);
2251 }
2252 return 0;
2253 }
2254
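     /* Fetch one packet from the file's FIFO without blocking: EAGAIN while
      * the reader thread is still running and the FIFO is empty, EOF once the
      * thread has finished. */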
2255 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2256 {
2257 int ret = 0;
2258
2259 pthread_mutex_lock(&f->fifo_lock);
2260
2261 if (av_fifo_size(f->fifo)) {
2262 av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
2263 pthread_cond_signal(&f->fifo_cond);
2264 } else {
2265 if (f->finished)
2266 ret = AVERROR_EOF;
2267 else
2268 ret = AVERROR(EAGAIN);
2269 }
2270
2271 pthread_mutex_unlock(&f->fifo_lock);
2272
2273 return ret;
2274 }
2275 #endif
2276
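     /* Read the next packet, either directly or via the reader thread. With
      * -re (rate emulation) the read is postponed (EAGAIN) until the stream's
      * last DTS, in microseconds, has elapsed in wall-clock time. */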
2277 static int get_input_packet(InputFile *f, AVPacket *pkt)
2278 {
2279 if (f->rate_emu) {
2280 int i;
2281 for (i = 0; i < f->nb_streams; i++) {
2282 InputStream *ist = input_streams[f->ist_index + i];
2283 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2284 int64_t now = av_gettime_relative() - ist->start;
2285 if (pts > now)
2286 return AVERROR(EAGAIN);
2287 }
2288 }
2289
2290 #if HAVE_PTHREADS
2291 if (nb_input_files > 1)
2292 return get_input_packet_mt(f, pkt);
2293 #endif
2294 return av_read_frame(f->ctx, pkt);
2295 }
2296
2297 static int got_eagain(void)
2298 {
2299 int i;
2300 for (i = 0; i < nb_input_files; i++)
2301 if (input_files[i]->eagain)
2302 return 1;
2303 return 0;
2304 }
2305
2306 static void reset_eagain(void)
2307 {
2308 int i;
2309 for (i = 0; i < nb_input_files; i++)
2310 input_files[i]->eagain = 0;
2311 }
2312
2313 /*
2314 * Read one packet from an input file and send it for
2315 * - decoding -> lavfi (audio/video)
2316 * - decoding -> encoding -> muxing (subtitles)
2317 * - muxing (streamcopy)
2318 *
2319 * Return
2320 * - 0 -- one packet was read and processed
2321 * - AVERROR(EAGAIN) -- no packets were available for selected file,
2322 * this function should be called again
2323 * - AVERROR_EOF -- this function should not be called again
2324 */
2325 static int process_input(void)
2326 {
2327 InputFile *ifile;
2328 AVFormatContext *is;
2329 InputStream *ist;
2330 AVPacket pkt;
2331 int ret, i, j;
2332
2333 /* select the stream that we must read now */
2334 ifile = select_input_file();
2335     /* if there is none, all inputs are finished or temporarily unavailable */
2336 if (!ifile) {
2337 if (got_eagain()) {
2338 reset_eagain();
2339 av_usleep(10000);
2340 return AVERROR(EAGAIN);
2341 }
2342 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from.\n");
2343 return AVERROR_EOF;
2344 }
2345
2346 is = ifile->ctx;
2347 ret = get_input_packet(ifile, &pkt);
2348
2349 if (ret == AVERROR(EAGAIN)) {
2350 ifile->eagain = 1;
2351 return ret;
2352 }
2353 if (ret < 0) {
2354 if (ret != AVERROR_EOF) {
2355 print_error(is->filename, ret);
2356 if (exit_on_error)
2357 exit_program(1);
2358 }
2359 ifile->eof_reached = 1;
2360
2361 for (i = 0; i < ifile->nb_streams; i++) {
2362 ist = input_streams[ifile->ist_index + i];
2363 if (ist->decoding_needed)
2364 process_input_packet(ist, NULL);
2365
2366 /* mark all outputs that don't go through lavfi as finished */
2367 for (j = 0; j < nb_output_streams; j++) {
2368 OutputStream *ost = output_streams[j];
2369
2370 if (ost->source_index == ifile->ist_index + i &&
2371 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
2372 finish_output_stream(ost);
2373 }
2374 }
2375
2376 return AVERROR(EAGAIN);
2377 }
2378
2379 reset_eagain();
2380
2381 if (do_pkt_dump) {
2382 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2383 is->streams[pkt.stream_index]);
2384 }
2385 /* the following test is needed in case new streams appear
2386        dynamically in the stream: we ignore them */
2387 if (pkt.stream_index >= ifile->nb_streams)
2388 goto discard_packet;
2389
2390 ist = input_streams[ifile->ist_index + pkt.stream_index];
2391
2392 ist->data_size += pkt.size;
2393 ist->nb_packets++;
2394
2395 if (ist->discard)
2396 goto discard_packet;
2397
2398 /* add the stream-global side data to the first packet */
2399 if (ist->nb_packets == 1)
2400 for (i = 0; i < ist->st->nb_side_data; i++) {
2401 AVPacketSideData *src_sd = &ist->st->side_data[i];
2402 uint8_t *dst_data;
2403
2404 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
2405 continue;
2406
2407 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
2408 if (!dst_data)
2409 exit_program(1);
2410
2411 memcpy(dst_data, src_sd->data, src_sd->size);
2412 }
2413
2414 if (pkt.dts != AV_NOPTS_VALUE)
2415 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2416 if (pkt.pts != AV_NOPTS_VALUE)
2417 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2418
2419 if (pkt.pts != AV_NOPTS_VALUE)
2420 pkt.pts *= ist->ts_scale;
2421 if (pkt.dts != AV_NOPTS_VALUE)
2422 pkt.dts *= ist->ts_scale;
2423
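         /* for containers with discontinuous timestamps (AVFMT_TS_DISCONT),
          * fold a DTS jump larger than dts_delta_threshold seconds (or a jump
          * backwards) into the file's ts_offset so that output timestamps stay
          * monotonic, unless -copyts is in effect */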
2424 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2425 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
2426 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
2427 (is->iformat->flags & AVFMT_TS_DISCONT)) {
2428 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2429 int64_t delta = pkt_dts - ist->next_dts;
2430
2431 if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
2432 ifile->ts_offset -= delta;
2433 av_log(NULL, AV_LOG_DEBUG,
2434 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2435 delta, ifile->ts_offset);
2436 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2437 if (pkt.pts != AV_NOPTS_VALUE)
2438 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2439 }
2440 }
2441
2442 ret = process_input_packet(ist, &pkt);
2443 if (ret < 0) {
2444 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
2445 ist->file_index, ist->st->index);
2446 if (exit_on_error)
2447 exit_program(1);
2448 }
2449
2450 discard_packet:
2451 av_free_packet(&pkt);
2452
2453 return 0;
2454 }
2455
2456 /*
2457 * The following code is the main loop of the file converter
2458 */
2459 static int transcode(void)
2460 {
2461 int ret, i, need_input = 1;
2462 AVFormatContext *os;
2463 OutputStream *ost;
2464 InputStream *ist;
2465 int64_t timer_start;
2466
2467 ret = transcode_init();
2468 if (ret < 0)
2469 goto fail;
2470
2471 av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2472 term_init();
2473
2474 timer_start = av_gettime_relative();
2475
2476 #if HAVE_PTHREADS
2477 if ((ret = init_input_threads()) < 0)
2478 goto fail;
2479 #endif
2480
2481 while (!received_sigterm) {
2482 /* check if there's any stream where output is still needed */
2483 if (!need_output()) {
2484 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
2485 break;
2486 }
2487
2488 /* read and process one input packet if needed */
2489 if (need_input) {
2490 ret = process_input();
2491 if (ret == AVERROR_EOF)
2492 need_input = 0;
2493 }
2494
2495 ret = poll_filters();
2496 if (ret < 0) {
2497 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
2498 continue;
2499
2500 av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
2501 break;
2502 }
2503
2504         /* dump a progress report using the first video and audio output streams */
2505 print_report(0, timer_start);
2506 }
2507 #if HAVE_PTHREADS
2508 free_input_threads();
2509 #endif
2510
2511 /* at the end of stream, we must flush the decoder buffers */
2512 for (i = 0; i < nb_input_streams; i++) {
2513 ist = input_streams[i];
2514 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
2515 process_input_packet(ist, NULL);
2516 }
2517 }
2518 poll_filters();
2519 flush_encoders();
2520
2521 term_exit();
2522
2523     /* write the trailer if needed */
2524 for (i = 0; i < nb_output_files; i++) {
2525 os = output_files[i]->ctx;
2526 av_write_trailer(os);
2527 }
2528
2529     /* dump the final report using the first video and audio streams */
2530 print_report(1, timer_start);
2531
2532 /* close each encoder */
2533 for (i = 0; i < nb_output_streams; i++) {
2534 ost = output_streams[i];
2535 if (ost->encoding_needed) {
2536 av_freep(&ost->enc_ctx->stats_in);
2537 }
2538 }
2539
2540 /* close each decoder */
2541 for (i = 0; i < nb_input_streams; i++) {
2542 ist = input_streams[i];
2543 if (ist->decoding_needed) {
2544 avcodec_close(ist->dec_ctx);
2545 if (ist->hwaccel_uninit)
2546 ist->hwaccel_uninit(ist->dec_ctx);
2547 }
2548 }
2549
2550 /* finished ! */
2551 ret = 0;
2552
2553 fail:
2554 #if HAVE_PTHREADS
2555 free_input_threads();
2556 #endif
2557
2558 if (output_streams) {
2559 for (i = 0; i < nb_output_streams; i++) {
2560 ost = output_streams[i];
2561 if (ost) {
2562 if (ost->logfile) {
2563 fclose(ost->logfile);
2564 ost->logfile = NULL;
2565 }
2566 av_free(ost->forced_kf_pts);
2567 av_dict_free(&ost->encoder_opts);
2568 av_dict_free(&ost->resample_opts);
2569 }
2570 }
2571 }
2572 return ret;
2573 }
2574
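     /* User CPU time consumed by the process, in microseconds; falls back to
      * wall-clock time when no CPU-time API is available. */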
2575 static int64_t getutime(void)
2576 {
2577 #if HAVE_GETRUSAGE
2578 struct rusage rusage;
2579
2580 getrusage(RUSAGE_SELF, &rusage);
2581 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
2582 #elif HAVE_GETPROCESSTIMES
2583 HANDLE proc;
2584 FILETIME c, e, k, u;
2585 proc = GetCurrentProcess();
2586 GetProcessTimes(proc, &c, &e, &k, &u);
2587 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
2588 #else
2589 return av_gettime_relative();
2590 #endif
2591 }
2592
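     /* Peak memory use of the process in bytes: ru_maxrss (kB, scaled up)
      * where available, PeakPagefileUsage on Windows, 0 otherwise. */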
2593 static int64_t getmaxrss(void)
2594 {
2595 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
2596 struct rusage rusage;
2597 getrusage(RUSAGE_SELF, &rusage);
2598 return (int64_t)rusage.ru_maxrss * 1024;
2599 #elif HAVE_GETPROCESSMEMORYINFO
2600 HANDLE proc;
2601 PROCESS_MEMORY_COUNTERS memcounters;
2602 proc = GetCurrentProcess();
2603 memcounters.cb = sizeof(memcounters);
2604 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
2605 return memcounters.PeakPagefileUsage;
2606 #else
2607 return 0;
2608 #endif
2609 }
2610
2611 int main(int argc, char **argv)
2612 {
2613 int ret;
2614 int64_t ti;
2615
2616 register_exit(avconv_cleanup);
2617
2618 av_log_set_flags(AV_LOG_SKIP_REPEATED);
2619 parse_loglevel(argc, argv, options);
2620
2621 avcodec_register_all();
2622 #if CONFIG_AVDEVICE
2623 avdevice_register_all();
2624 #endif
2625 avfilter_register_all();
2626 av_register_all();
2627 avformat_network_init();
2628
2629 show_banner();
2630
2631 /* parse options and open all input/output files */
2632 ret = avconv_parse_options(argc, argv);
2633 if (ret < 0)
2634 exit_program(1);
2635
2636 if (nb_output_files <= 0 && nb_input_files == 0) {
2637 show_usage();
2638 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2639 exit_program(1);
2640 }
2641
2642 /* file converter / grab */
2643 if (nb_output_files <= 0) {
2644 fprintf(stderr, "At least one output file must be specified\n");
2645 exit_program(1);
2646 }
2647
2648 ti = getutime();
2649 if (transcode() < 0)
2650 exit_program(1);
2651 ti = getutime() - ti;
2652 if (do_benchmark) {
2653 int maxrss = getmaxrss() / 1024;
2654 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
2655 }
2656
2657 exit_program(0);
2658 return 0;
2659 }