1 /*
2 * avconv main
3 * Copyright (c) 2000-2011 The libav developers.
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <ctype.h>
24 #include <string.h>
25 #include <math.h>
26 #include <stdlib.h>
27 #include <errno.h>
28 #include <signal.h>
29 #include <limits.h>
30 #include "libavformat/avformat.h"
31 #include "libavdevice/avdevice.h"
32 #include "libswscale/swscale.h"
33 #include "libavresample/avresample.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/channel_layout.h"
36 #include "libavutil/parseutils.h"
37 #include "libavutil/samplefmt.h"
38 #include "libavutil/fifo.h"
39 #include "libavutil/intreadwrite.h"
40 #include "libavutil/dict.h"
41 #include "libavutil/mathematics.h"
42 #include "libavutil/pixdesc.h"
43 #include "libavutil/avstring.h"
44 #include "libavutil/libm.h"
45 #include "libavutil/imgutils.h"
46 #include "libavutil/time.h"
47 #include "libavformat/os_support.h"
48
49 # include "libavfilter/avfilter.h"
50 # include "libavfilter/buffersrc.h"
51 # include "libavfilter/buffersink.h"
52
53 #if HAVE_SYS_RESOURCE_H
54 #include <sys/time.h>
55 #include <sys/types.h>
56 #include <sys/resource.h>
57 #elif HAVE_GETPROCESSTIMES
58 #include <windows.h>
59 #endif
60 #if HAVE_GETPROCESSMEMORYINFO
61 #include <windows.h>
62 #include <psapi.h>
63 #endif
64
65 #if HAVE_SYS_SELECT_H
66 #include <sys/select.h>
67 #endif
68
69 #if HAVE_PTHREADS
70 #include <pthread.h>
71 #endif
72
73 #include <time.h>
74
75 #include "avconv.h"
76 #include "cmdutils.h"
77
78 #include "libavutil/avassert.h"
79
80 const char program_name[] = "avconv";
81 const int program_birth_year = 2000;
82
83 static FILE *vstats_file;
84
85 static int64_t video_size = 0;
86 static int64_t audio_size = 0;
87 static int64_t extra_size = 0;
88 static int nb_frames_dup = 0;
89 static int nb_frames_drop = 0;
90
91
92
93 #if HAVE_PTHREADS
94 /* signal to input threads that they should exit; set by the main thread */
95 static int transcoding_finished;
96 #endif
97
98 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
99
100 InputStream **input_streams = NULL;
101 int nb_input_streams = 0;
102 InputFile **input_files = NULL;
103 int nb_input_files = 0;
104
105 OutputStream **output_streams = NULL;
106 int nb_output_streams = 0;
107 OutputFile **output_files = NULL;
108 int nb_output_files = 0;
109
110 FilterGraph **filtergraphs;
111 int nb_filtergraphs;
112
113 static void term_exit(void)
114 {
115 av_log(NULL, AV_LOG_QUIET, "");
116 }
117
118 static volatile int received_sigterm = 0;
119 static volatile int received_nb_signals = 0;
120
121 static void
122 sigterm_handler(int sig)
123 {
124 received_sigterm = sig;
125 received_nb_signals++;
126 term_exit();
127 }
128
129 static void term_init(void)
130 {
131 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
132 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
133 #ifdef SIGXCPU
134 signal(SIGXCPU, sigterm_handler);
135 #endif
136 }
137
138 static int decode_interrupt_cb(void *ctx)
139 {
140 return received_nb_signals > 1;
141 }
142
143 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
144
145 static void avconv_cleanup(int ret)
146 {
147 int i, j;
148
149 for (i = 0; i < nb_filtergraphs; i++) {
150 avfilter_graph_free(&filtergraphs[i]->graph);
151 for (j = 0; j < filtergraphs[i]->nb_inputs; j++) {
152 av_freep(&filtergraphs[i]->inputs[j]->name);
153 av_freep(&filtergraphs[i]->inputs[j]);
154 }
155 av_freep(&filtergraphs[i]->inputs);
156 for (j = 0; j < filtergraphs[i]->nb_outputs; j++) {
157 av_freep(&filtergraphs[i]->outputs[j]->name);
158 av_freep(&filtergraphs[i]->outputs[j]);
159 }
160 av_freep(&filtergraphs[i]->outputs);
161 av_freep(&filtergraphs[i]->graph_desc);
162 av_freep(&filtergraphs[i]);
163 }
164 av_freep(&filtergraphs);
165
166 /* close files */
167 for (i = 0; i < nb_output_files; i++) {
168 AVFormatContext *s = output_files[i]->ctx;
169 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
170 avio_close(s->pb);
171 avformat_free_context(s);
172 av_dict_free(&output_files[i]->opts);
173 av_freep(&output_files[i]);
174 }
175 for (i = 0; i < nb_output_streams; i++) {
176 AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
177 while (bsfc) {
178 AVBitStreamFilterContext *next = bsfc->next;
179 av_bitstream_filter_close(bsfc);
180 bsfc = next;
181 }
182 output_streams[i]->bitstream_filters = NULL;
183 avcodec_free_frame(&output_streams[i]->filtered_frame);
184
185 av_freep(&output_streams[i]->forced_keyframes);
186 av_freep(&output_streams[i]->avfilter);
187 av_freep(&output_streams[i]->logfile_prefix);
188 av_freep(&output_streams[i]);
189 }
190 for (i = 0; i < nb_input_files; i++) {
191 avformat_close_input(&input_files[i]->ctx);
192 av_freep(&input_files[i]);
193 }
194 for (i = 0; i < nb_input_streams; i++) {
195 av_frame_free(&input_streams[i]->decoded_frame);
196 av_frame_free(&input_streams[i]->filter_frame);
197 av_dict_free(&input_streams[i]->opts);
198 av_freep(&input_streams[i]->filters);
199 av_freep(&input_streams[i]);
200 }
201
202 if (vstats_file)
203 fclose(vstats_file);
204 av_free(vstats_filename);
205
206 av_freep(&input_streams);
207 av_freep(&input_files);
208 av_freep(&output_streams);
209 av_freep(&output_files);
210
211 uninit_opts();
212
213 avformat_network_deinit();
214
215 if (received_sigterm) {
216 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
217 (int) received_sigterm);
218 exit (255);
219 }
220 }
221
222 void assert_avoptions(AVDictionary *m)
223 {
224 AVDictionaryEntry *t;
225 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
226 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
227 exit_program(1);
228 }
229 }
230
231 static void abort_codec_experimental(AVCodec *c, int encoder)
232 {
233 const char *codec_string = encoder ? "encoder" : "decoder";
234 AVCodec *codec;
235 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
236 "results.\nAdd '-strict experimental' if you want to use it.\n",
237 codec_string, c->name);
238 codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id);
239 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
240 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
241 codec_string, codec->name);
242 exit_program(1);
243 }
244
245 /*
246 * Update the requested input sample format based on the output sample format.
247 * This is currently only used to request float output from decoders which
248 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
249 * Ideally this will be removed in the future when decoders do not do format
250 * conversion and only output in their native format.
251 */
252 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
253 AVCodecContext *enc)
254 {
255 /* if sample formats match or a decoder sample format has already been
256 requested, just return */
257 if (enc->sample_fmt == dec->sample_fmt ||
258 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
259 return;
260
261 /* if decoder supports more than one output format */
262 if (dec_codec && dec_codec->sample_fmts &&
263 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
264 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
265 const enum AVSampleFormat *p;
266 int min_dec = INT_MAX, min_inc = INT_MAX;
267 enum AVSampleFormat dec_fmt = AV_SAMPLE_FMT_NONE;
268 enum AVSampleFormat inc_fmt = AV_SAMPLE_FMT_NONE;
269
270 /* find a matching sample format in the encoder */
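/* Exact matches are taken immediately; otherwise each candidate format is
 * ranked by the distance of its packed equivalent to the encoder format
 * (weighted by 32), plus 1 if its planarity differs, preferring formats of
 * equal or higher precision over lower-precision ones. */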
271 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
272 if (*p == enc->sample_fmt) {
273 dec->request_sample_fmt = *p;
274 return;
275 } else {
276 enum AVSampleFormat dfmt = av_get_packed_sample_fmt(*p);
277 enum AVSampleFormat efmt = av_get_packed_sample_fmt(enc->sample_fmt);
278 int fmt_diff = 32 * abs(dfmt - efmt);
279 if (av_sample_fmt_is_planar(*p) !=
280 av_sample_fmt_is_planar(enc->sample_fmt))
281 fmt_diff++;
282 if (dfmt == efmt) {
283 min_inc = fmt_diff;
284 inc_fmt = *p;
285 } else if (dfmt > efmt) {
286 if (fmt_diff < min_inc) {
287 min_inc = fmt_diff;
288 inc_fmt = *p;
289 }
290 } else {
291 if (fmt_diff < min_dec) {
292 min_dec = fmt_diff;
293 dec_fmt = *p;
294 }
295 }
296 }
297 }
298
299 /* if none matches exactly, pick the one that is closest in quality */
300 dec->request_sample_fmt = min_inc != INT_MAX ? inc_fmt : dec_fmt;
301 }
302 }
303
304 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
305 {
306 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
307 AVCodecContext *avctx = ost->st->codec;
308 int ret;
309
310 /*
311 * Audio encoders may split the packets -- #frames in != #packets out.
312 * But there is no reordering, so we can limit the number of output packets
313 * by simply dropping them here.
314 * Counting encoded video frames needs to be done separately because of
315 * reordering, see do_video_out()
316 */
317 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
318 if (ost->frame_number >= ost->max_frames) {
319 av_free_packet(pkt);
320 return;
321 }
322 ost->frame_number++;
323 }
324
325 while (bsfc) {
326 AVPacket new_pkt = *pkt;
327 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
328 &new_pkt.data, &new_pkt.size,
329 pkt->data, pkt->size,
330 pkt->flags & AV_PKT_FLAG_KEY);
331 if (a > 0) {
332 av_free_packet(pkt);
333 new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
334 av_buffer_default_free, NULL, 0);
335 if (!new_pkt.buf)
336 exit_program(1);
337 } else if (a < 0) {
338 av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
339 bsfc->filter->name, pkt->stream_index,
340 avctx->codec ? avctx->codec->name : "copy");
341 print_error("", a);
342 if (exit_on_error)
343 exit_program(1);
344 }
345 *pkt = new_pkt;
346
347 bsfc = bsfc->next;
348 }
349
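/* Muxers require monotonically increasing DTS (strictly increasing unless
 * the format sets AVFMT_TS_NONSTRICT); on a violation either abort when
 * exit_on_error is set or nudge the DTS forward and warn. */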
350 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
351 ost->last_mux_dts != AV_NOPTS_VALUE &&
352 pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
353 av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
354 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
355 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
356 if (exit_on_error) {
357 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
358 exit_program(1);
359 }
360 av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
361 "in incorrect timestamps in the output file.\n",
362 ost->last_mux_dts + 1);
363 pkt->dts = ost->last_mux_dts + 1;
364 if (pkt->pts != AV_NOPTS_VALUE)
365 pkt->pts = FFMAX(pkt->pts, pkt->dts);
366 }
367 ost->last_mux_dts = pkt->dts;
368
369 pkt->stream_index = ost->index;
370 ret = av_interleaved_write_frame(s, pkt);
371 if (ret < 0) {
372 print_error("av_interleaved_write_frame()", ret);
373 exit_program(1);
374 }
375 }
376
377 static int check_recording_time(OutputStream *ost)
378 {
379 OutputFile *of = output_files[ost->file_index];
380
381 if (of->recording_time != INT64_MAX &&
382 av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
383 AV_TIME_BASE_Q) >= 0) {
384 ost->finished = 1;
385 return 0;
386 }
387 return 1;
388 }
389
390 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
391 AVFrame *frame)
392 {
393 AVCodecContext *enc = ost->st->codec;
394 AVPacket pkt;
395 int got_packet = 0;
396
397 av_init_packet(&pkt);
398 pkt.data = NULL;
399 pkt.size = 0;
400
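/* sync_opts counts audio samples in the encoder time base: frames without
 * a pts (or with audio sync disabled) are stamped with the running count,
 * which then advances by the number of samples in this frame. */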
401 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
402 frame->pts = ost->sync_opts;
403 ost->sync_opts = frame->pts + frame->nb_samples;
404
405 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
406 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
407 exit_program(1);
408 }
409
410 if (got_packet) {
411 if (pkt.pts != AV_NOPTS_VALUE)
412 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
413 if (pkt.dts != AV_NOPTS_VALUE)
414 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
415 if (pkt.duration > 0)
416 pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
417
418 write_frame(s, &pkt, ost);
419
420 audio_size += pkt.size;
421 }
422 }
423
424 static void do_subtitle_out(AVFormatContext *s,
425 OutputStream *ost,
426 InputStream *ist,
427 AVSubtitle *sub,
428 int64_t pts)
429 {
430 static uint8_t *subtitle_out = NULL;
431 int subtitle_out_max_size = 1024 * 1024;
432 int subtitle_out_size, nb, i;
433 AVCodecContext *enc;
434 AVPacket pkt;
435
436 if (pts == AV_NOPTS_VALUE) {
437 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
438 if (exit_on_error)
439 exit_program(1);
440 return;
441 }
442
443 enc = ost->st->codec;
444
445 if (!subtitle_out) {
446 subtitle_out = av_malloc(subtitle_out_max_size);
447 }
448
449 /* Note: DVB subtitles need one packet to draw them and another
450 packet to clear them */
451 /* XXX: signal it in the codec context ? */
452 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
453 nb = 2;
454 else
455 nb = 1;
456
457 for (i = 0; i < nb; i++) {
458 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
459 if (!check_recording_time(ost))
460 return;
461
462 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
463 // start_display_time is required to be 0
464 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
465 sub->end_display_time -= sub->start_display_time;
466 sub->start_display_time = 0;
467 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
468 subtitle_out_max_size, sub);
469 if (subtitle_out_size < 0) {
470 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
471 exit_program(1);
472 }
473
474 av_init_packet(&pkt);
475 pkt.data = subtitle_out;
476 pkt.size = subtitle_out_size;
477 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
478 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
479 /* XXX: the pts correction is handled here. Maybe handling
480 it in the codec would be better */
481 if (i == 0)
482 pkt.pts += 90 * sub->start_display_time;
483 else
484 pkt.pts += 90 * sub->end_display_time;
485 }
486 write_frame(s, &pkt, ost);
487 }
488 }
489
490 static void do_video_out(AVFormatContext *s,
491 OutputStream *ost,
492 AVFrame *in_picture,
493 int *frame_size)
494 {
495 int ret, format_video_sync;
496 AVPacket pkt;
497 AVCodecContext *enc = ost->st->codec;
498
499 *frame_size = 0;
500
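/* Choose the sync mode: the -vsync setting if given, otherwise passthrough
 * for formats without timestamps, VFR for variable-fps formats and CFR for
 * everything else. Outside passthrough mode, frames whose pts falls behind
 * the stream's sync counter are dropped. */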
501 format_video_sync = video_sync_method;
502 if (format_video_sync == VSYNC_AUTO)
503 format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
504 (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
505 if (format_video_sync != VSYNC_PASSTHROUGH &&
506 ost->frame_number &&
507 in_picture->pts != AV_NOPTS_VALUE &&
508 in_picture->pts < ost->sync_opts) {
509 nb_frames_drop++;
510 av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
511 return;
512 }
513
514 if (in_picture->pts == AV_NOPTS_VALUE)
515 in_picture->pts = ost->sync_opts;
516 ost->sync_opts = in_picture->pts;
517
518
519 if (!ost->frame_number)
520 ost->first_pts = in_picture->pts;
521
522 av_init_packet(&pkt);
523 pkt.data = NULL;
524 pkt.size = 0;
525
526 if (ost->frame_number >= ost->max_frames)
527 return;
528
529 if (s->oformat->flags & AVFMT_RAWPICTURE &&
530 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
531 /* Raw pictures are written as an AVPicture structure to
532 avoid any copies. The older method is still supported
533 temporarily. */
534 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
535 enc->coded_frame->top_field_first = in_picture->top_field_first;
536 pkt.data = (uint8_t *)in_picture;
537 pkt.size = sizeof(AVPicture);
538 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
539 pkt.flags |= AV_PKT_FLAG_KEY;
540
541 write_frame(s, &pkt, ost);
542 } else {
543 int got_packet;
544
545 if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
546 ost->top_field_first >= 0)
547 in_picture->top_field_first = !!ost->top_field_first;
548
549 in_picture->quality = ost->st->codec->global_quality;
550 if (!enc->me_threshold)
551 in_picture->pict_type = 0;
552 if (ost->forced_kf_index < ost->forced_kf_count &&
553 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
554 in_picture->pict_type = AV_PICTURE_TYPE_I;
555 ost->forced_kf_index++;
556 }
557 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
558 if (ret < 0) {
559 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
560 exit_program(1);
561 }
562
563 if (got_packet) {
564 if (pkt.pts != AV_NOPTS_VALUE)
565 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
566 if (pkt.dts != AV_NOPTS_VALUE)
567 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
568
569 write_frame(s, &pkt, ost);
570 *frame_size = pkt.size;
571 video_size += pkt.size;
572
573 /* if two pass, output log */
574 if (ost->logfile && enc->stats_out) {
575 fprintf(ost->logfile, "%s", enc->stats_out);
576 }
577 }
578 }
579 ost->sync_opts++;
580 /*
581 * For video, number of frames in == number of packets out.
582 * But there may be reordering, so we can't throw away frames on encoder
583 * flush, we need to limit them here, before they go into encoder.
584 */
585 ost->frame_number++;
586 }
587
588 static double psnr(double d)
589 {
590 return -10.0 * log(d) / log(10.0);
591 }
592
593 static void do_video_stats(OutputStream *ost, int frame_size)
594 {
595 AVCodecContext *enc;
596 int frame_number;
597 double ti1, bitrate, avg_bitrate;
598
599 /* this is executed just the first time do_video_stats is called */
600 if (!vstats_file) {
601 vstats_file = fopen(vstats_filename, "w");
602 if (!vstats_file) {
603 perror("fopen");
604 exit_program(1);
605 }
606 }
607
608 enc = ost->st->codec;
609 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
610 frame_number = ost->frame_number;
611 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
612 if (enc->flags&CODEC_FLAG_PSNR)
613 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
614
615 fprintf(vstats_file,"f_size= %6d ", frame_size);
616 /* compute pts value */
617 ti1 = ost->sync_opts * av_q2d(enc->time_base);
618 if (ti1 < 0.01)
619 ti1 = 0.01;
620
621 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
622 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
623 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
624 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
625 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
626 }
627 }
628
629 /*
630 * Read one frame of lavfi output for ost and encode it.
631 */
632 static int poll_filter(OutputStream *ost)
633 {
634 OutputFile *of = output_files[ost->file_index];
635 AVFrame *filtered_frame = NULL;
636 int frame_size, ret;
637
638 if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
639 return AVERROR(ENOMEM);
640 } else
641 avcodec_get_frame_defaults(ost->filtered_frame);
642 filtered_frame = ost->filtered_frame;
643
644 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
645 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
646 ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame,
647 ost->st->codec->frame_size);
648 else
649 ret = av_buffersink_get_frame(ost->filter->filter, filtered_frame);
650
651 if (ret < 0)
652 return ret;
653
654 if (filtered_frame->pts != AV_NOPTS_VALUE) {
655 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
656 filtered_frame->pts = av_rescale_q(filtered_frame->pts,
657 ost->filter->filter->inputs[0]->time_base,
658 ost->st->codec->time_base) -
659 av_rescale_q(start_time,
660 AV_TIME_BASE_Q,
661 ost->st->codec->time_base);
662 }
663
664 switch (ost->filter->filter->inputs[0]->type) {
665 case AVMEDIA_TYPE_VIDEO:
666 if (!ost->frame_aspect_ratio)
667 ost->st->codec->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
668
669 do_video_out(of->ctx, ost, filtered_frame, &frame_size);
670 if (vstats_filename && frame_size)
671 do_video_stats(ost, frame_size);
672 break;
673 case AVMEDIA_TYPE_AUDIO:
674 do_audio_out(of->ctx, ost, filtered_frame);
675 break;
676 default:
677 // TODO support subtitle filters
678 av_assert0(0);
679 }
680
681 av_frame_unref(filtered_frame);
682
683 return 0;
684 }
685
686 /*
687 * Read as many frames as possible from lavfi and encode them.
688 *
689 * Always read from the active stream with the lowest timestamp. If no frames
690 * are available for it, return EAGAIN and wait for more input. This way we
691 * can use lavfi sources that generate an unlimited number of frames without memory
692 * usage exploding.
693 */
694 static int poll_filters(void)
695 {
696 int i, j, ret = 0;
697
698 while (ret >= 0 && !received_sigterm) {
699 OutputStream *ost = NULL;
700 int64_t min_pts = INT64_MAX;
701
702 /* choose output stream with the lowest timestamp */
703 for (i = 0; i < nb_output_streams; i++) {
704 int64_t pts = output_streams[i]->sync_opts;
705
706 if (!output_streams[i]->filter || output_streams[i]->finished)
707 continue;
708
709 pts = av_rescale_q(pts, output_streams[i]->st->codec->time_base,
710 AV_TIME_BASE_Q);
711 if (pts < min_pts) {
712 min_pts = pts;
713 ost = output_streams[i];
714 }
715 }
716
717 if (!ost)
718 break;
719
720 ret = poll_filter(ost);
721
722 if (ret == AVERROR_EOF) {
723 OutputFile *of = output_files[ost->file_index];
724
725 ost->finished = 1;
726
727 if (of->shortest) {
728 for (j = 0; j < of->ctx->nb_streams; j++)
729 output_streams[of->ost_index + j]->finished = 1;
730 }
731
732 ret = 0;
733 } else if (ret == AVERROR(EAGAIN))
734 return 0;
735 }
736
737 return ret;
738 }
739
740 static void print_report(int is_last_report, int64_t timer_start)
741 {
742 char buf[1024];
743 OutputStream *ost;
744 AVFormatContext *oc;
745 int64_t total_size;
746 AVCodecContext *enc;
747 int frame_number, vid, i;
748 double bitrate, ti1, pts;
749 static int64_t last_time = -1;
750 static int qp_histogram[52];
751
752 if (!print_stats && !is_last_report)
753 return;
754
755 if (!is_last_report) {
756 int64_t cur_time;
757 /* display the report every 0.5 seconds */
758 cur_time = av_gettime();
759 if (last_time == -1) {
760 last_time = cur_time;
761 return;
762 }
763 if ((cur_time - last_time) < 500000)
764 return;
765 last_time = cur_time;
766 }
767
768
769 oc = output_files[0]->ctx;
770
771 total_size = avio_size(oc->pb);
772 if (total_size <= 0) // FIXME improve avio_size() so it works with non-seekable output too
773 total_size = avio_tell(oc->pb);
774 if (total_size < 0) {
775 char errbuf[128];
776 av_strerror(total_size, errbuf, sizeof(errbuf));
777 av_log(NULL, AV_LOG_VERBOSE, "Bitrate not available, "
778 "avio_tell() failed: %s\n", errbuf);
779 total_size = 0;
780 }
781
782 buf[0] = '\0';
783 ti1 = 1e10;
784 vid = 0;
785 for (i = 0; i < nb_output_streams; i++) {
786 float q = -1;
787 ost = output_streams[i];
788 enc = ost->st->codec;
789 if (!ost->stream_copy && enc->coded_frame)
790 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
791 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
792 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
793 }
794 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
795 float t = (av_gettime() - timer_start) / 1000000.0;
796
797 frame_number = ost->frame_number;
798 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
799 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
800 if (is_last_report)
801 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
802 if (qp_hist) {
803 int j;
804 int qp = lrintf(q);
805 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
806 qp_histogram[qp]++;
807 for (j = 0; j < 32; j++)
808 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
809 }
810 if (enc->flags&CODEC_FLAG_PSNR) {
811 int j;
812 double error, error_sum = 0;
813 double scale, scale_sum = 0;
814 char type[3] = { 'Y','U','V' };
815 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
816 for (j = 0; j < 3; j++) {
817 if (is_last_report) {
818 error = enc->error[j];
819 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
820 } else {
821 error = enc->coded_frame->error[j];
822 scale = enc->width * enc->height * 255.0 * 255.0;
823 }
824 if (j)
825 scale /= 4;
826 error_sum += error;
827 scale_sum += scale;
828 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
829 }
830 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
831 }
832 vid = 1;
833 }
834 /* compute min output value */
835 pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
836 if ((pts < ti1) && (pts > 0))
837 ti1 = pts;
838 }
839 if (ti1 < 0.01)
840 ti1 = 0.01;
841
842 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
843
844 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
845 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
846 (double)total_size / 1024, ti1, bitrate);
847
848 if (nb_frames_dup || nb_frames_drop)
849 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
850 nb_frames_dup, nb_frames_drop);
851
852 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
853
854 fflush(stderr);
855
856 if (is_last_report) {
857 int64_t raw= audio_size + video_size + extra_size;
858 av_log(NULL, AV_LOG_INFO, "\n");
859 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
860 video_size / 1024.0,
861 audio_size / 1024.0,
862 extra_size / 1024.0,
863 100.0 * (total_size - raw) / raw
864 );
865 }
866 }
867
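/*
 * Drain every encoder of its delayed packets by feeding it NULL frames
 * until it stops producing output, writing each returned packet to the muxer.
 */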
868 static void flush_encoders(void)
869 {
870 int i, ret;
871
872 for (i = 0; i < nb_output_streams; i++) {
873 OutputStream *ost = output_streams[i];
874 AVCodecContext *enc = ost->st->codec;
875 AVFormatContext *os = output_files[ost->file_index]->ctx;
876 int stop_encoding = 0;
877
878 if (!ost->encoding_needed)
879 continue;
880
881 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
882 continue;
883 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
884 continue;
885
886 for (;;) {
887 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
888 const char *desc;
889 int64_t *size;
890
891 switch (ost->st->codec->codec_type) {
892 case AVMEDIA_TYPE_AUDIO:
893 encode = avcodec_encode_audio2;
894 desc = "Audio";
895 size = &audio_size;
896 break;
897 case AVMEDIA_TYPE_VIDEO:
898 encode = avcodec_encode_video2;
899 desc = "Video";
900 size = &video_size;
901 break;
902 default:
903 stop_encoding = 1;
904 }
905
906 if (encode) {
907 AVPacket pkt;
908 int got_packet;
909 av_init_packet(&pkt);
910 pkt.data = NULL;
911 pkt.size = 0;
912
913 ret = encode(enc, &pkt, NULL, &got_packet);
914 if (ret < 0) {
915 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
916 exit_program(1);
917 }
918 *size += ret;
919 if (ost->logfile && enc->stats_out) {
920 fprintf(ost->logfile, "%s", enc->stats_out);
921 }
922 if (!got_packet) {
923 stop_encoding = 1;
924 break;
925 }
926 if (pkt.pts != AV_NOPTS_VALUE)
927 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
928 if (pkt.dts != AV_NOPTS_VALUE)
929 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
930 if (pkt.duration > 0)
931 pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
932 write_frame(os, &pkt, ost);
933 }
934
935 if (stop_encoding)
936 break;
937 }
938 }
939 }
940
941 /*
942 * Check whether a packet from ist should be written into ost at this time
943 */
944 static int check_output_constraints(InputStream *ist, OutputStream *ost)
945 {
946 OutputFile *of = output_files[ost->file_index];
947 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
948
949 if (ost->source_index != ist_index)
950 return 0;
951
952 if (of->start_time != AV_NOPTS_VALUE && ist->last_dts < of->start_time)
953 return 0;
954
955 return 1;
956 }
957
958 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
959 {
960 OutputFile *of = output_files[ost->file_index];
961 InputFile *f = input_files [ist->file_index];
962 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
963 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
964 AVPacket opkt;
965
966 av_init_packet(&opkt);
967
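/* When stream-copying, drop leading non-keyframes unless -copyinkf was given,
 * stop once the output or input recording time is reached, and rescale the
 * timestamps into the output stream time base relative to its start time. */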
968 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
969 !ost->copy_initial_nonkeyframes)
970 return;
971
972 if (of->recording_time != INT64_MAX &&
973 ist->last_dts >= of->recording_time + start_time) {
974 ost->finished = 1;
975 return;
976 }
977
978 if (f->recording_time != INT64_MAX) {
979 start_time = f->ctx->start_time;
980 if (f->start_time != AV_NOPTS_VALUE)
981 start_time += f->start_time;
982 if (ist->last_dts >= f->recording_time + start_time) {
983 ost->finished = 1;
984 return;
985 }
986 }
987
988 /* force the input stream PTS */
989 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
990 audio_size += pkt->size;
991 else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
992 video_size += pkt->size;
993 ost->sync_opts++;
994 }
995
996 if (pkt->pts != AV_NOPTS_VALUE)
997 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
998 else
999 opkt.pts = AV_NOPTS_VALUE;
1000
1001 if (pkt->dts == AV_NOPTS_VALUE)
1002 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
1003 else
1004 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1005 opkt.dts -= ost_tb_start_time;
1006
1007 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1008 opkt.flags = pkt->flags;
1009
1010 // FIXME remove the following 2 lines; they should be replaced by the bitstream filters
1011 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1012 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1013 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1014 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1015 ) {
1016 if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY)) {
1017 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1018 if (!opkt.buf)
1019 exit_program(1);
1020 }
1021 } else {
1022 opkt.data = pkt->data;
1023 opkt.size = pkt->size;
1024 }
1025
1026 write_frame(of->ctx, &opkt, ost);
1027 ost->st->codec->frame_number++;
1028 }
1029
1030 int guess_input_channel_layout(InputStream *ist)
1031 {
1032 AVCodecContext *dec = ist->st->codec;
1033
1034 if (!dec->channel_layout) {
1035 char layout_name[256];
1036
1037 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1038 if (!dec->channel_layout)
1039 return 0;
1040 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1041 dec->channels, dec->channel_layout);
1042 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1043 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1044 }
1045 return 1;
1046 }
1047
1048 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1049 {
1050 AVFrame *decoded_frame, *f;
1051 AVCodecContext *avctx = ist->st->codec;
1052 int i, ret, err = 0, resample_changed;
1053
1054 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1055 return AVERROR(ENOMEM);
1056 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1057 return AVERROR(ENOMEM);
1058 decoded_frame = ist->decoded_frame;
1059
1060 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1061 if (!*got_output || ret < 0) {
1062 if (!pkt->size) {
1063 for (i = 0; i < ist->nb_filters; i++)
1064 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
1065 }
1066 return ret;
1067 }
1068
1069 /* if the decoder provides a pts, use it instead of the last packet pts.
1070 the decoder could be delaying output by a packet or more. */
1071 if (decoded_frame->pts != AV_NOPTS_VALUE)
1072 ist->next_dts = decoded_frame->pts;
1073 else if (pkt->pts != AV_NOPTS_VALUE) {
1074 decoded_frame->pts = pkt->pts;
1075 pkt->pts = AV_NOPTS_VALUE;
1076 }
1077
1078 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1079 ist->resample_channels != avctx->channels ||
1080 ist->resample_channel_layout != decoded_frame->channel_layout ||
1081 ist->resample_sample_rate != decoded_frame->sample_rate;
1082 if (resample_changed) {
1083 char layout1[64], layout2[64];
1084
1085 if (!guess_input_channel_layout(ist)) {
1086 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1087 "layout for Input Stream #%d.%d\n", ist->file_index,
1088 ist->st->index);
1089 exit_program(1);
1090 }
1091 decoded_frame->channel_layout = avctx->channel_layout;
1092
1093 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1094 ist->resample_channel_layout);
1095 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1096 decoded_frame->channel_layout);
1097
1098 av_log(NULL, AV_LOG_INFO,
1099 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1100 ist->file_index, ist->st->index,
1101 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1102 ist->resample_channels, layout1,
1103 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1104 avctx->channels, layout2);
1105
1106 ist->resample_sample_fmt = decoded_frame->format;
1107 ist->resample_sample_rate = decoded_frame->sample_rate;
1108 ist->resample_channel_layout = decoded_frame->channel_layout;
1109 ist->resample_channels = avctx->channels;
1110
1111 for (i = 0; i < nb_filtergraphs; i++)
1112 if (ist_in_filtergraph(filtergraphs[i], ist) &&
1113 configure_filtergraph(filtergraphs[i]) < 0) {
1114 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1115 exit_program(1);
1116 }
1117 }
1118
1119 if (decoded_frame->pts != AV_NOPTS_VALUE)
1120 decoded_frame->pts = av_rescale_q(decoded_frame->pts,
1121 ist->st->time_base,
1122 (AVRational){1, ist->st->codec->sample_rate});
1123 for (i = 0; i < ist->nb_filters; i++) {
1124 if (i < ist->nb_filters - 1) {
1125 f = ist->filter_frame;
1126 err = av_frame_ref(f, decoded_frame);
1127 if (err < 0)
1128 break;
1129 } else
1130 f = decoded_frame;
1131
1132 err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
1133 if (err < 0)
1134 break;
1135 }
1136
1137 av_frame_unref(ist->filter_frame);
1138 av_frame_unref(decoded_frame);
1139 return err < 0 ? err : ret;
1140 }
1141
1142 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1143 {
1144 AVFrame *decoded_frame, *f;
1145 int i, ret = 0, err = 0, resample_changed;
1146
1147 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1148 return AVERROR(ENOMEM);
1149 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1150 return AVERROR(ENOMEM);
1151 decoded_frame = ist->decoded_frame;
1152
1153 ret = avcodec_decode_video2(ist->st->codec,
1154 decoded_frame, got_output, pkt);
1155 if (!*got_output || ret < 0) {
1156 if (!pkt->size) {
1157 for (i = 0; i < ist->nb_filters; i++)
1158 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
1159 }
1160 return ret;
1161 }
1162
1163 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
1164 decoded_frame->pkt_dts);
1165 pkt->size = 0;
1166
1167 if (ist->st->sample_aspect_ratio.num)
1168 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
1169
1170 resample_changed = ist->resample_width != decoded_frame->width ||
1171 ist->resample_height != decoded_frame->height ||
1172 ist->resample_pix_fmt != decoded_frame->format;
1173 if (resample_changed) {
1174 av_log(NULL, AV_LOG_INFO,
1175 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1176 ist->file_index, ist->st->index,
1177 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
1178 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
1179
1180 ret = poll_filters();
1181 if (ret < 0 && (ret != AVERROR_EOF && ret != AVERROR(EAGAIN)))
1182 av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
1183
1184 ist->resample_width = decoded_frame->width;
1185 ist->resample_height = decoded_frame->height;
1186 ist->resample_pix_fmt = decoded_frame->format;
1187
1188 for (i = 0; i < nb_filtergraphs; i++)
1189 if (ist_in_filtergraph(filtergraphs[i], ist) &&
1190 configure_filtergraph(filtergraphs[i]) < 0) {
1191 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1192 exit_program(1);
1193 }
1194 }
1195
1196 for (i = 0; i < ist->nb_filters; i++) {
1197 if (i < ist->nb_filters - 1) {
1198 f = ist->filter_frame;
1199 err = av_frame_ref(f, decoded_frame);
1200 if (err < 0)
1201 break;
1202 } else
1203 f = decoded_frame;
1204
1205 err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
1206 if (err < 0)
1207 break;
1208 }
1209
1210 av_frame_unref(ist->filter_frame);
1211 av_frame_unref(decoded_frame);
1212 return err < 0 ? err : ret;
1213 }
1214
1215 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
1216 {
1217 AVSubtitle subtitle;
1218 int i, ret = avcodec_decode_subtitle2(ist->st->codec,
1219 &subtitle, got_output, pkt);
1220 if (ret < 0)
1221 return ret;
1222 if (!*got_output)
1223 return ret;
1224
1225 for (i = 0; i < nb_output_streams; i++) {
1226 OutputStream *ost = output_streams[i];
1227
1228 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1229 continue;
1230
1231 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
1232 }
1233
1234 avsubtitle_free(&subtitle);
1235 return ret;
1236 }
1237
1238 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1239 static int output_packet(InputStream *ist, const AVPacket *pkt)
1240 {
1241 int i;
1242 int got_output;
1243 AVPacket avpkt;
1244
1245 if (ist->next_dts == AV_NOPTS_VALUE)
1246 ist->next_dts = ist->last_dts;
1247
1248 if (pkt == NULL) {
1249 /* EOF handling */
1250 av_init_packet(&avpkt);
1251 avpkt.data = NULL;
1252 avpkt.size = 0;
1253 goto handle_eof;
1254 } else {
1255 avpkt = *pkt;
1256 }
1257
1258 if (pkt->dts != AV_NOPTS_VALUE)
1259 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1260
1261 // while we have more to decode or while the decoder did output something on EOF
1262 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
1263 int ret = 0;
1264 handle_eof:
1265
1266 ist->last_dts = ist->next_dts;
1267
1268 if (avpkt.size && avpkt.size != pkt->size) {
1269 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
1270 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
1271 ist->showed_multi_packet_warning = 1;
1272 }
1273
1274 switch (ist->st->codec->codec_type) {
1275 case AVMEDIA_TYPE_AUDIO:
1276 ret = decode_audio (ist, &avpkt, &got_output);
1277 break;
1278 case AVMEDIA_TYPE_VIDEO:
1279 ret = decode_video (ist, &avpkt, &got_output);
1280 if (avpkt.duration)
1281 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
1282 else if (ist->st->avg_frame_rate.num)
1283 ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
1284 AV_TIME_BASE_Q);
1285 else if (ist->st->codec->time_base.num != 0) {
1286 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
1287 ist->st->codec->ticks_per_frame;
1288 ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
1289 }
1290 break;
1291 case AVMEDIA_TYPE_SUBTITLE:
1292 ret = transcode_subtitles(ist, &avpkt, &got_output);
1293 break;
1294 default:
1295 return -1;
1296 }
1297
1298 if (ret < 0)
1299 return ret;
1300 // touch data and size only if not EOF
1301 if (pkt) {
1302 avpkt.data += ret;
1303 avpkt.size -= ret;
1304 }
1305 if (!got_output) {
1306 continue;
1307 }
1308 }
1309
1310 /* handle stream copy */
1311 if (!ist->decoding_needed) {
1312 ist->last_dts = ist->next_dts;
1313 switch (ist->st->codec->codec_type) {
1314 case AVMEDIA_TYPE_AUDIO:
1315 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
1316 ist->st->codec->sample_rate;
1317 break;
1318 case AVMEDIA_TYPE_VIDEO:
1319 if (ist->st->codec->time_base.num != 0) {
1320 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
1321 ist->next_dts += ((int64_t)AV_TIME_BASE *
1322 ist->st->codec->time_base.num * ticks) /
1323 ist->st->codec->time_base.den;
1324 }
1325 break;
1326 }
1327 }
1328 for (i = 0; pkt && i < nb_output_streams; i++) {
1329 OutputStream *ost = output_streams[i];
1330
1331 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1332 continue;
1333
1334 do_streamcopy(ist, ost, pkt);
1335 }
1336
1337 return 0;
1338 }
1339
1340 static void print_sdp(void)
1341 {
1342 char sdp[16384];
1343 int i;
1344 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
1345
1346 if (!avc)
1347 exit_program(1);
1348 for (i = 0; i < nb_output_files; i++)
1349 avc[i] = output_files[i]->ctx;
1350
1351 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1352 printf("SDP:\n%s\n", sdp);
1353 fflush(stdout);
1354 av_freep(&avc);
1355 }
1356
1357 static int init_input_stream(int ist_index, char *error, int error_len)
1358 {
1359 int i, ret;
1360 InputStream *ist = input_streams[ist_index];
1361 if (ist->decoding_needed) {
1362 AVCodec *codec = ist->dec;
1363 if (!codec) {
1364 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
1365 ist->st->codec->codec_id, ist->file_index, ist->st->index);
1366 return AVERROR(EINVAL);
1367 }
1368
1369 /* update requested sample format for the decoder based on the
1370 corresponding encoder sample format */
1371 for (i = 0; i < nb_output_streams; i++) {
1372 OutputStream *ost = output_streams[i];
1373 if (ost->source_index == ist_index) {
1374 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
1375 break;
1376 }
1377 }
1378
1379 av_opt_set_int(ist->st->codec, "refcounted_frames", 1, 0);
1380
1381 if (!av_dict_get(ist->opts, "threads", NULL, 0))
1382 av_dict_set(&ist->opts, "threads", "auto", 0);
1383 if ((ret = avcodec_open2(ist->st->codec, codec, &ist->opts)) < 0) {
1384 char errbuf[128];
1385 if (ret == AVERROR_EXPERIMENTAL)
1386 abort_codec_experimental(codec, 0);
1387
1388 av_strerror(ret, errbuf, sizeof(errbuf));
1389
1390 snprintf(error, error_len,
1391 "Error while opening decoder for input stream "
1392 "#%d:%d : %s",
1393 ist->file_index, ist->st->index, errbuf);
1394 return ret;
1395 }
1396 assert_avoptions(ist->opts);
1397 }
1398
1399 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1400 ist->next_dts = AV_NOPTS_VALUE;
1401 init_pts_correction(&ist->pts_ctx);
1402 ist->is_start = 1;
1403
1404 return 0;
1405 }
1406
1407 static InputStream *get_input_stream(OutputStream *ost)
1408 {
1409 if (ost->source_index >= 0)
1410 return input_streams[ost->source_index];
1411
1412 if (ost->filter) {
1413 FilterGraph *fg = ost->filter->graph;
1414 int i;
1415
1416 for (i = 0; i < fg->nb_inputs; i++)
1417 if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
1418 return fg->inputs[i]->ist;
1419 }
1420
1421 return NULL;
1422 }
1423
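/*
 * Parse the comma-separated -force_key_frames list into an array of pts
 * values expressed in the encoder time base.
 */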
1424 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1425 AVCodecContext *avctx)
1426 {
1427 char *p;
1428 int n = 1, i;
1429 int64_t t;
1430
1431 for (p = kf; *p; p++)
1432 if (*p == ',')
1433 n++;
1434 ost->forced_kf_count = n;
1435 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
1436 if (!ost->forced_kf_pts) {
1437 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
1438 exit_program(1);
1439 }
1440
1441 p = kf;
1442 for (i = 0; i < n; i++) {
1443 char *next = strchr(p, ',');
1444
1445 if (next)
1446 *next++ = 0;
1447
1448 t = parse_time_or_die("force_key_frames", p, 1);
1449 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
1450
1451 p = next;
1452 }
1453 }
1454
1455 static int transcode_init(void)
1456 {
1457 int ret = 0, i, j, k;
1458 AVFormatContext *oc;
1459 AVCodecContext *codec;
1460 OutputStream *ost;
1461 InputStream *ist;
1462 char error[1024];
1463 int want_sdp = 1;
1464
1465 /* init framerate emulation */
1466 for (i = 0; i < nb_input_files; i++) {
1467 InputFile *ifile = input_files[i];
1468 if (ifile->rate_emu)
1469 for (j = 0; j < ifile->nb_streams; j++)
1470 input_streams[j + ifile->ist_index]->start = av_gettime();
1471 }
1472
1473 /* output stream init */
1474 for (i = 0; i < nb_output_files; i++) {
1475 oc = output_files[i]->ctx;
1476 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
1477 av_dump_format(oc, i, oc->filename, 1);
1478 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
1479 return AVERROR(EINVAL);
1480 }
1481 }
1482
1483 /* init complex filtergraphs */
1484 for (i = 0; i < nb_filtergraphs; i++)
1485 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
1486 return ret;
1487
1488 /* for each output stream, we compute the right encoding parameters */
1489 for (i = 0; i < nb_output_streams; i++) {
1490 AVCodecContext *icodec = NULL;
1491 ost = output_streams[i];
1492 oc = output_files[ost->file_index]->ctx;
1493 ist = get_input_stream(ost);
1494
1495 if (ost->attachment_filename)
1496 continue;
1497
1498 codec = ost->st->codec;
1499
1500 if (ist) {
1501 icodec = ist->st->codec;
1502
1503 ost->st->disposition = ist->st->disposition;
1504 codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
1505 codec->chroma_sample_location = icodec->chroma_sample_location;
1506 }
1507
1508 if (ost->stream_copy) {
1509 AVRational sar;
1510 uint64_t extra_size;
1511
1512 av_assert0(ist && !ost->filter);
1513
1514 extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
1515
1516 if (extra_size > INT_MAX) {
1517 return AVERROR(EINVAL);
1518 }
1519
1520 /* if stream_copy is selected, no need to decode or encode */
1521 codec->codec_id = icodec->codec_id;
1522 codec->codec_type = icodec->codec_type;
1523
1524 if (!codec->codec_tag) {
1525 if (!oc->oformat->codec_tag ||
1526 av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
1527 av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
1528 codec->codec_tag = icodec->codec_tag;
1529 }
1530
1531 codec->bit_rate = icodec->bit_rate;
1532 codec->rc_max_rate = icodec->rc_max_rate;
1533 codec->rc_buffer_size = icodec->rc_buffer_size;
1534 codec->field_order = icodec->field_order;
1535 codec->extradata = av_mallocz(extra_size);
1536 if (!codec->extradata) {
1537 return AVERROR(ENOMEM);
1538 }
1539 memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
1540 codec->extradata_size = icodec->extradata_size;
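/* Without -copytb the output time base is derived from the decoder time base
 * scaled by ticks_per_frame; with -copytb the input stream's time base is
 * reused as-is. */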
1541 if (!copy_tb) {
1542 codec->time_base = icodec->time_base;
1543 codec->time_base.num *= icodec->ticks_per_frame;
1544 av_reduce(&codec->time_base.num, &codec->time_base.den,
1545 codec->time_base.num, codec->time_base.den, INT_MAX);
1546 } else
1547 codec->time_base = ist->st->time_base;
1548
1549 switch (codec->codec_type) {
1550 case AVMEDIA_TYPE_AUDIO:
1551 if (audio_volume != 256) {
1552 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
1553 exit_program(1);
1554 }
1555 codec->channel_layout = icodec->channel_layout;
1556 codec->sample_rate = icodec->sample_rate;
1557 codec->channels = icodec->channels;
1558 codec->frame_size = icodec->frame_size;
1559 codec->audio_service_type = icodec->audio_service_type;
1560 codec->block_align = icodec->block_align;
1561 break;
1562 case AVMEDIA_TYPE_VIDEO:
1563 codec->pix_fmt = icodec->pix_fmt;
1564 codec->width = icodec->width;
1565 codec->height = icodec->height;
1566 codec->has_b_frames = icodec->has_b_frames;
1567 if (ost->frame_aspect_ratio)
1568 sar = av_d2q(ost->frame_aspect_ratio * codec->height / codec->width, 255);
1569 else if (ist->st->sample_aspect_ratio.num)
1570 sar = ist->st->sample_aspect_ratio;
1571 else
1572 sar = icodec->sample_aspect_ratio;
1573 ost->st->sample_aspect_ratio = codec->sample_aspect_ratio = sar;
1574 break;
1575 case AVMEDIA_TYPE_SUBTITLE:
1576 codec->width = icodec->width;
1577 codec->height = icodec->height;
1578 break;
1579 case AVMEDIA_TYPE_DATA:
1580 case AVMEDIA_TYPE_ATTACHMENT:
1581 break;
1582 default:
1583 abort();
1584 }
1585 } else {
1586 if (!ost->enc) {
1587 /* should only happen when a default codec is not present. */
1588 snprintf(error, sizeof(error), "Automatic encoder selection "
1589 "failed for output stream #%d:%d. Default encoder for "
1590 "format %s is probably disabled. Please choose an "
1591 "encoder manually.\n", ost->file_index, ost->index,
1592 oc->oformat->name);
1593 ret = AVERROR(EINVAL);
1594 goto dump_format;
1595 }
1596
1597 if (ist)
1598 ist->decoding_needed = 1;
1599 ost->encoding_needed = 1;
1600
1601 /*
1602 * We want CFR output if and only if one of those is true:
1603 * 1) user specified output framerate with -r
1604 * 2) user specified -vsync cfr
1605 * 3) output format is CFR and the user didn't force vsync to
1606 * something other than CFR
1607 *
1608 * in such a case, set ost->frame_rate
1609 */
1610 if (codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1611 !ost->frame_rate.num && ist &&
1612 (video_sync_method == VSYNC_CFR ||
1613 (video_sync_method == VSYNC_AUTO &&
1614 !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
1615 ost->frame_rate = ist->framerate.num ? ist->framerate :
1616 ist->st->avg_frame_rate.num ?
1617 ist->st->avg_frame_rate :
1618 (AVRational){25, 1};
1619
1620 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
1621 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
1622 ost->frame_rate = ost->enc->supported_framerates[idx];
1623 }
1624 }
1625
1626 if (!ost->filter &&
1627 (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
1628 codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
1629 FilterGraph *fg;
1630 fg = init_simple_filtergraph(ist, ost);
1631 if (configure_filtergraph(fg)) {
1632 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
1633 exit_program(1);
1634 }
1635 }
1636
1637 switch (codec->codec_type) {
1638 case AVMEDIA_TYPE_AUDIO:
1639 codec->sample_fmt = ost->filter->filter->inputs[0]->format;
1640 codec->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
1641 codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
1642 codec->channels = av_get_channel_layout_nb_channels(codec->channel_layout);
1643 codec->time_base = (AVRational){ 1, codec->sample_rate };
1644 break;
1645 case AVMEDIA_TYPE_VIDEO:
1646 codec->time_base = ost->filter->filter->inputs[0]->time_base;
1647
1648 codec->width = ost->filter->filter->inputs[0]->w;
1649 codec->height = ost->filter->filter->inputs[0]->h;
1650 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
1651 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
1652 av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
1653 ost->filter->filter->inputs[0]->sample_aspect_ratio;
1654 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
1655
1656 if (icodec &&
1657 (codec->width != icodec->width ||
1658 codec->height != icodec->height ||
1659 codec->pix_fmt != icodec->pix_fmt)) {
1660 codec->bits_per_raw_sample = 0;
1661 }
1662
1663 if (ost->forced_keyframes)
1664 parse_forced_key_frames(ost->forced_keyframes, ost,
1665 ost->st->codec);
1666 break;
1667 case AVMEDIA_TYPE_SUBTITLE:
1668 codec->time_base = (AVRational){1, 1000};
1669 break;
1670 default:
1671 abort();
1672 break;
1673 }
1674 /* two pass mode */
1675 if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
1676 char logfilename[1024];
1677 FILE *f;
1678
1679 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
1680 ost->logfile_prefix ? ost->logfile_prefix :
1681 DEFAULT_PASS_LOGFILENAME_PREFIX,
1682 i);
1683 if (!strcmp(ost->enc->name, "libx264")) {
1684 av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
1685 } else {
1686 if (codec->flags & CODEC_FLAG_PASS1) {
1687 f = fopen(logfilename, "wb");
1688 if (!f) {
1689 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
1690 logfilename, strerror(errno));
1691 exit_program(1);
1692 }
1693 ost->logfile = f;
1694 } else {
1695 char *logbuffer;
1696 size_t logbuffer_size;
1697 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
1698 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
1699 logfilename);
1700 exit_program(1);
1701 }
1702 codec->stats_in = logbuffer;
1703 }
1704 }
1705 }
1706 }
1707 }
1708
1709 /* open each encoder */
1710 for (i = 0; i < nb_output_streams; i++) {
1711 ost = output_streams[i];
1712 if (ost->encoding_needed) {
1713 AVCodec *codec = ost->enc;
1714 AVCodecContext *dec = NULL;
1715
1716 if ((ist = get_input_stream(ost)))
1717 dec = ist->st->codec;
1718 if (dec && dec->subtitle_header) {
1719 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
1720 if (!ost->st->codec->subtitle_header) {
1721 ret = AVERROR(ENOMEM);
1722 goto dump_format;
1723 }
1724 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
1725 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
1726 }
1727 if (!av_dict_get(ost->opts, "threads", NULL, 0))
1728 av_dict_set(&ost->opts, "threads", "auto", 0);
1729 if ((ret = avcodec_open2(ost->st->codec, codec, &ost->opts)) < 0) {
1730 if (ret == AVERROR_EXPERIMENTAL)
1731 abort_codec_experimental(codec, 1);
1732 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
1733 ost->file_index, ost->index);
1734 goto dump_format;
1735 }
1736 assert_avoptions(ost->opts);
1737 if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
1738 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
1739 "It takes bits/s as argument, not kbits/s\n");
1740 extra_size += ost->st->codec->extradata_size;
1741
1742 if (ost->st->codec->me_threshold)
1743 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
1744 } else {
1745 av_opt_set_dict(ost->st->codec, &ost->opts);
1746 }
1747 }
1748
1749 /* init input streams */
1750 for (i = 0; i < nb_input_streams; i++)
1751 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
1752 goto dump_format;
1753
1754 /* discard unused programs */
1755 for (i = 0; i < nb_input_files; i++) {
1756 InputFile *ifile = input_files[i];
1757 for (j = 0; j < ifile->ctx->nb_programs; j++) {
1758 AVProgram *p = ifile->ctx->programs[j];
1759 int discard = AVDISCARD_ALL;
1760
1761 for (k = 0; k < p->nb_stream_indexes; k++)
1762 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
1763 discard = AVDISCARD_DEFAULT;
1764 break;
1765 }
1766 p->discard = discard;
1767 }
1768 }
1769
1770 /* open files and write file headers */
1771 for (i = 0; i < nb_output_files; i++) {
1772 oc = output_files[i]->ctx;
1773 oc->interrupt_callback = int_cb;
1774 if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
1775 char errbuf[128];
1776 av_strerror(ret, errbuf, sizeof(errbuf));
1777 snprintf(error, sizeof(error),
1778 "Could not write header for output file #%d "
1779 "(incorrect codec parameters ?): %s",
1780 i, errbuf);
1781 ret = AVERROR(EINVAL);
1782 goto dump_format;
1783 }
1784 assert_avoptions(output_files[i]->opts);
1785 if (strcmp(oc->oformat->name, "rtp")) {
1786 want_sdp = 0;
1787 }
1788 }
1789
1790 dump_format:
1791 /* dump the file output parameters - cannot be done before in case
1792 of stream copy */
1793 for (i = 0; i < nb_output_files; i++) {
1794 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
1795 }
1796
1797 /* dump the stream mapping */
1798 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
1799 for (i = 0; i < nb_input_streams; i++) {
1800 ist = input_streams[i];
1801
1802 for (j = 0; j < ist->nb_filters; j++) {
1803 if (ist->filters[j]->graph->graph_desc) {
1804 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
1805 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
1806 ist->filters[j]->name);
1807 if (nb_filtergraphs > 1)
1808 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
1809 av_log(NULL, AV_LOG_INFO, "\n");
1810 }
1811 }
1812 }
1813
1814 for (i = 0; i < nb_output_streams; i++) {
1815 ost = output_streams[i];
1816
1817 if (ost->attachment_filename) {
1818 /* an attached file */
1819 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
1820 ost->attachment_filename, ost->file_index, ost->index);
1821 continue;
1822 }
1823
1824 if (ost->filter && ost->filter->graph->graph_desc) {
1825 /* output from a complex graph */
1826 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
1827 if (nb_filtergraphs > 1)
1828 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
1829
1830 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
1831 ost->index, ost->enc ? ost->enc->name : "?");
1832 continue;
1833 }
1834
1835 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
1836 input_streams[ost->source_index]->file_index,
1837 input_streams[ost->source_index]->st->index,
1838 ost->file_index,
1839 ost->index);
1840 if (ost->sync_ist != input_streams[ost->source_index])
1841 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
1842 ost->sync_ist->file_index,
1843 ost->sync_ist->st->index);
1844 if (ost->stream_copy)
1845 av_log(NULL, AV_LOG_INFO, " (copy)");
1846 else
1847 av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
1848 input_streams[ost->source_index]->dec->name : "?",
1849 ost->enc ? ost->enc->name : "?");
1850 av_log(NULL, AV_LOG_INFO, "\n");
1851 }
1852
1853 if (ret) {
1854 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
1855 return ret;
1856 }
1857
1858 if (want_sdp) {
1859 print_sdp();
1860 }
1861
1862 return 0;
1863 }
1864
1865 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
1866 static int need_output(void)
1867 {
1868 int i;
1869
1870 for (i = 0; i < nb_output_streams; i++) {
1871 OutputStream *ost = output_streams[i];
1872 OutputFile *of = output_files[ost->file_index];
1873 AVFormatContext *os = output_files[ost->file_index]->ctx;
1874
1875 if (ost->finished ||
1876 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
1877 continue;
1878 if (ost->frame_number >= ost->max_frames) {
1879 int j;
1880 for (j = 0; j < of->ctx->nb_streams; j++)
1881 output_streams[of->ost_index + j]->finished = 1;
1882 continue;
1883 }
1884
1885 return 1;
1886 }
1887
1888 return 0;
1889 }
1890
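/* Pick the input file to read from next: the one whose stream has the lowest
 * last decoded DTS, skipping files that are temporarily unavailable (EAGAIN)
 * or already at EOF. Returns NULL when there is nothing left to read. */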
1891 static InputFile *select_input_file(void)
1892 {
1893 InputFile *ifile = NULL;
1894 int64_t ipts_min = INT64_MAX;
1895 int i;
1896
1897 for (i = 0; i < nb_input_streams; i++) {
1898 InputStream *ist = input_streams[i];
1899 int64_t ipts = ist->last_dts;
1900
1901 if (ist->discard || input_files[ist->file_index]->eagain)
1902 continue;
1903 if (!input_files[ist->file_index]->eof_reached) {
1904 if (ipts < ipts_min) {
1905 ipts_min = ipts;
1906 ifile = input_files[ist->file_index];
1907 }
1908 }
1909 }
1910
1911 return ifile;
1912 }
1913
1914 #if HAVE_PTHREADS
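/* Demuxer thread, one per input file: keeps reading packets and queues copies
 * of them in the file's FIFO, blocking on the condition variable while the
 * FIFO is full; it exits on demuxer error/EOF or when the main thread sets
 * transcoding_finished. */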
1915 static void *input_thread(void *arg)
1916 {
1917 InputFile *f = arg;
1918 int ret = 0;
1919
1920 while (!transcoding_finished && ret >= 0) {
1921 AVPacket pkt;
1922 ret = av_read_frame(f->ctx, &pkt);
1923
1924 if (ret == AVERROR(EAGAIN)) {
1925 av_usleep(10000);
1926 ret = 0;
1927 continue;
1928 } else if (ret < 0)
1929 break;
1930
1931 pthread_mutex_lock(&f->fifo_lock);
1932 while (!av_fifo_space(f->fifo))
1933 pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
1934
1935 av_dup_packet(&pkt);
1936 av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
1937
1938 pthread_mutex_unlock(&f->fifo_lock);
1939 }
1940
1941 f->finished = 1;
1942 return NULL;
1943 }
1944
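/* Signal all demuxer threads to stop, drain and free their packet FIFOs,
 * and join the threads. */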
1945 static void free_input_threads(void)
1946 {
1947 int i;
1948
1949 if (nb_input_files == 1)
1950 return;
1951
1952 transcoding_finished = 1;
1953
1954 for (i = 0; i < nb_input_files; i++) {
1955 InputFile *f = input_files[i];
1956 AVPacket pkt;
1957
1958 if (!f->fifo || f->joined)
1959 continue;
1960
1961 pthread_mutex_lock(&f->fifo_lock);
1962 while (av_fifo_size(f->fifo)) {
1963 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
1964 av_free_packet(&pkt);
1965 }
1966 pthread_cond_signal(&f->fifo_cond);
1967 pthread_mutex_unlock(&f->fifo_lock);
1968
1969 pthread_join(f->thread, NULL);
1970 f->joined = 1;
1971
1972 while (av_fifo_size(f->fifo)) {
1973 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
1974 av_free_packet(&pkt);
1975 }
1976 av_fifo_free(f->fifo);
1977 }
1978 }
1979
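/* Start one demuxer thread per input file; threaded reading is only used when
 * there is more than one input. Each file gets a FIFO sized for 8 queued
 * packets plus the mutex/condition pair protecting it. */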
1980 static int init_input_threads(void)
1981 {
1982 int i, ret;
1983
1984 if (nb_input_files == 1)
1985 return 0;
1986
1987 for (i = 0; i < nb_input_files; i++) {
1988 InputFile *f = input_files[i];
1989
1990 if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
1991 return AVERROR(ENOMEM);
1992
1993 pthread_mutex_init(&f->fifo_lock, NULL);
1994 pthread_cond_init (&f->fifo_cond, NULL);
1995
1996 if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
1997 return AVERROR(ret);
1998 }
1999 return 0;
2000 }
2001
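/* Threaded packet fetch: pop one packet from the file's FIFO and wake the
 * demuxer thread, or return AVERROR(EAGAIN) when the queue is empty and
 * AVERROR_EOF once the demuxer thread has finished. */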
2002 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2003 {
2004 int ret = 0;
2005
2006 pthread_mutex_lock(&f->fifo_lock);
2007
2008 if (av_fifo_size(f->fifo)) {
2009 av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
2010 pthread_cond_signal(&f->fifo_cond);
2011 } else {
2012 if (f->finished)
2013 ret = AVERROR_EOF;
2014 else
2015 ret = AVERROR(EAGAIN);
2016 }
2017
2018 pthread_mutex_unlock(&f->fifo_lock);
2019
2020 return ret;
2021 }
2022 #endif
2023
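/* Fetch the next packet from an input file. With -re (rate_emu) reading is
 * throttled so that no stream gets ahead of wall-clock time, i.e. input is
 * consumed at its native rate. */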
2024 static int get_input_packet(InputFile *f, AVPacket *pkt)
2025 {
2026 if (f->rate_emu) {
2027 int i;
2028 for (i = 0; i < f->nb_streams; i++) {
2029 InputStream *ist = input_streams[f->ist_index + i];
2030 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2031 int64_t now = av_gettime() - ist->start;
2032 if (pts > now)
2033 return AVERROR(EAGAIN);
2034 }
2035 }
2036
2037 #if HAVE_PTHREADS
2038 if (nb_input_files > 1)
2039 return get_input_packet_mt(f, pkt);
2040 #endif
2041 return av_read_frame(f->ctx, pkt);
2042 }
2043
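/* Track which input files hit EAGAIN during the last read attempt, so the
 * main loop can back off briefly before retrying. */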
2044 static int got_eagain(void)
2045 {
2046 int i;
2047 for (i = 0; i < nb_input_files; i++)
2048 if (input_files[i]->eagain)
2049 return 1;
2050 return 0;
2051 }
2052
2053 static void reset_eagain(void)
2054 {
2055 int i;
2056 for (i = 0; i < nb_input_files; i++)
2057 input_files[i]->eagain = 0;
2058 }
2059
2060 /*
2061 * Read one packet from an input file and send it for
2062 * - decoding -> lavfi (audio/video)
2063 * - decoding -> encoding -> muxing (subtitles)
2064 * - muxing (streamcopy)
2065 *
2066 * Return
2067 * - 0 -- one packet was read and processed
2068 * - AVERROR(EAGAIN) -- no packets were available for selected file,
2069 * this function should be called again
2070 * - AVERROR_EOF -- this function should not be called again
2071 */
2072 static int process_input(void)
2073 {
2074 InputFile *ifile;
2075 AVFormatContext *is;
2076 InputStream *ist;
2077 AVPacket pkt;
2078 int ret, i, j;
2079
2080 /* select the input file that we must read from now */
2081 ifile = select_input_file();
2082 /* if none, all input is finished */
2083 if (!ifile) {
2084 if (got_eagain()) {
2085 reset_eagain();
2086 av_usleep(10000);
2087 return AVERROR(EAGAIN);
2088 }
2089 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from.\n");
2090 return AVERROR_EOF;
2091 }
2092
2093 is = ifile->ctx;
2094 ret = get_input_packet(ifile, &pkt);
2095
2096 if (ret == AVERROR(EAGAIN)) {
2097 ifile->eagain = 1;
2098 return ret;
2099 }
2100 if (ret < 0) {
2101 if (ret != AVERROR_EOF) {
2102 print_error(is->filename, ret);
2103 if (exit_on_error)
2104 exit_program(1);
2105 }
2106 ifile->eof_reached = 1;
2107
2108 for (i = 0; i < ifile->nb_streams; i++) {
2109 ist = input_streams[ifile->ist_index + i];
2110 if (ist->decoding_needed)
2111 output_packet(ist, NULL);
2112
2113 /* mark all outputs that don't go through lavfi as finished */
2114 for (j = 0; j < nb_output_streams; j++) {
2115 OutputStream *ost = output_streams[j];
2116
2117 if (ost->source_index == ifile->ist_index + i &&
2118 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
2119 ost->finished = 1;
2120 }
2121 }
2122
2123 return AVERROR(EAGAIN);
2124 }
2125
2126 reset_eagain();
2127
2128 if (do_pkt_dump) {
2129 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2130 is->streams[pkt.stream_index]);
2131 }
2132 /* the following test is needed in case new streams appear
2133 dynamically in the stream: we ignore them */
2134 if (pkt.stream_index >= ifile->nb_streams)
2135 goto discard_packet;
2136
2137 ist = input_streams[ifile->ist_index + pkt.stream_index];
2138 if (ist->discard)
2139 goto discard_packet;
2140
2141 if (pkt.dts != AV_NOPTS_VALUE)
2142 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2143 if (pkt.pts != AV_NOPTS_VALUE)
2144 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2145
2146 if (pkt.pts != AV_NOPTS_VALUE)
2147 pkt.pts *= ist->ts_scale;
2148 if (pkt.dts != AV_NOPTS_VALUE)
2149 pkt.dts *= ist->ts_scale;
2150
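/* for formats with discontinuous timestamps (AVFMT_TS_DISCONT, e.g. MPEG-TS),
 * fold large DTS jumps into the file's ts_offset so that downstream
 * timestamps stay continuous */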
2151 if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
2152 (is->iformat->flags & AVFMT_TS_DISCONT)) {
2153 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2154 int64_t delta = pkt_dts - ist->next_dts;
2155
2156 if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
2157 ifile->ts_offset -= delta;
2158 av_log(NULL, AV_LOG_DEBUG,
2159 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2160 delta, ifile->ts_offset);
2161 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2162 if (pkt.pts != AV_NOPTS_VALUE)
2163 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2164 }
2165 }
2166
2167 ret = output_packet(ist, &pkt);
2168 if (ret < 0) {
2169 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
2170 ist->file_index, ist->st->index);
2171 if (exit_on_error)
2172 exit_program(1);
2173 }
2174
2175 discard_packet:
2176 av_free_packet(&pkt);
2177
2178 return 0;
2179 }
2180
2181 /*
2182 * The following code is the main loop of the file converter
2183 */
2184 static int transcode(void)
2185 {
2186 int ret, i, need_input = 1;
2187 AVFormatContext *os;
2188 OutputStream *ost;
2189 InputStream *ist;
2190 int64_t timer_start;
2191
2192 ret = transcode_init();
2193 if (ret < 0)
2194 goto fail;
2195
2196 av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2197 term_init();
2198
2199 timer_start = av_gettime();
2200
2201 #if HAVE_PTHREADS
2202 if ((ret = init_input_threads()) < 0)
2203 goto fail;
2204 #endif
2205
2206 while (!received_sigterm) {
2207 /* check if there's any stream where output is still needed */
2208 if (!need_output()) {
2209 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
2210 break;
2211 }
2212
2213 /* read and process one input packet if needed */
2214 if (need_input) {
2215 ret = process_input();
2216 if (ret == AVERROR_EOF)
2217 need_input = 0;
2218 }
2219
2220 ret = poll_filters();
2221 if (ret < 0) {
2222 if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
2223 continue;
2224
2225 av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
2226 break;
2227 }
2228
2229 /* dump report by using the first output video and audio streams */
2230 print_report(0, timer_start);
2231 }
2232 #if HAVE_PTHREADS
2233 free_input_threads();
2234 #endif
2235
2236 /* at the end of stream, we must flush the decoder buffers */
2237 for (i = 0; i < nb_input_streams; i++) {
2238 ist = input_streams[i];
2239 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
2240 output_packet(ist, NULL);
2241 }
2242 }
2243 poll_filters();
2244 flush_encoders();
2245
2246 term_exit();
2247
2248 /* write the trailer if needed and close file */
2249 for (i = 0; i < nb_output_files; i++) {
2250 os = output_files[i]->ctx;
2251 av_write_trailer(os);
2252 }
2253
2254 /* dump report by using the first video and audio streams */
2255 print_report(1, timer_start);
2256
2257 /* close each encoder */
2258 for (i = 0; i < nb_output_streams; i++) {
2259 ost = output_streams[i];
2260 if (ost->encoding_needed) {
2261 av_freep(&ost->st->codec->stats_in);
2262 avcodec_close(ost->st->codec);
2263 }
2264 }
2265
2266 /* close each decoder */
2267 for (i = 0; i < nb_input_streams; i++) {
2268 ist = input_streams[i];
2269 if (ist->decoding_needed) {
2270 avcodec_close(ist->st->codec);
2271 }
2272 }
2273
2274 /* finished! */
2275 ret = 0;
2276
2277 fail:
2278 #if HAVE_PTHREADS
2279 free_input_threads();
2280 #endif
2281
2282 if (output_streams) {
2283 for (i = 0; i < nb_output_streams; i++) {
2284 ost = output_streams[i];
2285 if (ost) {
2286 if (ost->stream_copy)
2287 av_freep(&ost->st->codec->extradata);
2288 if (ost->logfile) {
2289 fclose(ost->logfile);
2290 ost->logfile = NULL;
2291 }
2292 av_freep(&ost->st->codec->subtitle_header);
2293 av_free(ost->forced_kf_pts);
2294 av_dict_free(&ost->opts);
2295 av_dict_free(&ost->resample_opts);
2296 }
2297 }
2298 }
2299 return ret;
2300 }
2301
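/* User CPU time consumed by the process so far, in microseconds; falls back
 * to wall-clock time when no process-time API is available. */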
2302 static int64_t getutime(void)
2303 {
2304 #if HAVE_GETRUSAGE
2305 struct rusage rusage;
2306
2307 getrusage(RUSAGE_SELF, &rusage);
2308 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
2309 #elif HAVE_GETPROCESSTIMES
2310 HANDLE proc;
2311 FILETIME c, e, k, u;
2312 proc = GetCurrentProcess();
2313 GetProcessTimes(proc, &c, &e, &k, &u);
2314 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
2315 #else
2316 return av_gettime();
2317 #endif
2318 }
2319
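/* Peak memory use of the process in bytes: ru_maxrss where getrusage()
 * provides it, peak pagefile usage on Windows, 0 when neither is available. */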
2320 static int64_t getmaxrss(void)
2321 {
2322 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
2323 struct rusage rusage;
2324 getrusage(RUSAGE_SELF, &rusage);
2325 return (int64_t)rusage.ru_maxrss * 1024;
2326 #elif HAVE_GETPROCESSMEMORYINFO
2327 HANDLE proc;
2328 PROCESS_MEMORY_COUNTERS memcounters;
2329 proc = GetCurrentProcess();
2330 memcounters.cb = sizeof(memcounters);
2331 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
2332 return memcounters.PeakPagefileUsage;
2333 #else
2334 return 0;
2335 #endif
2336 }
2337
2338 int main(int argc, char **argv)
2339 {
2340 int ret;
2341 int64_t ti;
2342
2343 register_exit(avconv_cleanup);
2344
2345 av_log_set_flags(AV_LOG_SKIP_REPEATED);
2346 parse_loglevel(argc, argv, options);
2347
2348 avcodec_register_all();
2349 #if CONFIG_AVDEVICE
2350 avdevice_register_all();
2351 #endif
2352 avfilter_register_all();
2353 av_register_all();
2354 avformat_network_init();
2355
2356 show_banner();
2357
2358 /* parse options and open all input/output files */
2359 ret = avconv_parse_options(argc, argv);
2360 if (ret < 0)
2361 exit_program(1);
2362
2363 if (nb_output_files <= 0 && nb_input_files == 0) {
2364 show_usage();
2365 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2366 exit_program(1);
2367 }
2368
2369 /* file converter / grab */
2370 if (nb_output_files <= 0) {
2371 fprintf(stderr, "At least one output file must be specified\n");
2372 exit_program(1);
2373 }
2374
2375 ti = getutime();
2376 if (transcode() < 0)
2377 exit_program(1);
2378 ti = getutime() - ti;
2379 if (do_benchmark) {
2380 int maxrss = getmaxrss() / 1024;
2381 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
2382 }
2383
2384 exit_program(0);
2385 return 0;
2386 }