1 /*
2 * avconv main
3 * Copyright (c) 2000-2011 The Libav developers
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <ctype.h>
24 #include <string.h>
25 #include <math.h>
26 #include <stdlib.h>
27 #include <errno.h>
28 #include <signal.h>
29 #include <limits.h>
30 #include <stdint.h>
31
32 #include "libavformat/avformat.h"
33 #include "libavdevice/avdevice.h"
34 #include "libswscale/swscale.h"
35 #include "libavresample/avresample.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/channel_layout.h"
38 #include "libavutil/parseutils.h"
39 #include "libavutil/samplefmt.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/internal.h"
42 #include "libavutil/intreadwrite.h"
43 #include "libavutil/dict.h"
44 #include "libavutil/mathematics.h"
45 #include "libavutil/pixdesc.h"
46 #include "libavutil/avstring.h"
47 #include "libavutil/libm.h"
48 #include "libavutil/imgutils.h"
49 #include "libavutil/time.h"
50 #include "libavformat/os_support.h"
51
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersrc.h"
54 # include "libavfilter/buffersink.h"
55
56 #if HAVE_SYS_RESOURCE_H
57 #include <sys/time.h>
58 #include <sys/types.h>
59 #include <sys/resource.h>
60 #elif HAVE_GETPROCESSTIMES
61 #include <windows.h>
62 #endif
63 #if HAVE_GETPROCESSMEMORYINFO
64 #include <windows.h>
65 #include <psapi.h>
66 #endif
67
68 #if HAVE_SYS_SELECT_H
69 #include <sys/select.h>
70 #endif
71
72 #if HAVE_PTHREADS
73 #include <pthread.h>
74 #endif
75
76 #include <time.h>
77
78 #include "avconv.h"
79 #include "cmdutils.h"
80
81 #include "libavutil/avassert.h"
82
83 const char program_name[] = "avconv";
84 const int program_birth_year = 2000;
85
86 static FILE *vstats_file;
87
88 static int nb_frames_drop = 0;
89
90 static int want_sdp = 1;
91
92 #if HAVE_PTHREADS
93 /* signal to input threads that they should exit; set by the main thread */
94 static int transcoding_finished;
95 #endif
96
97 InputStream **input_streams = NULL;
98 int nb_input_streams = 0;
99 InputFile **input_files = NULL;
100 int nb_input_files = 0;
101
102 OutputStream **output_streams = NULL;
103 int nb_output_streams = 0;
104 OutputFile **output_files = NULL;
105 int nb_output_files = 0;
106
107 FilterGraph **filtergraphs;
108 int nb_filtergraphs;
109
110 static void term_exit(void)
111 {
112 av_log(NULL, AV_LOG_QUIET, "");
113 }
114
115 static volatile int received_sigterm = 0;
116 static volatile int received_nb_signals = 0;
117
118 static void
119 sigterm_handler(int sig)
120 {
121 received_sigterm = sig;
122 received_nb_signals++;
123 term_exit();
124 }
125
126 static void term_init(void)
127 {
128 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
129 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
130 #ifdef SIGXCPU
131 signal(SIGXCPU, sigterm_handler);
132 #endif
133 }
134
135 static int decode_interrupt_cb(void *ctx)
136 {
137 return received_nb_signals > 1;
138 }
139
140 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
141
142 static void avconv_cleanup(int ret)
143 {
144 int i, j;
145
146 for (i = 0; i < nb_filtergraphs; i++) {
147 FilterGraph *fg = filtergraphs[i];
148 avfilter_graph_free(&fg->graph);
149 for (j = 0; j < fg->nb_inputs; j++) {
150 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
151 av_freep(&fg->inputs[j]->name);
152 av_freep(&fg->inputs[j]);
153 }
154 av_freep(&fg->inputs);
155 for (j = 0; j < fg->nb_outputs; j++) {
156 av_freep(&fg->outputs[j]->name);
157 av_freep(&fg->outputs[j]->formats);
158 av_freep(&fg->outputs[j]->channel_layouts);
159 av_freep(&fg->outputs[j]->sample_rates);
160 av_freep(&fg->outputs[j]);
161 }
162 av_freep(&fg->outputs);
163 av_freep(&fg->graph_desc);
164
165 av_freep(&filtergraphs[i]);
166 }
167 av_freep(&filtergraphs);
168
169 /* close files */
170 for (i = 0; i < nb_output_files; i++) {
171 OutputFile *of = output_files[i];
172 AVFormatContext *s = of->ctx;
173 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
174 avio_close(s->pb);
175 avformat_free_context(s);
176 av_dict_free(&of->opts);
177
178 av_freep(&output_files[i]);
179 }
180 for (i = 0; i < nb_output_streams; i++) {
181 OutputStream *ost = output_streams[i];
182
183 for (j = 0; j < ost->nb_bitstream_filters; j++)
184 av_bsf_free(&ost->bsf_ctx[j]);
185 av_freep(&ost->bsf_ctx);
186 av_freep(&ost->bitstream_filters);
187
188 av_frame_free(&ost->filtered_frame);
189
190 av_parser_close(ost->parser);
191 avcodec_free_context(&ost->parser_avctx);
192
193 av_freep(&ost->forced_keyframes);
194 av_freep(&ost->avfilter);
195 av_freep(&ost->logfile_prefix);
196
197 avcodec_free_context(&ost->enc_ctx);
198
199 while (av_fifo_size(ost->muxing_queue)) {
200 AVPacket pkt;
201 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
202 av_packet_unref(&pkt);
203 }
204 av_fifo_free(ost->muxing_queue);
205
206 av_freep(&output_streams[i]);
207 }
208 for (i = 0; i < nb_input_files; i++) {
209 avformat_close_input(&input_files[i]->ctx);
210 av_freep(&input_files[i]);
211 }
212 for (i = 0; i < nb_input_streams; i++) {
213 InputStream *ist = input_streams[i];
214
215 av_frame_free(&ist->decoded_frame);
216 av_frame_free(&ist->filter_frame);
217 av_dict_free(&ist->decoder_opts);
218 av_freep(&ist->filters);
219 av_freep(&ist->hwaccel_device);
220
221 avcodec_free_context(&ist->dec_ctx);
222
223 av_freep(&input_streams[i]);
224 }
225
226 if (vstats_file)
227 fclose(vstats_file);
228 av_free(vstats_filename);
229
230 av_freep(&input_streams);
231 av_freep(&input_files);
232 av_freep(&output_streams);
233 av_freep(&output_files);
234
235 uninit_opts();
236
237 avformat_network_deinit();
238
239 if (received_sigterm) {
240 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
241 (int) received_sigterm);
242 exit (255);
243 }
244 }
245
246 void assert_avoptions(AVDictionary *m)
247 {
248 AVDictionaryEntry *t;
249 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
250 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
251 exit_program(1);
252 }
253 }
254
255 static void abort_codec_experimental(AVCodec *c, int encoder)
256 {
257 const char *codec_string = encoder ? "encoder" : "decoder";
258 AVCodec *codec;
259 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
260 "results.\nAdd '-strict experimental' if you want to use it.\n",
261 codec_string, c->name);
262 codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id);
263 if (!(codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL))
264 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
265 codec_string, codec->name);
266 exit_program(1);
267 }
268
269 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
270 {
271 AVFormatContext *s = of->ctx;
272 AVStream *st = ost->st;
273 int ret;
274
275 if (!of->header_written) {
276 AVPacket tmp_pkt;
277 /* the muxer is not initialized yet, buffer the packet */
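/* if the FIFO is full, try to double its size, capped at max_muxing_queue_size */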
278 if (!av_fifo_space(ost->muxing_queue)) {
279 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
280 ost->max_muxing_queue_size);
281 if (new_size <= av_fifo_size(ost->muxing_queue)) {
282 av_log(NULL, AV_LOG_ERROR,
283 "Too many packets buffered for output stream %d:%d.\n",
284 ost->file_index, ost->st->index);
285 exit_program(1);
286 }
287 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
288 if (ret < 0)
289 exit_program(1);
290 }
291 av_packet_move_ref(&tmp_pkt, pkt);
292 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
293 return;
294 }
295
296 /*
297 * Audio encoders may split the packets -- #frames in != #packets out.
298 * But there is no reordering, so we can limit the number of output packets
299 * by simply dropping them here.
300 * Counting encoded video frames needs to be done separately because of
301 * reordering, see do_video_out()
302 */
303 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
304 if (ost->frame_number >= ost->max_frames) {
305 av_packet_unref(pkt);
306 return;
307 }
308 ost->frame_number++;
309 }
310 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
311 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_FACTOR,
312 NULL);
313 ost->quality = sd ? *(int *)sd : -1;
314
315 if (ost->frame_rate.num) {
316 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
317 ost->mux_timebase);
318 }
319 }
320
321 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
322 ost->last_mux_dts != AV_NOPTS_VALUE &&
323 pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
324 av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
325 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
326 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
327 if (exit_on_error) {
328 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
329 exit_program(1);
330 }
331 av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
332 "in incorrect timestamps in the output file.\n",
333 ost->last_mux_dts + 1);
334 pkt->dts = ost->last_mux_dts + 1;
335 if (pkt->pts != AV_NOPTS_VALUE)
336 pkt->pts = FFMAX(pkt->pts, pkt->dts);
337 }
338 ost->last_mux_dts = pkt->dts;
339
340 ost->data_size += pkt->size;
341 ost->packets_written++;
342
343 pkt->stream_index = ost->index;
344 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
345
346 ret = av_interleaved_write_frame(s, pkt);
347 if (ret < 0) {
348 print_error("av_interleaved_write_frame()", ret);
349 exit_program(1);
350 }
351 }
352
353 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
354 {
355 int ret = 0;
356
357 /* apply the output bitstream filters, if any */
358 if (ost->nb_bitstream_filters) {
359 int idx;
360
361 ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
362 if (ret < 0)
363 goto finish;
364
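/* walk the packet down the chain of bitstream filters; bsf_ctx[idx - 1] is the
   filter currently being drained: EAGAIN steps back up the chain, and idx == 0
   means a new input packet is needed */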
365 idx = 1;
366 while (idx) {
367 /* get a packet from the previous filter up the chain */
368 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
369 if (ret == AVERROR(EAGAIN)) {
370 ret = 0;
371 idx--;
372 continue;
373 } else if (ret < 0)
374 goto finish;
375
376 /* send it to the next filter down the chain or to the muxer */
377 if (idx < ost->nb_bitstream_filters) {
378 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
379 if (ret < 0)
380 goto finish;
381 idx++;
382 } else
383 write_packet(of, pkt, ost);
384 }
385 } else
386 write_packet(of, pkt, ost);
387
388 finish:
389 if (ret < 0 && ret != AVERROR_EOF) {
390 av_log(NULL, AV_LOG_FATAL, "Error applying bitstream filters to an output "
391 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
392 exit_program(1);
393 }
394 }
395
396 static int check_recording_time(OutputStream *ost)
397 {
398 OutputFile *of = output_files[ost->file_index];
399
400 if (of->recording_time != INT64_MAX &&
401 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
402 AV_TIME_BASE_Q) >= 0) {
403 ost->finished = 1;
404 return 0;
405 }
406 return 1;
407 }
408
409 static void do_audio_out(OutputFile *of, OutputStream *ost,
410 AVFrame *frame)
411 {
412 AVCodecContext *enc = ost->enc_ctx;
413 AVPacket pkt;
414 int ret;
415
416 av_init_packet(&pkt);
417 pkt.data = NULL;
418 pkt.size = 0;
419
420 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
421 frame->pts = ost->sync_opts;
422 ost->sync_opts = frame->pts + frame->nb_samples;
423
424 ost->samples_encoded += frame->nb_samples;
425 ost->frames_encoded++;
426
427 ret = avcodec_send_frame(enc, frame);
428 if (ret < 0)
429 goto error;
430
431 while (1) {
432 ret = avcodec_receive_packet(enc, &pkt);
433 if (ret == AVERROR(EAGAIN))
434 break;
435 if (ret < 0)
436 goto error;
437
438 output_packet(of, &pkt, ost);
439 }
440
441 return;
442 error:
443 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
444 exit_program(1);
445 }
446
447 static void do_subtitle_out(OutputFile *of,
448 OutputStream *ost,
449 InputStream *ist,
450 AVSubtitle *sub,
451 int64_t pts)
452 {
453 static uint8_t *subtitle_out = NULL;
454 int subtitle_out_max_size = 1024 * 1024;
455 int subtitle_out_size, nb, i;
456 AVCodecContext *enc;
457 AVPacket pkt;
458
459 if (pts == AV_NOPTS_VALUE) {
460 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
461 if (exit_on_error)
462 exit_program(1);
463 return;
464 }
465
466 enc = ost->enc_ctx;
467
468 if (!subtitle_out) {
469 subtitle_out = av_malloc(subtitle_out_max_size);
470 }
471
472 /* Note: DVB subtitles need one packet to draw them and another
473 packet to clear them */
474 /* XXX: signal it in the codec context ? */
475 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
476 nb = 2;
477 else
478 nb = 1;
479
480 for (i = 0; i < nb; i++) {
481 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
482 if (!check_recording_time(ost))
483 return;
484
485 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
486 // start_display_time is required to be 0
487 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
488 sub->end_display_time -= sub->start_display_time;
489 sub->start_display_time = 0;
490
491 ost->frames_encoded++;
492
493 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
494 subtitle_out_max_size, sub);
495 if (subtitle_out_size < 0) {
496 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
497 exit_program(1);
498 }
499
500 av_init_packet(&pkt);
501 pkt.data = subtitle_out;
502 pkt.size = subtitle_out_size;
503 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
504 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
505 /* XXX: the pts correction is handled here. Maybe handling
506 it in the codec would be better */
507 if (i == 0)
508 pkt.pts += 90 * sub->start_display_time;
509 else
510 pkt.pts += 90 * sub->end_display_time;
511 }
512 output_packet(of, &pkt, ost);
513 }
514 }
515
516 static void do_video_out(OutputFile *of,
517 OutputStream *ost,
518 AVFrame *in_picture,
519 int *frame_size)
520 {
521 int ret, format_video_sync;
522 AVPacket pkt;
523 AVCodecContext *enc = ost->enc_ctx;
524
525 *frame_size = 0;
526
527 format_video_sync = video_sync_method;
528 if (format_video_sync == VSYNC_AUTO)
529 format_video_sync = (of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
530 (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
531 if (format_video_sync != VSYNC_PASSTHROUGH &&
532 ost->frame_number &&
533 in_picture->pts != AV_NOPTS_VALUE &&
534 in_picture->pts < ost->sync_opts) {
535 nb_frames_drop++;
536 av_log(NULL, AV_LOG_WARNING,
537 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
538 ost->frame_number, ost->st->index, in_picture->pts);
539 return;
540 }
541
542 if (in_picture->pts == AV_NOPTS_VALUE)
543 in_picture->pts = ost->sync_opts;
544 ost->sync_opts = in_picture->pts;
545
546
547 if (!ost->frame_number)
548 ost->first_pts = in_picture->pts;
549
550 av_init_packet(&pkt);
551 pkt.data = NULL;
552 pkt.size = 0;
553
554 if (ost->frame_number >= ost->max_frames)
555 return;
556
557 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
558 ost->top_field_first >= 0)
559 in_picture->top_field_first = !!ost->top_field_first;
560
561 in_picture->quality = enc->global_quality;
562 in_picture->pict_type = 0;
563 if (ost->forced_kf_index < ost->forced_kf_count &&
564 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
565 in_picture->pict_type = AV_PICTURE_TYPE_I;
566 ost->forced_kf_index++;
567 }
568
569 ost->frames_encoded++;
570
571 ret = avcodec_send_frame(enc, in_picture);
572 if (ret < 0)
573 goto error;
574
575 /*
576 * For video, there may be reordering, so we can't throw away frames on
577 * encoder flush, we need to limit them here, before they go into encoder.
578 */
579 ost->frame_number++;
580
581 while (1) {
582 ret = avcodec_receive_packet(enc, &pkt);
583 if (ret == AVERROR(EAGAIN))
584 break;
585 if (ret < 0)
586 goto error;
587
588 output_packet(of, &pkt, ost);
589 *frame_size = pkt.size;
590
591 /* if two-pass encoding, write stats to the log file */
592 if (ost->logfile && enc->stats_out) {
593 fprintf(ost->logfile, "%s", enc->stats_out);
594 }
595
596 ost->sync_opts++;
597 }
598
599 return;
600 error:
601 av_assert0(ret != AVERROR(EAGAIN) && ret != AVERROR_EOF);
602 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
603 exit_program(1);
604 }
605
606 #if FF_API_CODED_FRAME && FF_API_ERROR_FRAME
607 static double psnr(double d)
608 {
609 return -10.0 * log(d) / log(10.0);
610 }
611 #endif
612
613 static void do_video_stats(OutputStream *ost, int frame_size)
614 {
615 AVCodecContext *enc;
616 int frame_number;
617 double ti1, bitrate, avg_bitrate;
618
619 /* this is executed just the first time do_video_stats is called */
620 if (!vstats_file) {
621 vstats_file = fopen(vstats_filename, "w");
622 if (!vstats_file) {
623 perror("fopen");
624 exit_program(1);
625 }
626 }
627
628 enc = ost->enc_ctx;
629 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
630 frame_number = ost->frame_number;
631 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
632 ost->quality / (float)FF_QP2LAMBDA);
633
634 #if FF_API_CODED_FRAME && FF_API_ERROR_FRAME
635 FF_DISABLE_DEPRECATION_WARNINGS
636 if (enc->flags & AV_CODEC_FLAG_PSNR)
637 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
638 FF_ENABLE_DEPRECATION_WARNINGS
639 #endif
640
641 fprintf(vstats_file,"f_size= %6d ", frame_size);
642 /* compute pts value */
643 ti1 = ost->sync_opts * av_q2d(enc->time_base);
644 if (ti1 < 0.01)
645 ti1 = 0.01;
646
647 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
648 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
649 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
650 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
651 #if FF_API_CODED_FRAME
652 FF_DISABLE_DEPRECATION_WARNINGS
653 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
654 FF_ENABLE_DEPRECATION_WARNINGS
655 #endif
656 }
657 }
658
659 /*
660 * Read one frame from the lavfi output for ost and encode it.
661 */
662 static int poll_filter(OutputStream *ost)
663 {
664 OutputFile *of = output_files[ost->file_index];
665 AVFrame *filtered_frame = NULL;
666 int frame_size, ret;
667
668 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
669 return AVERROR(ENOMEM);
670 }
671 filtered_frame = ost->filtered_frame;
672
673 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
674 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
675 ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame,
676 ost->enc_ctx->frame_size);
677 else
678 ret = av_buffersink_get_frame(ost->filter->filter, filtered_frame);
679
680 if (ret < 0)
681 return ret;
682
683 if (filtered_frame->pts != AV_NOPTS_VALUE) {
684 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
685 filtered_frame->pts = av_rescale_q(filtered_frame->pts,
686 ost->filter->filter->inputs[0]->time_base,
687 ost->enc_ctx->time_base) -
688 av_rescale_q(start_time,
689 AV_TIME_BASE_Q,
690 ost->enc_ctx->time_base);
691 }
692
693 switch (ost->filter->filter->inputs[0]->type) {
694 case AVMEDIA_TYPE_VIDEO:
695 if (!ost->frame_aspect_ratio)
696 ost->enc_ctx->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
697
698 do_video_out(of, ost, filtered_frame, &frame_size);
699 if (vstats_filename && frame_size)
700 do_video_stats(ost, frame_size);
701 break;
702 case AVMEDIA_TYPE_AUDIO:
703 do_audio_out(of, ost, filtered_frame);
704 break;
705 default:
706 // TODO support subtitle filters
707 av_assert0(0);
708 }
709
710 av_frame_unref(filtered_frame);
711
712 return 0;
713 }
714
715 static void finish_output_stream(OutputStream *ost)
716 {
717 OutputFile *of = output_files[ost->file_index];
718 int i;
719
720 ost->finished = 1;
721
722 if (of->shortest) {
723 for (i = 0; i < of->ctx->nb_streams; i++)
724 output_streams[of->ost_index + i]->finished = 1;
725 }
726 }
727
728 /*
729 * Read as many frames as possible from lavfi and encode them.
730 *
731 * Always read from the active stream with the lowest timestamp. If no frames
732 * are available for it then return EAGAIN and wait for more input. This way we
733 * can use lavfi sources that generate an unlimited number of frames without memory
734 * usage exploding.
735 */
736 static int poll_filters(void)
737 {
738 int i, ret = 0;
739
740 while (ret >= 0 && !received_sigterm) {
741 OutputStream *ost = NULL;
742 int64_t min_pts = INT64_MAX;
743
744 /* choose output stream with the lowest timestamp */
745 for (i = 0; i < nb_output_streams; i++) {
746 int64_t pts = output_streams[i]->sync_opts;
747
748 if (!output_streams[i]->filter || output_streams[i]->finished)
749 continue;
750
751 pts = av_rescale_q(pts, output_streams[i]->enc_ctx->time_base,
752 AV_TIME_BASE_Q);
753 if (pts < min_pts) {
754 min_pts = pts;
755 ost = output_streams[i];
756 }
757 }
758
759 if (!ost)
760 break;
761
762 ret = poll_filter(ost);
763
764 if (ret == AVERROR_EOF) {
765 finish_output_stream(ost);
766 ret = 0;
767 } else if (ret == AVERROR(EAGAIN))
768 return 0;
769 }
770
771 return ret;
772 }
773
774 static void print_final_stats(int64_t total_size)
775 {
776 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
777 uint64_t data_size = 0;
778 float percent = -1.0;
779 int i, j;
780
781 for (i = 0; i < nb_output_streams; i++) {
782 OutputStream *ost = output_streams[i];
783 switch (ost->enc_ctx->codec_type) {
784 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
785 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
786 default: other_size += ost->data_size; break;
787 }
788 extra_size += ost->enc_ctx->extradata_size;
789 data_size += ost->data_size;
790 }
791
792 if (data_size && total_size >= data_size)
793 percent = 100.0 * (total_size - data_size) / data_size;
794
795 av_log(NULL, AV_LOG_INFO, "\n");
796 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
797 video_size / 1024.0,
798 audio_size / 1024.0,
799 other_size / 1024.0,
800 extra_size / 1024.0);
801 if (percent >= 0.0)
802 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
803 else
804 av_log(NULL, AV_LOG_INFO, "unknown");
805 av_log(NULL, AV_LOG_INFO, "\n");
806
807 /* print verbose per-stream stats */
808 for (i = 0; i < nb_input_files; i++) {
809 InputFile *f = input_files[i];
810 uint64_t total_packets = 0, total_size = 0;
811
812 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
813 i, f->ctx->filename);
814
815 for (j = 0; j < f->nb_streams; j++) {
816 InputStream *ist = input_streams[f->ist_index + j];
817 enum AVMediaType type = ist->dec_ctx->codec_type;
818
819 total_size += ist->data_size;
820 total_packets += ist->nb_packets;
821
822 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
823 i, j, media_type_string(type));
824 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
825 ist->nb_packets, ist->data_size);
826
827 if (ist->decoding_needed) {
828 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
829 ist->frames_decoded);
830 if (type == AVMEDIA_TYPE_AUDIO)
831 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
832 av_log(NULL, AV_LOG_VERBOSE, "; ");
833 }
834
835 av_log(NULL, AV_LOG_VERBOSE, "\n");
836 }
837
838 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
839 total_packets, total_size);
840 }
841
842 for (i = 0; i < nb_output_files; i++) {
843 OutputFile *of = output_files[i];
844 uint64_t total_packets = 0, total_size = 0;
845
846 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
847 i, of->ctx->filename);
848
849 for (j = 0; j < of->ctx->nb_streams; j++) {
850 OutputStream *ost = output_streams[of->ost_index + j];
851 enum AVMediaType type = ost->enc_ctx->codec_type;
852
853 total_size += ost->data_size;
854 total_packets += ost->packets_written;
855
856 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
857 i, j, media_type_string(type));
858 if (ost->encoding_needed) {
859 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
860 ost->frames_encoded);
861 if (type == AVMEDIA_TYPE_AUDIO)
862 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
863 av_log(NULL, AV_LOG_VERBOSE, "; ");
864 }
865
866 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
867 ost->packets_written, ost->data_size);
868
869 av_log(NULL, AV_LOG_VERBOSE, "\n");
870 }
871
872 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
873 total_packets, total_size);
874 }
875 }
876
877 static void print_report(int is_last_report, int64_t timer_start)
878 {
879 char buf[1024];
880 OutputStream *ost;
881 AVFormatContext *oc;
882 int64_t total_size;
883 AVCodecContext *enc;
884 int frame_number, vid, i;
885 double bitrate, ti1, pts;
886 static int64_t last_time = -1;
887 static int qp_histogram[52];
888
889 if (!print_stats && !is_last_report)
890 return;
891
892 if (!is_last_report) {
893 int64_t cur_time;
894 /* display the report every 0.5 seconds */
895 cur_time = av_gettime_relative();
896 if (last_time == -1) {
897 last_time = cur_time;
898 return;
899 }
900 if ((cur_time - last_time) < 500000)
901 return;
902 last_time = cur_time;
903 }
904
905
906 oc = output_files[0]->ctx;
907
908 total_size = avio_size(oc->pb);
909 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
910 total_size = avio_tell(oc->pb);
911 if (total_size < 0) {
912 char errbuf[128];
913 av_strerror(total_size, errbuf, sizeof(errbuf));
914 av_log(NULL, AV_LOG_VERBOSE, "Bitrate not available, "
915 "avio_tell() failed: %s\n", errbuf);
916 total_size = 0;
917 }
918
919 buf[0] = '\0';
920 ti1 = 1e10;
921 vid = 0;
922 for (i = 0; i < nb_output_streams; i++) {
923 float q = -1;
924 ost = output_streams[i];
925 enc = ost->enc_ctx;
926 if (!ost->stream_copy)
927 q = ost->quality / (float) FF_QP2LAMBDA;
928
929 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
930 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
931 }
932 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
933 float t = (av_gettime_relative() - timer_start) / 1000000.0;
934
935 frame_number = ost->frame_number;
936 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
937 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
938 if (is_last_report)
939 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
940 if (qp_hist) {
941 int j;
942 int qp = lrintf(q);
943 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
944 qp_histogram[qp]++;
945 for (j = 0; j < 32; j++)
946 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
947 }
948
949 #if FF_API_CODED_FRAME && FF_API_ERROR_FRAME
950 FF_DISABLE_DEPRECATION_WARNINGS
951 if (enc->flags & AV_CODEC_FLAG_PSNR) {
952 int j;
953 double error, error_sum = 0;
954 double scale, scale_sum = 0;
955 char type[3] = { 'Y','U','V' };
956 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
957 for (j = 0; j < 3; j++) {
958 if (is_last_report) {
959 error = enc->error[j];
960 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
961 } else {
962 error = enc->coded_frame->error[j];
963 scale = enc->width * enc->height * 255.0 * 255.0;
964 }
965 if (j)
966 scale /= 4;
967 error_sum += error;
968 scale_sum += scale;
969 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
970 }
971 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
972 }
973 FF_ENABLE_DEPRECATION_WARNINGS
974 #endif
975 vid = 1;
976 }
977 /* track the smallest positive output timestamp (in seconds) across all streams */
978 pts = (double)ost->last_mux_dts * av_q2d(ost->mux_timebase);
979 if ((pts < ti1) && (pts > 0))
980 ti1 = pts;
981 }
982 if (ti1 < 0.01)
983 ti1 = 0.01;
984
985 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
986
987 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
988 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
989 (double)total_size / 1024, ti1, bitrate);
990
991 if (nb_frames_drop)
992 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " drop=%d",
993 nb_frames_drop);
994
995 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
996
997 fflush(stderr);
998
999 if (is_last_report)
1000 print_final_stats(total_size);
1001
1002 }
1003
1004 static void flush_encoders(void)
1005 {
1006 int i, ret;
1007
1008 for (i = 0; i < nb_output_streams; i++) {
1009 OutputStream *ost = output_streams[i];
1010 AVCodecContext *enc = ost->enc_ctx;
1011 OutputFile *of = output_files[ost->file_index];
1012 int stop_encoding = 0;
1013
1014 if (!ost->encoding_needed)
1015 continue;
1016
1017 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1018 continue;
1019
1020 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1021 continue;
1022
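/* enter draining mode: a NULL frame tells the encoder that no more input will follow */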
1023 avcodec_send_frame(enc, NULL);
1024
1025 for (;;) {
1026 const char *desc = NULL;
1027
1028 switch (enc->codec_type) {
1029 case AVMEDIA_TYPE_AUDIO:
1030 desc = "Audio";
1031 break;
1032 case AVMEDIA_TYPE_VIDEO:
1033 desc = "Video";
1034 break;
1035 default:
1036 av_assert0(0);
1037 }
1038
1039 if (1) {
1040 AVPacket pkt;
1041 av_init_packet(&pkt);
1042 pkt.data = NULL;
1043 pkt.size = 0;
1044
1045 ret = avcodec_receive_packet(enc, &pkt);
1046 if (ret < 0 && ret != AVERROR_EOF) {
1047 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1048 exit_program(1);
1049 }
1050 if (ost->logfile && enc->stats_out) {
1051 fprintf(ost->logfile, "%s", enc->stats_out);
1052 }
1053 if (ret == AVERROR_EOF) {
1054 stop_encoding = 1;
1055 break;
1056 }
1057 output_packet(of, &pkt, ost);
1058 }
1059
1060 if (stop_encoding)
1061 break;
1062 }
1063 }
1064 }
1065
1066 /*
1067 * Check whether a packet from ist should be written into ost at this time
1068 */
1069 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1070 {
1071 OutputFile *of = output_files[ost->file_index];
1072 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1073
1074 if (ost->source_index != ist_index)
1075 return 0;
1076
1077 if (of->start_time != AV_NOPTS_VALUE && ist->last_dts < of->start_time)
1078 return 0;
1079
1080 return 1;
1081 }
1082
1083 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1084 {
1085 OutputFile *of = output_files[ost->file_index];
1086 InputFile *f = input_files [ist->file_index];
1087 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1088 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1089 AVPacket opkt;
1090
1091 av_init_packet(&opkt);
1092
1093 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1094 !ost->copy_initial_nonkeyframes)
1095 return;
1096
1097 if (of->recording_time != INT64_MAX &&
1098 ist->last_dts >= of->recording_time + start_time) {
1099 ost->finished = 1;
1100 return;
1101 }
1102
1103 if (f->recording_time != INT64_MAX) {
1104 start_time = f->ctx->start_time;
1105 if (f->start_time != AV_NOPTS_VALUE)
1106 start_time += f->start_time;
1107 if (ist->last_dts >= f->recording_time + start_time) {
1108 ost->finished = 1;
1109 return;
1110 }
1111 }
1112
1113 /* force the input stream PTS */
1114 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1115 ost->sync_opts++;
1116
1117 if (pkt->pts != AV_NOPTS_VALUE)
1118 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
1119 else
1120 opkt.pts = AV_NOPTS_VALUE;
1121
1122 if (pkt->dts == AV_NOPTS_VALUE)
1123 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->mux_timebase);
1124 else
1125 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
1126 opkt.dts -= ost_tb_start_time;
1127
1128 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
1129 opkt.flags = pkt->flags;
1130
1131 // FIXME remove the following 2 lines; they shall be replaced by the bitstream filters
1132 if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1133 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1134 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1135 && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
1136 ) {
1137 if (av_parser_change(ost->parser, ost->parser_avctx,
1138 &opkt.data, &opkt.size,
1139 pkt->data, pkt->size,
1140 pkt->flags & AV_PKT_FLAG_KEY)) {
1141 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1142 if (!opkt.buf)
1143 exit_program(1);
1144 }
1145 } else {
1146 opkt.data = pkt->data;
1147 opkt.size = pkt->size;
1148 }
1149
1150 output_packet(of, &opkt, ost);
1151 }
1152
1153 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
1154 // There is the following difference: if you got a frame, you must call this
1155 // function again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
1156 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
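// A rough usage sketch (this is how process_input_packet() below drives it):
//     decode(avctx, frame, &got_frame, pkt);        // first call consumes pkt
//     while (got_frame)                              // then drain with pkt=NULL
//         decode(avctx, frame, &got_frame, NULL);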
1157 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
1158 {
1159 int ret;
1160
1161 *got_frame = 0;
1162
1163 if (pkt) {
1164 ret = avcodec_send_packet(avctx, pkt);
1165 // In particular, we don't expect AVERROR(EAGAIN), because we read all
1166 // decoded frames with avcodec_receive_frame() until done.
1167 if (ret < 0)
1168 return ret == AVERROR_EOF ? 0 : ret;
1169 }
1170
1171 ret = avcodec_receive_frame(avctx, frame);
1172 if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1173 return ret;
1174 if (ret >= 0)
1175 *got_frame = 1;
1176
1177 return 0;
1178 }
1179
1180 int guess_input_channel_layout(InputStream *ist)
1181 {
1182 AVCodecContext *dec = ist->dec_ctx;
1183
1184 if (!dec->channel_layout) {
1185 char layout_name[256];
1186
1187 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1188 if (!dec->channel_layout)
1189 return 0;
1190 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1191 dec->channels, dec->channel_layout);
1192 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1193 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1194 }
1195 return 1;
1196 }
1197
1198 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1199 {
1200 AVFrame *decoded_frame, *f;
1201 AVCodecContext *avctx = ist->dec_ctx;
1202 int i, ret, err = 0, resample_changed;
1203
1204 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1205 return AVERROR(ENOMEM);
1206 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1207 return AVERROR(ENOMEM);
1208 decoded_frame = ist->decoded_frame;
1209
1210 ret = decode(avctx, decoded_frame, got_output, pkt);
1211 if (!*got_output || ret < 0)
1212 return ret;
1213
1214 ist->samples_decoded += decoded_frame->nb_samples;
1215 ist->frames_decoded++;
1216
1217 /* if the decoder provides a pts, use it instead of the last packet pts.
1218 the decoder could be delaying output by a packet or more. */
1219 if (decoded_frame->pts != AV_NOPTS_VALUE)
1220 ist->next_dts = decoded_frame->pts;
1221 else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
1222 decoded_frame->pts = pkt->pts;
1223 }
1224
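/* check whether the incoming audio parameters changed, which requires
   reconfiguring the filtergraphs fed by this stream */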
1225 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1226 ist->resample_channels != avctx->channels ||
1227 ist->resample_channel_layout != decoded_frame->channel_layout ||
1228 ist->resample_sample_rate != decoded_frame->sample_rate;
1229 if (resample_changed) {
1230 char layout1[64], layout2[64];
1231
1232 if (!guess_input_channel_layout(ist)) {
1233 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1234 "layout for Input Stream #%d.%d\n", ist->file_index,
1235 ist->st->index);
1236 exit_program(1);
1237 }
1238 decoded_frame->channel_layout = avctx->channel_layout;
1239
1240 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1241 ist->resample_channel_layout);
1242 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1243 decoded_frame->channel_layout);
1244
1245 av_log(NULL, AV_LOG_INFO,
1246 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1247 ist->file_index, ist->st->index,
1248 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
1249 ist->resample_channels, layout1,
1250 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1251 avctx->channels, layout2);
1252
1253 ist->resample_sample_fmt = decoded_frame->format;
1254 ist->resample_sample_rate = decoded_frame->sample_rate;
1255 ist->resample_channel_layout = decoded_frame->channel_layout;
1256 ist->resample_channels = avctx->channels;
1257
1258 for (i = 0; i < ist->nb_filters; i++) {
1259 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
1260 if (err < 0) {
1261 av_log(NULL, AV_LOG_ERROR,
1262 "Error reconfiguring input stream %d:%d filter %d\n",
1263 ist->file_index, ist->st->index, i);
1264 goto fail;
1265 }
1266 }
1267
1268 for (i = 0; i < nb_filtergraphs; i++)
1269 if (ist_in_filtergraph(filtergraphs[i], ist) &&
1270 configure_filtergraph(filtergraphs[i]) < 0) {
1271 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1272 exit_program(1);
1273 }
1274 }
1275
1276 if (decoded_frame->pts != AV_NOPTS_VALUE)
1277 decoded_frame->pts = av_rescale_q(decoded_frame->pts,
1278 ist->st->time_base,
1279 (AVRational){1, avctx->sample_rate});
1280 ist->nb_samples = decoded_frame->nb_samples;
1281 for (i = 0; i < ist->nb_filters; i++) {
1282 if (i < ist->nb_filters - 1) {
1283 f = ist->filter_frame;
1284 err = av_frame_ref(f, decoded_frame);
1285 if (err < 0)
1286 break;
1287 } else
1288 f = decoded_frame;
1289
1290 err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
1291 if (err < 0)
1292 break;
1293 }
1294
1295 fail:
1296 av_frame_unref(ist->filter_frame);
1297 av_frame_unref(decoded_frame);
1298 return err < 0 ? err : ret;
1299 }
1300
1301 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1302 {
1303 AVFrame *decoded_frame, *f;
1304 int i, ret = 0, err = 0, resample_changed;
1305
1306 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1307 return AVERROR(ENOMEM);
1308 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1309 return AVERROR(ENOMEM);
1310 decoded_frame = ist->decoded_frame;
1311
1312 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
1313 if (!*got_output || ret < 0)
1314 return ret;
1315
1316 ist->frames_decoded++;
1317
1318 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
1319 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
1320 if (err < 0)
1321 goto fail;
1322 }
1323 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
1324
1325 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pts,
1326 decoded_frame->pkt_dts);
1327
1328 if (ist->st->sample_aspect_ratio.num)
1329 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
1330
1331 resample_changed = ist->resample_width != decoded_frame->width ||
1332 ist->resample_height != decoded_frame->height ||
1333 ist->resample_pix_fmt != decoded_frame->format;
1334 if (resample_changed) {
1335 av_log(NULL, AV_LOG_INFO,
1336 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1337 ist->file_index, ist->st->index,
1338 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
1339 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
1340
1341 ret = poll_filters();
1342 if (ret < 0 && ret != AVERROR_EOF) {
1343 char errbuf[128];
1344 av_strerror(ret, errbuf, sizeof(errbuf));
1345
1346 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
1347 }
1348
1349 ist->resample_width = decoded_frame->width;
1350 ist->resample_height = decoded_frame->height;
1351 ist->resample_pix_fmt = decoded_frame->format;
1352
1353 for (i = 0; i < ist->nb_filters; i++) {
1354 err = ifilter_parameters_from_frame(ist->filters[i], decoded_frame);
1355 if (err < 0) {
1356 av_log(NULL, AV_LOG_ERROR,
1357 "Error reconfiguring input stream %d:%d filter %d\n",
1358 ist->file_index, ist->st->index, i);
1359 goto fail;
1360 }
1361 }
1362
1363 for (i = 0; i < nb_filtergraphs; i++)
1364 if (ist_in_filtergraph(filtergraphs[i], ist) &&
1365 configure_filtergraph(filtergraphs[i]) < 0) {
1366 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1367 exit_program(1);
1368 }
1369 }
1370
1371 for (i = 0; i < ist->nb_filters; i++) {
1372 if (i < ist->nb_filters - 1) {
1373 f = ist->filter_frame;
1374 err = av_frame_ref(f, decoded_frame);
1375 if (err < 0)
1376 break;
1377 } else
1378 f = decoded_frame;
1379
1380 err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
1381 if (err < 0)
1382 break;
1383 }
1384
1385 fail:
1386 av_frame_unref(ist->filter_frame);
1387 av_frame_unref(decoded_frame);
1388 return err < 0 ? err : ret;
1389 }
1390
1391 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
1392 {
1393 AVSubtitle subtitle;
1394 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
1395 &subtitle, got_output, pkt);
1396 if (ret < 0)
1397 return ret;
1398 if (!*got_output)
1399 return ret;
1400
1401 ist->frames_decoded++;
1402
1403 for (i = 0; i < nb_output_streams; i++) {
1404 OutputStream *ost = output_streams[i];
1405
1406 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1407 continue;
1408
1409 do_subtitle_out(output_files[ost->file_index], ost, ist, &subtitle, pkt->pts);
1410 }
1411
1412 avsubtitle_free(&subtitle);
1413 return ret;
1414 }
1415
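/* signal EOF to every filtergraph input fed by this stream by sending a NULL frame */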
1416 static int send_filter_eof(InputStream *ist)
1417 {
1418 int i, ret;
1419 for (i = 0; i < ist->nb_filters; i++) {
1420 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
1421 if (ret < 0)
1422 return ret;
1423 }
1424 return 0;
1425 }
1426
1427 /* pkt = NULL means EOF (needed to flush decoder buffers) */
1428 static void process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
1429 {
1430 int i;
1431 int repeating = 0;
1432 AVPacket avpkt;
1433
1434 if (ist->next_dts == AV_NOPTS_VALUE)
1435 ist->next_dts = ist->last_dts;
1436
1437 if (!pkt) {
1438 /* EOF handling */
1439 av_init_packet(&avpkt);
1440 avpkt.data = NULL;
1441 avpkt.size = 0;
1442 } else {
1443 avpkt = *pkt;
1444 }
1445
1446 if (pkt && pkt->dts != AV_NOPTS_VALUE)
1447 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1448
1449 // while we have more to decode, or while the decoder keeps producing output after EOF
1450 while (ist->decoding_needed && (!pkt || avpkt.size > 0)) {
1451 int ret = 0;
1452 int got_output = 0;
1453
1454 if (!repeating)
1455 ist->last_dts = ist->next_dts;
1456
1457 switch (ist->dec_ctx->codec_type) {
1458 case AVMEDIA_TYPE_AUDIO:
1459 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
1460 break;
1461 case AVMEDIA_TYPE_VIDEO:
1462 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output);
1463 if (repeating && !got_output)
1464 ;
1465 else if (pkt && pkt->duration)
1466 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
1467 else if (ist->st->avg_frame_rate.num)
1468 ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
1469 AV_TIME_BASE_Q);
1470 else if (ist->dec_ctx->framerate.num != 0) {
1471 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
1472 ist->dec_ctx->ticks_per_frame;
1473 ist->next_dts += av_rescale_q(ticks, ist->dec_ctx->framerate, AV_TIME_BASE_Q);
1474 }
1475 break;
1476 case AVMEDIA_TYPE_SUBTITLE:
1477 if (repeating)
1478 break;
1479 ret = transcode_subtitles(ist, &avpkt, &got_output);
1480 break;
1481 default:
1482 return;
1483 }
1484
1485 if (ret < 0) {
1486 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
1487 ist->file_index, ist->st->index);
1488 if (exit_on_error)
1489 exit_program(1);
1490 break;
1491 }
1492
1493 if (!got_output)
1494 break;
1495
1496 repeating = 1;
1497 }
1498
1499 /* after flushing, send an EOF on all the filter inputs attached to the stream */
1500 /* except when looping, where we need to flush the decoder but not send an EOF */
1501 if (!pkt && ist->decoding_needed && !no_eof) {
1502 int ret = send_filter_eof(ist);
1503 if (ret < 0) {
1504 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
1505 exit_program(1);
1506 }
1507 }
1508
1509 /* handle stream copy */
1510 if (!ist->decoding_needed) {
1511 ist->last_dts = ist->next_dts;
1512 switch (ist->dec_ctx->codec_type) {
1513 case AVMEDIA_TYPE_AUDIO:
1514 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
1515 ist->dec_ctx->sample_rate;
1516 break;
1517 case AVMEDIA_TYPE_VIDEO:
1518 if (ist->dec_ctx->framerate.num != 0) {
1519 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
1520 ist->next_dts += ((int64_t)AV_TIME_BASE *
1521 ist->dec_ctx->framerate.den * ticks) /
1522 ist->dec_ctx->framerate.num;
1523 }
1524 break;
1525 }
1526 }
1527 for (i = 0; pkt && i < nb_output_streams; i++) {
1528 OutputStream *ost = output_streams[i];
1529
1530 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1531 continue;
1532
1533 do_streamcopy(ist, ost, pkt);
1534 }
1535
1536 return;
1537 }
1538
1539 static void print_sdp(void)
1540 {
1541 char sdp[16384];
1542 int i;
1543 AVFormatContext **avc;
1544
1545 for (i = 0; i < nb_output_files; i++) {
1546 if (!output_files[i]->header_written)
1547 return;
1548 }
1549
1550 avc = av_malloc(sizeof(*avc) * nb_output_files);
1551 if (!avc)
1552 exit_program(1);
1553 for (i = 0; i < nb_output_files; i++)
1554 avc[i] = output_files[i]->ctx;
1555
1556 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1557 printf("SDP:\n%s\n", sdp);
1558 fflush(stdout);
1559 av_freep(&avc);
1560 }
1561
1562 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
1563 {
1564 int i;
1565 for (i = 0; hwaccels[i].name; i++)
1566 if (hwaccels[i].pix_fmt == pix_fmt)
1567 return &hwaccels[i];
1568 return NULL;
1569 }
1570
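/* get_format() callback: pick the first hardware pixel format offered by the
   decoder for which a usable hwaccel initializes successfully; otherwise fall
   back to the first software format in the list */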
1571 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
1572 {
1573 InputStream *ist = s->opaque;
1574 const enum AVPixelFormat *p;
1575 int ret;
1576
1577 for (p = pix_fmts; *p != -1; p++) {
1578 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
1579 const HWAccel *hwaccel;
1580
1581 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1582 break;
1583
1584 hwaccel = get_hwaccel(*p);
1585 if (!hwaccel ||
1586 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
1587 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
1588 continue;
1589
1590 ret = hwaccel->init(s);
1591 if (ret < 0) {
1592 if (ist->hwaccel_id == hwaccel->id) {
1593 av_log(NULL, AV_LOG_FATAL,
1594 "%s hwaccel requested for input stream #%d:%d, "
1595 "but cannot be initialized.\n", hwaccel->name,
1596 ist->file_index, ist->st->index);
1597 return AV_PIX_FMT_NONE;
1598 }
1599 continue;
1600 }
1601 ist->active_hwaccel_id = hwaccel->id;
1602 ist->hwaccel_pix_fmt = *p;
1603 break;
1604 }
1605
1606 return *p;
1607 }
1608
1609 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
1610 {
1611 InputStream *ist = s->opaque;
1612
1613 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
1614 return ist->hwaccel_get_buffer(s, frame, flags);
1615
1616 return avcodec_default_get_buffer2(s, frame, flags);
1617 }
1618
1619 static int init_input_stream(int ist_index, char *error, int error_len)
1620 {
1621 int i, ret;
1622 InputStream *ist = input_streams[ist_index];
1623
1624 for (i = 0; i < ist->nb_filters; i++) {
1625 ret = ifilter_parameters_from_decoder(ist->filters[i], ist->dec_ctx);
1626 if (ret < 0) {
1627 av_log(NULL, AV_LOG_FATAL, "Error initializing filter input\n");
1628 return ret;
1629 }
1630 }
1631
1632 if (ist->decoding_needed) {
1633 AVCodec *codec = ist->dec;
1634 if (!codec) {
1635 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
1636 ist->dec_ctx->codec_id, ist->file_index, ist->st->index);
1637 return AVERROR(EINVAL);
1638 }
1639
1640 ist->dec_ctx->opaque = ist;
1641 ist->dec_ctx->get_format = get_format;
1642 ist->dec_ctx->get_buffer2 = get_buffer;
1643 ist->dec_ctx->thread_safe_callbacks = 1;
1644
1645 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
1646
1647 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
1648 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
1649 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
1650 char errbuf[128];
1651 if (ret == AVERROR_EXPERIMENTAL)
1652 abort_codec_experimental(codec, 0);
1653
1654 av_strerror(ret, errbuf, sizeof(errbuf));
1655
1656 snprintf(error, error_len,
1657 "Error while opening decoder for input stream "
1658 "#%d:%d : %s",
1659 ist->file_index, ist->st->index, errbuf);
1660 return ret;
1661 }
1662 assert_avoptions(ist->decoder_opts);
1663 }
1664
1665 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1666 ist->next_dts = AV_NOPTS_VALUE;
1667 init_pts_correction(&ist->pts_ctx);
1668
1669 return 0;
1670 }
1671
1672 static InputStream *get_input_stream(OutputStream *ost)
1673 {
1674 if (ost->source_index >= 0)
1675 return input_streams[ost->source_index];
1676
1677 if (ost->filter) {
1678 FilterGraph *fg = ost->filter->graph;
1679 int i;
1680
1681 for (i = 0; i < fg->nb_inputs; i++)
1682 if (fg->inputs[i]->ist->dec_ctx->codec_type == ost->enc_ctx->codec_type)
1683 return fg->inputs[i]->ist;
1684 }
1685
1686 return NULL;
1687 }
1688
1689 /* open the muxer when all the streams are initialized */
1690 static int check_init_output_file(OutputFile *of, int file_index)
1691 {
1692 int ret, i;
1693
1694 for (i = 0; i < of->ctx->nb_streams; i++) {
1695 OutputStream *ost = output_streams[of->ost_index + i];
1696 if (!ost->initialized)
1697 return 0;
1698 }
1699
1700 of->ctx->interrupt_callback = int_cb;
1701
1702 ret = avformat_write_header(of->ctx, &of->opts);
1703 if (ret < 0) {
1704 char errbuf[128];
1705
1706 av_strerror(ret, errbuf, sizeof(errbuf));
1707
1708 av_log(NULL, AV_LOG_ERROR,
1709 "Could not write header for output file #%d "
1710 "(incorrect codec parameters ?): %s",
1711 file_index, errbuf);
1712 return ret;
1713 }
1714 assert_avoptions(of->opts);
1715 of->header_written = 1;
1716
1717 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
1718
1719 if (want_sdp)
1720 print_sdp();
1721
1722 /* flush the muxing queues */
1723 for (i = 0; i < of->ctx->nb_streams; i++) {
1724 OutputStream *ost = output_streams[of->ost_index + i];
1725
1726 while (av_fifo_size(ost->muxing_queue)) {
1727 AVPacket pkt;
1728 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
1729 write_packet(of, &pkt, ost);
1730 }
1731 }
1732
1733 return 0;
1734 }
1735
1736 static int init_output_bsfs(OutputStream *ost)
1737 {
1738 AVBSFContext *ctx;
1739 int i, ret;
1740
1741 if (!ost->nb_bitstream_filters)
1742 return 0;
1743
1744 ost->bsf_ctx = av_mallocz_array(ost->nb_bitstream_filters, sizeof(*ost->bsf_ctx));
1745 if (!ost->bsf_ctx)
1746 return AVERROR(ENOMEM);
1747
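/* allocate and chain the bitstream filters: each filter takes its input
   parameters and time base from the previous filter's output (the first one
   takes them from the output stream itself) */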
1748 for (i = 0; i < ost->nb_bitstream_filters; i++) {
1749 ret = av_bsf_alloc(ost->bitstream_filters[i], &ctx);
1750 if (ret < 0) {
1751 av_log(NULL, AV_LOG_ERROR, "Error allocating a bitstream filter context\n");
1752 return ret;
1753 }
1754 ost->bsf_ctx[i] = ctx;
1755
1756 ret = avcodec_parameters_copy(ctx->par_in,
1757 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
1758 if (ret < 0)
1759 return ret;
1760
1761 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
1762
1763 ret = av_bsf_init(ctx);
1764 if (ret < 0) {
1765 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
1766 ost->bitstream_filters[i]->name);
1767 return ret;
1768 }
1769 }
1770
1771 ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
1772 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
1773 if (ret < 0)
1774 return ret;
1775
1776 ost->st->time_base = ctx->time_base_out;
1777
1778 return 0;
1779 }
1780
1781 static int init_output_stream_streamcopy(OutputStream *ost)
1782 {
1783 OutputFile *of = output_files[ost->file_index];
1784 InputStream *ist = get_input_stream(ost);
1785 AVCodecParameters *par_dst = ost->st->codecpar;
1786 AVCodecParameters *par_src = ist->st->codecpar;
1787 AVRational sar;
1788 int i;
1789 uint64_t extra_size;
1790
1791 extra_size = (uint64_t)par_src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
1792 if (extra_size > INT_MAX) {
1793 return AVERROR(EINVAL);
1794 }
1795
1796 ost->st->disposition = ist->st->disposition;
1797
1798 /* if stream_copy is selected, no need to decode or encode */
1799 par_dst->codec_id = par_src->codec_id;
1800 par_dst->codec_type = par_src->codec_type;
1801
1802 if (!par_dst->codec_tag) {
1803 if (!of->ctx->oformat->codec_tag ||
1804 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_dst->codec_id ||
1805 av_codec_get_tag(of->ctx->oformat->codec_tag, par_src->codec_id) <= 0)
1806 par_dst->codec_tag = par_src->codec_tag;
1807 }
1808
1809 par_dst->bit_rate = par_src->bit_rate;
1810 par_dst->field_order = par_src->field_order;
1811 par_dst->chroma_location = par_src->chroma_location;
1812
1813 if (par_src->extradata) {
1814 par_dst->extradata = av_mallocz(extra_size);
1815 if (!par_dst->extradata) {
1816 return AVERROR(ENOMEM);
1817 }
1818 memcpy(par_dst->extradata, par_src->extradata, par_src->extradata_size);
1819 par_dst->extradata_size = par_src->extradata_size;
1820 }
1821
1822 ost->st->time_base = ist->st->time_base;
1823
1824 if (ist->st->nb_side_data) {
1825 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
1826 sizeof(*ist->st->side_data));
1827 if (!ost->st->side_data)
1828 return AVERROR(ENOMEM);
1829
1830 for (i = 0; i < ist->st->nb_side_data; i++) {
1831 const AVPacketSideData *sd_src = &ist->st->side_data[i];
1832 AVPacketSideData *sd_dst = &ost->st->side_data[i];
1833
1834 sd_dst->data = av_malloc(sd_src->size);
1835 if (!sd_dst->data)
1836 return AVERROR(ENOMEM);
1837 memcpy(sd_dst->data, sd_src->data, sd_src->size);
1838 sd_dst->size = sd_src->size;
1839 sd_dst->type = sd_src->type;
1840 ost->st->nb_side_data++;
1841 }
1842 }
1843
1844 ost->parser = av_parser_init(par_dst->codec_id);
1845 ost->parser_avctx = avcodec_alloc_context3(NULL);
1846 if (!ost->parser_avctx)
1847 return AVERROR(ENOMEM);
1848
1849 switch (par_dst->codec_type) {
1850 case AVMEDIA_TYPE_AUDIO:
1851 if (audio_volume != 256) {
1852 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
1853 exit_program(1);
1854 }
1855 par_dst->channel_layout = par_src->channel_layout;
1856 par_dst->sample_rate = par_src->sample_rate;
1857 par_dst->channels = par_src->channels;
1858 par_dst->block_align = par_src->block_align;
1859 break;
1860 case AVMEDIA_TYPE_VIDEO:
1861 par_dst->format = par_src->format;
1862 par_dst->width = par_src->width;
1863 par_dst->height = par_src->height;
1864 if (ost->frame_aspect_ratio)
1865 sar = av_d2q(ost->frame_aspect_ratio * par_dst->height / par_dst->width, 255);
1866 else if (ist->st->sample_aspect_ratio.num)
1867 sar = ist->st->sample_aspect_ratio;
1868 else
1869 sar = par_src->sample_aspect_ratio;
1870 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
1871 break;
1872 case AVMEDIA_TYPE_SUBTITLE:
1873 par_dst->width = par_src->width;
1874 par_dst->height = par_src->height;
1875 break;
1876 case AVMEDIA_TYPE_DATA:
1877 case AVMEDIA_TYPE_ATTACHMENT:
1878 break;
1879 default:
1880 abort();
1881 }
1882
1883 return 0;
1884 }
1885
1886 static void set_encoder_id(OutputFile *of, OutputStream *ost)
1887 {
1888 AVDictionaryEntry *e;
1889
1890 uint8_t *encoder_string;
1891 int encoder_string_len;
1892 int format_flags = 0;
1893
1894 e = av_dict_get(of->opts, "fflags", NULL, 0);
1895 if (e) {
1896 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
1897 if (!o)
1898 return;
1899 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
1900 }
1901
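    /* build the "encoder" metadata tag: LIBAVCODEC_IDENT followed by the
     * encoder name; the ident part is omitted for bitexact output */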
1902 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
1903 encoder_string = av_mallocz(encoder_string_len);
1904 if (!encoder_string)
1905 exit_program(1);
1906
1907 if (!(format_flags & AVFMT_FLAG_BITEXACT))
1908 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
1909 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
1910 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
1911 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
1912 }
1913
1914 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1915 AVCodecContext *avctx)
1916 {
1917 char *p;
1918 int n = 1, i;
1919 int64_t t;
1920
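    /* kf is a comma-separated list of timestamps; count the entries first */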
1921 for (p = kf; *p; p++)
1922 if (*p == ',')
1923 n++;
1924 ost->forced_kf_count = n;
1925 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
1926 if (!ost->forced_kf_pts) {
1927 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
1928 exit_program(1);
1929 }
1930
1931 p = kf;
1932 for (i = 0; i < n; i++) {
1933 char *next = strchr(p, ',');
1934
1935 if (next)
1936 *next++ = 0;
1937
1938 t = parse_time_or_die("force_key_frames", p, 1);
1939 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
1940
1941 p = next;
1942 }
1943 }
1944
1945 static int init_output_stream_encode(OutputStream *ost)
1946 {
1947 InputStream *ist = get_input_stream(ost);
1948 AVCodecContext *enc_ctx = ost->enc_ctx;
1949 AVCodecContext *dec_ctx = NULL;
1950
1951 set_encoder_id(output_files[ost->file_index], ost);
1952
1953 if (ist) {
1954 ost->st->disposition = ist->st->disposition;
1955
1956 dec_ctx = ist->dec_ctx;
1957
1958 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
1959 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
1960 }
1961
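    /* simple filtergraphs are configured here so the encoder parameters below
     * can be taken from the filter output */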
1962 if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
1963 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
1964 filtergraph_is_simple(ost->filter->graph)) {
1965 FilterGraph *fg = ost->filter->graph;
1966
1967 if (configure_filtergraph(fg)) {
1968 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
1969 exit_program(1);
1970 }
1971 }
1972
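    /* derive the encoder parameters from the corresponding filter output */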
1973 switch (enc_ctx->codec_type) {
1974 case AVMEDIA_TYPE_AUDIO:
1975 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
1976 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
1977 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
1978 enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
1979 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
1980 break;
1981 case AVMEDIA_TYPE_VIDEO:
1982 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
1983
1984 enc_ctx->width = ost->filter->filter->inputs[0]->w;
1985 enc_ctx->height = ost->filter->filter->inputs[0]->h;
1986 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
1987 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
1988 av_d2q(ost->frame_aspect_ratio * enc_ctx->height/enc_ctx->width, 255) :
1989 ost->filter->filter->inputs[0]->sample_aspect_ratio;
1990 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
1991
1992 ost->st->avg_frame_rate = ost->frame_rate;
1993
1994 if (dec_ctx &&
1995 (enc_ctx->width != dec_ctx->width ||
1996 enc_ctx->height != dec_ctx->height ||
1997 enc_ctx->pix_fmt != dec_ctx->pix_fmt)) {
1998 enc_ctx->bits_per_raw_sample = 0;
1999 }
2000
2001 if (ost->forced_keyframes)
2002 parse_forced_key_frames(ost->forced_keyframes, ost,
2003 ost->enc_ctx);
2004 break;
2005 case AVMEDIA_TYPE_SUBTITLE:
2006 enc_ctx->time_base = (AVRational){1, 1000};
2007 break;
2008 default:
2009 abort();
2010 break;
2011 }
2012
2013 return 0;
2014 }
2015
2016 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2017 {
2018 int ret = 0;
2019
2020 if (ost->encoding_needed) {
2021 AVCodec *codec = ost->enc;
2022 AVCodecContext *dec = NULL;
2023 InputStream *ist;
2024
2025 ret = init_output_stream_encode(ost);
2026 if (ret < 0)
2027 return ret;
2028
2029 if ((ist = get_input_stream(ost)))
2030 dec = ist->dec_ctx;
2031 if (dec && dec->subtitle_header) {
2032 ost->enc_ctx->subtitle_header = av_malloc(dec->subtitle_header_size);
2033 if (!ost->enc_ctx->subtitle_header)
2034 return AVERROR(ENOMEM);
2035 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2036 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2037 }
2038 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2039 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2040
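        /* pass the hardware frames context from the filter output on to the encoder */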
2041 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2042 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2043 if (!ost->enc_ctx->hw_frames_ctx)
2044 return AVERROR(ENOMEM);
2045 }
2046
2047 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2048 if (ret == AVERROR_EXPERIMENTAL)
2049 abort_codec_experimental(codec, 1);
2050 snprintf(error, error_len,
2051 "Error while opening encoder for output stream #%d:%d - "
2052 "maybe incorrect parameters such as bit_rate, rate, width or height",
2053 ost->file_index, ost->index);
2054 return ret;
2055 }
2056 assert_avoptions(ost->encoder_opts);
2057 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2058 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low. "
2059 "It takes bits/s as argument, not kbits/s\n");
2060
2061 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
2062 if (ret < 0) {
2063 av_log(NULL, AV_LOG_FATAL,
2064 "Error initializing the output stream codec context.\n");
2065 exit_program(1);
2066 }
2067
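        /* copy the global side data generated by the encoder to the output stream */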
2068 if (ost->enc_ctx->nb_coded_side_data) {
2069 int i;
2070
2071 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2072 sizeof(*ost->st->side_data));
2073 if (!ost->st->side_data)
2074 return AVERROR(ENOMEM);
2075
2076 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2077 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2078 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2079
2080 sd_dst->data = av_malloc(sd_src->size);
2081 if (!sd_dst->data)
2082 return AVERROR(ENOMEM);
2083 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2084 sd_dst->size = sd_src->size;
2085 sd_dst->type = sd_src->type;
2086 ost->st->nb_side_data++;
2087 }
2088 }
2089
2090 ost->st->time_base = ost->enc_ctx->time_base;
2091 } else if (ost->stream_copy) {
2092 ret = init_output_stream_streamcopy(ost);
2093 if (ret < 0)
2094 return ret;
2095
2096 /*
2097 * FIXME: review the codec context used by the parser during streamcopy.
2098 * This should go away with the new parser API.
2099 */
2100 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
2101 if (ret < 0)
2102 return ret;
2103 }
2104
2105 /* initialize bitstream filters for the output stream;
2106 * this needs to be done here, because the codec id for streamcopy is
2107 * not known until now */
2108 ret = init_output_bsfs(ost);
2109 if (ret < 0)
2110 return ret;
2111
2112 ost->mux_timebase = ost->st->time_base;
2113
2114 ost->initialized = 1;
2115
2116 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
2117 if (ret < 0)
2118 return ret;
2119
2120 return ret;
2121 }
2122
2123 static int transcode_init(void)
2124 {
2125 int ret = 0, i, j, k;
2126 OutputStream *ost;
2127 InputStream *ist;
2128 char error[1024];
2129
2130 /* init framerate emulation */
2131 for (i = 0; i < nb_input_files; i++) {
2132 InputFile *ifile = input_files[i];
2133 if (ifile->rate_emu)
2134 for (j = 0; j < ifile->nb_streams; j++)
2135 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2136 }
2137
2138 /* init input streams */
2139 for (i = 0; i < nb_input_streams; i++)
2140 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2141 goto dump_format;
2142
2143 /* open each encoder */
2144 for (i = 0; i < nb_output_streams; i++) {
2145 ret = init_output_stream(output_streams[i], error, sizeof(error));
2146 if (ret < 0)
2147 goto dump_format;
2148 }
2149
2150
2151 /* discard unused programs */
2152 for (i = 0; i < nb_input_files; i++) {
2153 InputFile *ifile = input_files[i];
2154 for (j = 0; j < ifile->ctx->nb_programs; j++) {
2155 AVProgram *p = ifile->ctx->programs[j];
2156 int discard = AVDISCARD_ALL;
2157
2158 for (k = 0; k < p->nb_stream_indexes; k++)
2159 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2160 discard = AVDISCARD_DEFAULT;
2161 break;
2162 }
2163 p->discard = discard;
2164 }
2165 }
2166
2167 dump_format:
2168 /* dump the stream mapping */
2169 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2170 for (i = 0; i < nb_input_streams; i++) {
2171 ist = input_streams[i];
2172
2173 for (j = 0; j < ist->nb_filters; j++) {
2174 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
2175 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2176 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2177 ist->filters[j]->name);
2178 if (nb_filtergraphs > 1)
2179 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2180 av_log(NULL, AV_LOG_INFO, "\n");
2181 }
2182 }
2183 }
2184
2185 for (i = 0; i < nb_output_streams; i++) {
2186 ost = output_streams[i];
2187
2188 if (ost->attachment_filename) {
2189 /* an attached file */
2190 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2191 ost->attachment_filename, ost->file_index, ost->index);
2192 continue;
2193 }
2194
2195 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
2196 /* output from a complex graph */
2197 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
2198 if (nb_filtergraphs > 1)
2199 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2200
2201 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2202 ost->index, ost->enc ? ost->enc->name : "?");
2203 continue;
2204 }
2205
2206 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2207 input_streams[ost->source_index]->file_index,
2208 input_streams[ost->source_index]->st->index,
2209 ost->file_index,
2210 ost->index);
2211 if (ost->sync_ist != input_streams[ost->source_index])
2212 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2213 ost->sync_ist->file_index,
2214 ost->sync_ist->st->index);
2215 if (ost->stream_copy)
2216 av_log(NULL, AV_LOG_INFO, " (copy)");
2217 else {
2218 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
2219 const AVCodec *out_codec = ost->enc;
2220 const char *decoder_name = "?";
2221 const char *in_codec_name = "?";
2222 const char *encoder_name = "?";
2223 const char *out_codec_name = "?";
2224 const AVCodecDescriptor *desc;
2225
2226 if (in_codec) {
2227 decoder_name = in_codec->name;
2228 desc = avcodec_descriptor_get(in_codec->id);
2229 if (desc)
2230 in_codec_name = desc->name;
2231 if (!strcmp(decoder_name, in_codec_name))
2232 decoder_name = "native";
2233 }
2234
2235 if (out_codec) {
2236 encoder_name = out_codec->name;
2237 desc = avcodec_descriptor_get(out_codec->id);
2238 if (desc)
2239 out_codec_name = desc->name;
2240 if (!strcmp(encoder_name, out_codec_name))
2241 encoder_name = "native";
2242 }
2243
2244 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
2245 in_codec_name, decoder_name,
2246 out_codec_name, encoder_name);
2247 }
2248 av_log(NULL, AV_LOG_INFO, "\n");
2249 }
2250
2251 if (ret) {
2252 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2253 return ret;
2254 }
2255
2256 return 0;
2257 }
2258
2259 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
2260 static int need_output(void)
2261 {
2262 int i;
2263
2264 for (i = 0; i < nb_output_streams; i++) {
2265 OutputStream *ost = output_streams[i];
2266 OutputFile *of = output_files[ost->file_index];
2267 AVFormatContext *os = output_files[ost->file_index]->ctx;
2268
2269 if (ost->finished ||
2270 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
2271 continue;
2272 if (ost->frame_number >= ost->max_frames) {
2273 int j;
2274 for (j = 0; j < of->ctx->nb_streams; j++)
2275 output_streams[of->ost_index + j]->finished = 1;
2276 continue;
2277 }
2278
2279 return 1;
2280 }
2281
2282 return 0;
2283 }
2284
2285 static InputFile *select_input_file(void)
2286 {
2287 InputFile *ifile = NULL;
2288 int64_t ipts_min = INT64_MAX;
2289 int i;
2290
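    /* choose the input file whose stream has the smallest last DTS, so that
     * inputs are read roughly in interleaved order */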
2291 for (i = 0; i < nb_input_streams; i++) {
2292 InputStream *ist = input_streams[i];
2293 int64_t ipts = ist->last_dts;
2294
2295 if (ist->discard || input_files[ist->file_index]->eagain)
2296 continue;
2297 if (!input_files[ist->file_index]->eof_reached) {
2298 if (ipts < ipts_min) {
2299 ipts_min = ipts;
2300 ifile = input_files[ist->file_index];
2301 }
2302 }
2303 }
2304
2305 return ifile;
2306 }
2307
2308 #if HAVE_PTHREADS
2309 static void *input_thread(void *arg)
2310 {
2311 InputFile *f = arg;
2312 int ret = 0;
2313
2314 while (!transcoding_finished && ret >= 0) {
2315 AVPacket pkt;
2316 ret = av_read_frame(f->ctx, &pkt);
2317
2318 if (ret == AVERROR(EAGAIN)) {
2319 av_usleep(10000);
2320 ret = 0;
2321 continue;
2322 } else if (ret < 0)
2323 break;
2324
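        /* hand the packet to the main thread, waiting while the FIFO is full */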
2325 pthread_mutex_lock(&f->fifo_lock);
2326 while (!av_fifo_space(f->fifo))
2327 pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
2328
2329 av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
2330
2331 pthread_mutex_unlock(&f->fifo_lock);
2332 }
2333
2334 f->finished = 1;
2335 return NULL;
2336 }
2337
2338 static void free_input_threads(void)
2339 {
2340 int i;
2341
2342 if (nb_input_files == 1)
2343 return;
2344
2345 transcoding_finished = 1;
2346
2347 for (i = 0; i < nb_input_files; i++) {
2348 InputFile *f = input_files[i];
2349 AVPacket pkt;
2350
2351 if (!f->fifo || f->joined)
2352 continue;
2353
2354 pthread_mutex_lock(&f->fifo_lock);
2355 while (av_fifo_size(f->fifo)) {
2356 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2357 av_packet_unref(&pkt);
2358 }
2359 pthread_cond_signal(&f->fifo_cond);
2360 pthread_mutex_unlock(&f->fifo_lock);
2361
2362 pthread_join(f->thread, NULL);
2363 f->joined = 1;
2364
2365 while (av_fifo_size(f->fifo)) {
2366 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2367 av_packet_unref(&pkt);
2368 }
2369 av_fifo_free(f->fifo);
2370 }
2371 }
2372
2373 static int init_input_threads(void)
2374 {
2375 int i, ret;
2376
2377 if (nb_input_files == 1)
2378 return 0;
2379
2380 for (i = 0; i < nb_input_files; i++) {
2381 InputFile *f = input_files[i];
2382
2383 if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
2384 return AVERROR(ENOMEM);
2385
2386 pthread_mutex_init(&f->fifo_lock, NULL);
2387 pthread_cond_init (&f->fifo_cond, NULL);
2388
2389 if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
2390 return AVERROR(ret);
2391 }
2392 return 0;
2393 }
2394
2395 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2396 {
2397 int ret = 0;
2398
2399 pthread_mutex_lock(&f->fifo_lock);
2400
2401 if (av_fifo_size(f->fifo)) {
2402 av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
2403 pthread_cond_signal(&f->fifo_cond);
2404 } else {
2405 if (f->finished)
2406 ret = AVERROR_EOF;
2407 else
2408 ret = AVERROR(EAGAIN);
2409 }
2410
2411 pthread_mutex_unlock(&f->fifo_lock);
2412
2413 return ret;
2414 }
2415 #endif
2416
2417 static int get_input_packet(InputFile *f, AVPacket *pkt)
2418 {
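    /* with rate emulation (-re), do not return a packet before its timestamp
     * has elapsed in real time */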
2419 if (f->rate_emu) {
2420 int i;
2421 for (i = 0; i < f->nb_streams; i++) {
2422 InputStream *ist = input_streams[f->ist_index + i];
2423 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2424 int64_t now = av_gettime_relative() - ist->start;
2425 if (pts > now)
2426 return AVERROR(EAGAIN);
2427 }
2428 }
2429
2430 #if HAVE_PTHREADS
2431 if (nb_input_files > 1)
2432 return get_input_packet_mt(f, pkt);
2433 #endif
2434 return av_read_frame(f->ctx, pkt);
2435 }
2436
2437 static int got_eagain(void)
2438 {
2439 int i;
2440 for (i = 0; i < nb_input_files; i++)
2441 if (input_files[i]->eagain)
2442 return 1;
2443 return 0;
2444 }
2445
2446 static void reset_eagain(void)
2447 {
2448 int i;
2449 for (i = 0; i < nb_input_files; i++)
2450 input_files[i]->eagain = 0;
2451 }
2452
2453 // set *duration to max(tmp, *duration) in a suitable time base and return that duration's time base
2454 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
2455 AVRational time_base)
2456 {
2457 int ret;
2458
2459 if (!*duration) {
2460 *duration = tmp;
2461 return tmp_time_base;
2462 }
2463
2464 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
2465 if (ret < 0) {
2466 *duration = tmp;
2467 return tmp_time_base;
2468 }
2469
2470 return time_base;
2471 }
2472
2473 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
2474 {
2475 InputStream *ist;
2476 AVCodecContext *avctx;
2477 int i, ret, has_audio = 0;
2478 int64_t duration = 0;
2479
2480 ret = av_seek_frame(is, -1, is->start_time, 0);
2481 if (ret < 0)
2482 return ret;
2483
2484 for (i = 0; i < ifile->nb_streams; i++) {
2485 ist = input_streams[ifile->ist_index + i];
2486 avctx = ist->dec_ctx;
2487
2488 // flush decoders
2489 if (ist->decoding_needed) {
2490 process_input_packet(ist, NULL, 1);
2491 avcodec_flush_buffers(avctx);
2492 }
2493
2494 /* duration is the length of the last frame in a stream;
2495 * when an audio stream is present we do not care about the
2496 * last video frame's length because it is not defined exactly */
2497 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
2498 has_audio = 1;
2499 }
2500
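    /* compute the duration of this iteration of the input; it is added to the
     * timestamps of packets read after the seek */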
2501 for (i = 0; i < ifile->nb_streams; i++) {
2502 ist = input_streams[ifile->ist_index + i];
2503 avctx = ist->dec_ctx;
2504
2505 if (has_audio) {
2506 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
2507 AVRational sample_rate = {1, avctx->sample_rate};
2508
2509 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
2510 } else
2511 continue;
2512 } else {
2513 if (ist->framerate.num) {
2514 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
2515 } else if (ist->st->avg_frame_rate.num) {
2516 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
2517 } else duration = 1;
2518 }
2519 if (!ifile->duration)
2520 ifile->time_base = ist->st->time_base;
2521 /* max_pts - min_pts is the duration of the stream without the last
2522 * frame; add the last frame's duration to get the total */
2523 duration += ist->max_pts - ist->min_pts;
2524 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
2525 ifile->time_base);
2526 }
2527
2528 if (ifile->loop > 0)
2529 ifile->loop--;
2530
2531 return ret;
2532 }
2533
2534 /*
2535 * Read one packet from an input file and send it for
2536 * - decoding -> lavfi (audio/video)
2537 * - decoding -> encoding -> muxing (subtitles)
2538 * - muxing (streamcopy)
2539 *
2540 * Return
2541 * - 0 -- one packet was read and processed
2542 * - AVERROR(EAGAIN) -- no packets were available for selected file,
2543 * this function should be called again
2544 * - AVERROR_EOF -- this function should not be called again
2545 */
2546 static int process_input(void)
2547 {
2548 InputFile *ifile;
2549 AVFormatContext *is;
2550 InputStream *ist;
2551 AVPacket pkt;
2552 int ret, i, j;
2553 int64_t duration;
2554
2555 /* select the stream that we must read now */
2556 ifile = select_input_file();
2557 /* if none, all input is finished */
2558 if (!ifile) {
2559 if (got_eagain()) {
2560 reset_eagain();
2561 av_usleep(10000);
2562 return AVERROR(EAGAIN);
2563 }
2564 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from.\n");
2565 return AVERROR_EOF;
2566 }
2567
2568 is = ifile->ctx;
2569 ret = get_input_packet(ifile, &pkt);
2570
2571 if (ret == AVERROR(EAGAIN)) {
2572 ifile->eagain = 1;
2573 return ret;
2574 }
2575 if (ret < 0 && ifile->loop) {
2576 if ((ret = seek_to_start(ifile, is)) < 0)
2577 return ret;
2578 ret = get_input_packet(ifile, &pkt);
2579 }
2580 if (ret < 0) {
2581 if (ret != AVERROR_EOF) {
2582 print_error(is->filename, ret);
2583 if (exit_on_error)
2584 exit_program(1);
2585 }
2586 ifile->eof_reached = 1;
2587
2588 for (i = 0; i < ifile->nb_streams; i++) {
2589 ist = input_streams[ifile->ist_index + i];
2590 if (ist->decoding_needed)
2591 process_input_packet(ist, NULL, 0);
2592
2593 /* mark all outputs that don't go through lavfi as finished */
2594 for (j = 0; j < nb_output_streams; j++) {
2595 OutputStream *ost = output_streams[j];
2596
2597 if (ost->source_index == ifile->ist_index + i &&
2598 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
2599 finish_output_stream(ost);
2600 }
2601 }
2602
2603 return AVERROR(EAGAIN);
2604 }
2605
2606 reset_eagain();
2607
2608 if (do_pkt_dump) {
2609 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2610 is->streams[pkt.stream_index]);
2611 }
2612 /* the following test is needed in case new streams appear
2613 dynamically in the stream: we ignore them */
2614 if (pkt.stream_index >= ifile->nb_streams)
2615 goto discard_packet;
2616
2617 ist = input_streams[ifile->ist_index + pkt.stream_index];
2618
2619 ist->data_size += pkt.size;
2620 ist->nb_packets++;
2621
2622 if (ist->discard)
2623 goto discard_packet;
2624
2625 /* add the stream-global side data to the first packet */
2626 if (ist->nb_packets == 1)
2627 for (i = 0; i < ist->st->nb_side_data; i++) {
2628 AVPacketSideData *src_sd = &ist->st->side_data[i];
2629 uint8_t *dst_data;
2630
2631 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
2632 continue;
2633 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
2634 continue;
2635
2636 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
2637 if (!dst_data)
2638 exit_program(1);
2639
2640 memcpy(dst_data, src_sd->data, src_sd->size);
2641 }
2642
2643 if (pkt.dts != AV_NOPTS_VALUE)
2644 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2645 if (pkt.pts != AV_NOPTS_VALUE)
2646 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2647
2648 if (pkt.pts != AV_NOPTS_VALUE)
2649 pkt.pts *= ist->ts_scale;
2650 if (pkt.dts != AV_NOPTS_VALUE)
2651 pkt.dts *= ist->ts_scale;
2652
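    /* handle timestamp discontinuities in formats that allow them (e.g. MPEG-TS)
     * by folding the jump into the per-file timestamp offset */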
2653 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2654 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
2655 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
2656 (is->iformat->flags & AVFMT_TS_DISCONT)) {
2657 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2658 int64_t delta = pkt_dts - ist->next_dts;
2659
2660 if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
2661 ifile->ts_offset -= delta;
2662 av_log(NULL, AV_LOG_DEBUG,
2663 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2664 delta, ifile->ts_offset);
2665 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2666 if (pkt.pts != AV_NOPTS_VALUE)
2667 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2668 }
2669 }
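    /* when the input is looped, shift timestamps by the total duration of the
     * previous iterations (zero otherwise) */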
2670 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
2671 if (pkt.pts != AV_NOPTS_VALUE) {
2672 pkt.pts += duration;
2673 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
2674 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
2675 }
2676
2677 if (pkt.dts != AV_NOPTS_VALUE)
2678 pkt.dts += duration;
2679
2680 process_input_packet(ist, &pkt, 0);
2681
2682 discard_packet:
2683 av_packet_unref(&pkt);
2684
2685 return 0;
2686 }
2687
2688 /*
2689 * The following code is the main loop of the file converter
2690 */
2691 static int transcode(void)
2692 {
2693 int ret, i, need_input = 1;
2694 AVFormatContext *os;
2695 OutputStream *ost;
2696 InputStream *ist;
2697 int64_t timer_start;
2698
2699 ret = transcode_init();
2700 if (ret < 0)
2701 goto fail;
2702
2703 av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2704 term_init();
2705
2706 timer_start = av_gettime_relative();
2707
2708 #if HAVE_PTHREADS
2709 if ((ret = init_input_threads()) < 0)
2710 goto fail;
2711 #endif
2712
2713 while (!received_sigterm) {
2714 /* check if there's any stream where output is still needed */
2715 if (!need_output()) {
2716 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
2717 break;
2718 }
2719
2720 /* read and process one input packet if needed */
2721 if (need_input) {
2722 ret = process_input();
2723 if (ret == AVERROR_EOF)
2724 need_input = 0;
2725 }
2726
2727 ret = poll_filters();
2728 if (ret < 0 && ret != AVERROR_EOF) {
2729 char errbuf[128];
2730 av_strerror(ret, errbuf, sizeof(errbuf));
2731
2732 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2733 break;
2734 }
2735
2736 /* dump report by using the first video and audio output streams */
2737 print_report(0, timer_start);
2738 }
2739 #if HAVE_PTHREADS
2740 free_input_threads();
2741 #endif
2742
2743 /* at the end of stream, we must flush the decoder buffers */
2744 for (i = 0; i < nb_input_streams; i++) {
2745 ist = input_streams[i];
2746 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
2747 process_input_packet(ist, NULL, 0);
2748 }
2749 }
2750 poll_filters();
2751 flush_encoders();
2752
2753 term_exit();
2754
2755 /* write the trailer if needed and close file */
2756 for (i = 0; i < nb_output_files; i++) {
2757 os = output_files[i]->ctx;
2758 if (!output_files[i]->header_written) {
2759 av_log(NULL, AV_LOG_ERROR,
2760 "Nothing was written into output file %d (%s), because "
2761 "at least one of its streams received no packets.\n",
2762 i, os->filename);
2763 continue;
2764 }
2765 av_write_trailer(os);
2766 }
2767
2768 /* dump report by using the first video and audio streams */
2769 print_report(1, timer_start);
2770
2771 /* close each encoder */
2772 for (i = 0; i < nb_output_streams; i++) {
2773 ost = output_streams[i];
2774 if (ost->encoding_needed) {
2775 av_freep(&ost->enc_ctx->stats_in);
2776 }
2777 }
2778
2779 /* close each decoder */
2780 for (i = 0; i < nb_input_streams; i++) {
2781 ist = input_streams[i];
2782 if (ist->decoding_needed) {
2783 avcodec_close(ist->dec_ctx);
2784 if (ist->hwaccel_uninit)
2785 ist->hwaccel_uninit(ist->dec_ctx);
2786 }
2787 }
2788
2789 av_buffer_unref(&hw_device_ctx);
2790
2791 /* finished! */
2792 ret = 0;
2793
2794 fail:
2795 #if HAVE_PTHREADS
2796 free_input_threads();
2797 #endif
2798
2799 if (output_streams) {
2800 for (i = 0; i < nb_output_streams; i++) {
2801 ost = output_streams[i];
2802 if (ost) {
2803 if (ost->logfile) {
2804 fclose(ost->logfile);
2805 ost->logfile = NULL;
2806 }
2807 av_free(ost->forced_kf_pts);
2808 av_dict_free(&ost->encoder_opts);
2809 av_dict_free(&ost->resample_opts);
2810 }
2811 }
2812 }
2813 return ret;
2814 }
2815
2816 static int64_t getutime(void)
2817 {
2818 #if HAVE_GETRUSAGE
2819 struct rusage rusage;
2820
2821 getrusage(RUSAGE_SELF, &rusage);
2822 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
2823 #elif HAVE_GETPROCESSTIMES
2824 HANDLE proc;
2825 FILETIME c, e, k, u;
2826 proc = GetCurrentProcess();
2827 GetProcessTimes(proc, &c, &e, &k, &u);
2828 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
2829 #else
2830 return av_gettime_relative();
2831 #endif
2832 }
2833
2834 static int64_t getmaxrss(void)
2835 {
2836 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
2837 struct rusage rusage;
2838 getrusage(RUSAGE_SELF, &rusage);
2839 return (int64_t)rusage.ru_maxrss * 1024;
2840 #elif HAVE_GETPROCESSMEMORYINFO
2841 HANDLE proc;
2842 PROCESS_MEMORY_COUNTERS memcounters;
2843 proc = GetCurrentProcess();
2844 memcounters.cb = sizeof(memcounters);
2845 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
2846 return memcounters.PeakPagefileUsage;
2847 #else
2848 return 0;
2849 #endif
2850 }
2851
2852 int main(int argc, char **argv)
2853 {
2854 int i, ret;
2855 int64_t ti;
2856
2857 register_exit(avconv_cleanup);
2858
2859 av_log_set_flags(AV_LOG_SKIP_REPEATED);
2860 parse_loglevel(argc, argv, options);
2861
2862 avcodec_register_all();
2863 #if CONFIG_AVDEVICE
2864 avdevice_register_all();
2865 #endif
2866 avfilter_register_all();
2867 av_register_all();
2868 avformat_network_init();
2869
2870 show_banner();
2871
2872 /* parse options and open all input/output files */
2873 ret = avconv_parse_options(argc, argv);
2874 if (ret < 0)
2875 exit_program(1);
2876
2877 if (nb_output_files <= 0 && nb_input_files == 0) {
2878 show_usage();
2879 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2880 exit_program(1);
2881 }
2882
2883 /* file converter / grab */
2884 if (nb_output_files <= 0) {
2885 fprintf(stderr, "At least one output file must be specified\n");
2886 exit_program(1);
2887 }
2888
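    /* an SDP is only wanted when every output uses the RTP muxer */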
2889 for (i = 0; i < nb_output_files; i++) {
2890 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
2891 want_sdp = 0;
2892 }
2893
2894 ti = getutime();
2895 if (transcode() < 0)
2896 exit_program(1);
2897 ti = getutime() - ti;
2898 if (do_benchmark) {
2899 int maxrss = getmaxrss() / 1024;
2900 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
2901 }
2902
2903 exit_program(0);
2904 return 0;
2905 }