avconv: Flush output BSFs when stream copy reaches EOF
[libav.git] / avtools / avconv.c
1 /*
2 * avconv main
3 * Copyright (c) 2000-2011 The Libav developers
4 *
5 * This file is part of Libav.
6 *
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "config.h"
23 #include <ctype.h>
24 #include <string.h>
25 #include <math.h>
26 #include <stdlib.h>
27 #include <errno.h>
28 #include <signal.h>
29 #include <limits.h>
30 #include <stdint.h>
31
32 #include "libavformat/avformat.h"
33 #include "libavdevice/avdevice.h"
34 #include "libswscale/swscale.h"
35 #include "libavresample/avresample.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/channel_layout.h"
38 #include "libavutil/parseutils.h"
39 #include "libavutil/samplefmt.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/hwcontext.h"
42 #include "libavutil/internal.h"
43 #include "libavutil/intreadwrite.h"
44 #include "libavutil/dict.h"
45 #include "libavutil/mathematics.h"
46 #include "libavutil/pixdesc.h"
47 #include "libavutil/avstring.h"
48 #include "libavutil/libm.h"
49 #include "libavutil/imgutils.h"
50 #include "libavutil/time.h"
51 #include "libavformat/os_support.h"
52
53 # include "libavfilter/avfilter.h"
54 # include "libavfilter/buffersrc.h"
55 # include "libavfilter/buffersink.h"
56
57 #if HAVE_SYS_RESOURCE_H
58 #include <sys/time.h>
59 #include <sys/types.h>
60 #include <sys/resource.h>
61 #elif HAVE_GETPROCESSTIMES
62 #include <windows.h>
63 #endif
64 #if HAVE_GETPROCESSMEMORYINFO
65 #include <windows.h>
66 #include <psapi.h>
67 #endif
68
69 #if HAVE_SYS_SELECT_H
70 #include <sys/select.h>
71 #endif
72
73 #if HAVE_PTHREADS
74 #include <pthread.h>
75 #endif
76
77 #include <time.h>
78
79 #include "avconv.h"
80 #include "cmdutils.h"
81
82 #include "libavutil/avassert.h"
83
84 const char program_name[] = "avconv";
85 const int program_birth_year = 2000;
86
87 static FILE *vstats_file;
88
89 static int nb_frames_drop = 0;
90
91 static int want_sdp = 1;
92
93 #if HAVE_PTHREADS
94 /* signal to input threads that they should exit; set by the main thread */
95 static int transcoding_finished;
96 #endif
97
98 InputStream **input_streams = NULL;
99 int nb_input_streams = 0;
100 InputFile **input_files = NULL;
101 int nb_input_files = 0;
102
103 OutputStream **output_streams = NULL;
104 int nb_output_streams = 0;
105 OutputFile **output_files = NULL;
106 int nb_output_files = 0;
107
108 FilterGraph **filtergraphs;
109 int nb_filtergraphs;
110
111 static void term_exit(void)
112 {
113 av_log(NULL, AV_LOG_QUIET, "");
114 }
115
116 static volatile int received_sigterm = 0;
117 static volatile int received_nb_signals = 0;
118
119 static void
120 sigterm_handler(int sig)
121 {
122 received_sigterm = sig;
123 received_nb_signals++;
124 term_exit();
125 }
126
127 static void term_init(void)
128 {
129 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
130 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
131 #ifdef SIGXCPU
132 signal(SIGXCPU, sigterm_handler);
133 #endif
134 }
135
136 static int decode_interrupt_cb(void *ctx)
137 {
138 return received_nb_signals > 1;
139 }
140
141 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
142
143 static void avconv_cleanup(int ret)
144 {
145 int i, j;
146
147 for (i = 0; i < nb_filtergraphs; i++) {
148 FilterGraph *fg = filtergraphs[i];
149 avfilter_graph_free(&fg->graph);
150 for (j = 0; j < fg->nb_inputs; j++) {
151 while (av_fifo_size(fg->inputs[j]->frame_queue)) {
152 AVFrame *frame;
153 av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
154 sizeof(frame), NULL);
155 av_frame_free(&frame);
156 }
157 av_fifo_free(fg->inputs[j]->frame_queue);
158 av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
159 av_freep(&fg->inputs[j]->name);
160 av_freep(&fg->inputs[j]);
161 }
162 av_freep(&fg->inputs);
163 for (j = 0; j < fg->nb_outputs; j++) {
164 av_freep(&fg->outputs[j]->name);
165 av_freep(&fg->outputs[j]->formats);
166 av_freep(&fg->outputs[j]->channel_layouts);
167 av_freep(&fg->outputs[j]->sample_rates);
168 av_freep(&fg->outputs[j]);
169 }
170 av_freep(&fg->outputs);
171 av_freep(&fg->graph_desc);
172
173 av_freep(&filtergraphs[i]);
174 }
175 av_freep(&filtergraphs);
176
177 /* close files */
178 for (i = 0; i < nb_output_files; i++) {
179 OutputFile *of = output_files[i];
180 AVFormatContext *s = of->ctx;
181 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
182 avio_close(s->pb);
183 avformat_free_context(s);
184 av_dict_free(&of->opts);
185
186 av_freep(&output_files[i]);
187 }
188 for (i = 0; i < nb_output_streams; i++) {
189 OutputStream *ost = output_streams[i];
190
191 for (j = 0; j < ost->nb_bitstream_filters; j++)
192 av_bsf_free(&ost->bsf_ctx[j]);
193 av_freep(&ost->bsf_ctx);
194
195 av_frame_free(&ost->filtered_frame);
196
197 av_parser_close(ost->parser);
198 avcodec_free_context(&ost->parser_avctx);
199
200 av_freep(&ost->forced_keyframes);
201 av_freep(&ost->avfilter);
202 av_freep(&ost->logfile_prefix);
203
204 avcodec_free_context(&ost->enc_ctx);
205
206 if (ost->muxing_queue) {
207 while (av_fifo_size(ost->muxing_queue)) {
208 AVPacket pkt;
209 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
210 av_packet_unref(&pkt);
211 }
212 av_fifo_free(ost->muxing_queue);
213 }
214 av_freep(&output_streams[i]);
215 }
216 for (i = 0; i < nb_input_files; i++) {
217 avformat_close_input(&input_files[i]->ctx);
218 av_freep(&input_files[i]);
219 }
220 for (i = 0; i < nb_input_streams; i++) {
221 InputStream *ist = input_streams[i];
222
223 av_frame_free(&ist->decoded_frame);
224 av_frame_free(&ist->filter_frame);
225 av_dict_free(&ist->decoder_opts);
226 av_freep(&ist->filters);
227 av_freep(&ist->hwaccel_device);
228
229 avcodec_free_context(&ist->dec_ctx);
230
231 av_freep(&input_streams[i]);
232 }
233
234 if (vstats_file)
235 fclose(vstats_file);
236 av_free(vstats_filename);
237
238 av_freep(&input_streams);
239 av_freep(&input_files);
240 av_freep(&output_streams);
241 av_freep(&output_files);
242
243 uninit_opts();
244
245 avformat_network_deinit();
246
247 if (received_sigterm) {
248 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
249 (int) received_sigterm);
250 exit (255);
251 }
252 }
253
254 void assert_avoptions(AVDictionary *m)
255 {
256 AVDictionaryEntry *t;
257 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
258 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
259 exit_program(1);
260 }
261 }
262
263 static void abort_codec_experimental(AVCodec *c, int encoder)
264 {
265 const char *codec_string = encoder ? "encoder" : "decoder";
266 AVCodec *codec;
267 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
268 "results.\nAdd '-strict experimental' if you want to use it.\n",
269 codec_string, c->name);
270 codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id);
271 if (!(codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL))
272 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
273 codec_string, codec->name);
274 exit_program(1);
275 }
276
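/*
 * Write one packet for ost into the output file.  While the muxer is not
 * initialized yet (header not written), the packet is buffered in
 * ost->muxing_queue instead.  This also enforces ost->max_frames for every
 * stream except encoded video (which is counted in do_video_out() because
 * of reordering) and keeps the muxed DTS monotonically increasing before
 * handing the packet to av_interleaved_write_frame().
 */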
277 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
278 {
279 AVFormatContext *s = of->ctx;
280 AVStream *st = ost->st;
281 int ret;
282
283 if (!of->header_written) {
284 AVPacket tmp_pkt;
285 /* the muxer is not initialized yet, buffer the packet */
286 if (!av_fifo_space(ost->muxing_queue)) {
287 int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
288 ost->max_muxing_queue_size);
289 if (new_size <= av_fifo_size(ost->muxing_queue)) {
290 av_log(NULL, AV_LOG_ERROR,
291 "Too many packets buffered for output stream %d:%d.\n",
292 ost->file_index, ost->st->index);
293 exit_program(1);
294 }
295 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
296 if (ret < 0)
297 exit_program(1);
298 }
299 av_packet_move_ref(&tmp_pkt, pkt);
300 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
301 return;
302 }
303
304 /*
305 * Audio encoders may split the packets -- #frames in != #packets out.
306 * But there is no reordering, so we can limit the number of output packets
307 * by simply dropping them here.
308 * Counting encoded video frames needs to be done separately because of
309 * reordering, see do_video_out()
310 */
311 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
312 if (ost->frame_number >= ost->max_frames) {
313 av_packet_unref(pkt);
314 return;
315 }
316 ost->frame_number++;
317 }
318 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
319 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_FACTOR,
320 NULL);
321 ost->quality = sd ? *(int *)sd : -1;
322
323 if (ost->frame_rate.num) {
324 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
325 ost->mux_timebase);
326 }
327 }
328
329 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
330
331 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
332 ost->last_mux_dts != AV_NOPTS_VALUE &&
333 pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
334 av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
335 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
336 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
337 if (exit_on_error) {
338 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
339 exit_program(1);
340 }
341 av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
342 "in incorrect timestamps in the output file.\n",
343 ost->last_mux_dts + 1);
344 pkt->dts = ost->last_mux_dts + 1;
345 if (pkt->pts != AV_NOPTS_VALUE)
346 pkt->pts = FFMAX(pkt->pts, pkt->dts);
347 }
348 ost->last_mux_dts = pkt->dts;
349
350 ost->data_size += pkt->size;
351 ost->packets_written++;
352
353 pkt->stream_index = ost->index;
354
355 ret = av_interleaved_write_frame(s, pkt);
356 if (ret < 0) {
357 print_error("av_interleaved_write_frame()", ret);
358 exit_program(1);
359 }
360 }
361
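/*
 * Send pkt through ost's output bitstream filter chain, if any, and pass
 * every packet that comes out to write_packet().  When eof is set, NULL is
 * sent into the first filter and the whole chain is drained; both encoder
 * flushing and stream copy use this to flush the BSFs at end of stream.
 */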
362 static void output_packet(OutputFile *of, AVPacket *pkt,
363 OutputStream *ost, int eof)
364 {
365 int ret = 0;
366
367 /* apply the output bitstream filters, if any */
368 if (ost->nb_bitstream_filters) {
369 int idx;
370
371 ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
372 if (ret < 0)
373 goto finish;
374
375 eof = 0;
376 idx = 1;
377 while (idx) {
378 /* get a packet from the previous filter up the chain */
379 ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
380 if (ret == AVERROR(EAGAIN)) {
381 ret = 0;
382 idx--;
383 continue;
384 } else if (ret == AVERROR_EOF) {
385 eof = 1;
386 } else if (ret < 0)
387 goto finish;
388
389 /* send it to the next filter down the chain or to the muxer */
390 if (idx < ost->nb_bitstream_filters) {
391 ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
392 if (ret < 0)
393 goto finish;
394 idx++;
395 eof = 0;
396 } else if (eof)
397 goto finish;
398 else
399 write_packet(of, pkt, ost);
400 }
401 } else if (!eof)
402 write_packet(of, pkt, ost);
403
404 finish:
405 if (ret < 0 && ret != AVERROR_EOF) {
406 av_log(NULL, AV_LOG_FATAL, "Error applying bitstream filters to an output "
407 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
408 exit_program(1);
409 }
410 }
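/*
 * For reference, the draining pattern the loop above applies per filter,
 * shown for a single AVBSFContext (sketch only; mux_one() stands in for
 * write_packet()):
 *
 *     av_bsf_send_packet(ctx, NULL);                 // signal EOF to the BSF
 *     while (av_bsf_receive_packet(ctx, &pkt) >= 0)  // drain buffered packets
 *         mux_one(&pkt);
 *     // further calls return AVERROR_EOF
 */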
411
412 static int check_recording_time(OutputStream *ost)
413 {
414 OutputFile *of = output_files[ost->file_index];
415
416 if (of->recording_time != INT64_MAX &&
417 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
418 AV_TIME_BASE_Q) >= 0) {
419 ost->finished = 1;
420 return 0;
421 }
422 return 1;
423 }
424
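/*
 * Encode one filtered audio frame for ost using avcodec_send_frame() /
 * avcodec_receive_packet() and mux every packet the encoder returns.
 */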
425 static void do_audio_out(OutputFile *of, OutputStream *ost,
426 AVFrame *frame)
427 {
428 AVCodecContext *enc = ost->enc_ctx;
429 AVPacket pkt;
430 int ret;
431
432 av_init_packet(&pkt);
433 pkt.data = NULL;
434 pkt.size = 0;
435
436 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
437 frame->pts = ost->sync_opts;
438 ost->sync_opts = frame->pts + frame->nb_samples;
439
440 ost->samples_encoded += frame->nb_samples;
441 ost->frames_encoded++;
442
443 ret = avcodec_send_frame(enc, frame);
444 if (ret < 0)
445 goto error;
446
447 while (1) {
448 ret = avcodec_receive_packet(enc, &pkt);
449 if (ret == AVERROR(EAGAIN))
450 break;
451 if (ret < 0)
452 goto error;
453
454 output_packet(of, &pkt, ost, 0);
455 }
456
457 return;
458 error:
459 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
460 exit_program(1);
461 }
462
463 static void do_subtitle_out(OutputFile *of,
464 OutputStream *ost,
465 InputStream *ist,
466 AVSubtitle *sub,
467 int64_t pts)
468 {
469 static uint8_t *subtitle_out = NULL;
470 int subtitle_out_max_size = 1024 * 1024;
471 int subtitle_out_size, nb, i;
472 AVCodecContext *enc;
473 AVPacket pkt;
474
475 if (pts == AV_NOPTS_VALUE) {
476 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
477 if (exit_on_error)
478 exit_program(1);
479 return;
480 }
481
482 enc = ost->enc_ctx;
483
484 if (!subtitle_out) {
485 subtitle_out = av_malloc(subtitle_out_max_size);
486 }
487
488 /* Note: DVB subtitles need one packet to draw them and another
489 packet to clear them. */
490 /* XXX: signal it in the codec context ? */
491 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
492 nb = 2;
493 else
494 nb = 1;
495
496 for (i = 0; i < nb; i++) {
497 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
498 if (!check_recording_time(ost))
499 return;
500
501 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
502 // start_display_time is required to be 0
503 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
504 sub->end_display_time -= sub->start_display_time;
505 sub->start_display_time = 0;
506
507 ost->frames_encoded++;
508
509 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
510 subtitle_out_max_size, sub);
511 if (subtitle_out_size < 0) {
512 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
513 exit_program(1);
514 }
515
516 av_init_packet(&pkt);
517 pkt.data = subtitle_out;
518 pkt.size = subtitle_out_size;
519 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
520 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
521 /* XXX: the pts correction is handled here. Maybe handling
522 it in the codec would be better */
523 if (i == 0)
524 pkt.pts += 90 * sub->start_display_time;
525 else
526 pkt.pts += 90 * sub->end_display_time;
527 }
528 output_packet(of, &pkt, ost, 0);
529 }
530 }
531
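/*
 * Encode one filtered video frame for ost.  Frames whose pts would move
 * backwards are dropped unless video sync is passthrough, forced key frames
 * from -force_key_frames are applied, and every packet the encoder returns
 * is muxed through output_packet(); *frame_size reports the size of the
 * last packet for do_video_stats().
 */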
532 static void do_video_out(OutputFile *of,
533 OutputStream *ost,
534 AVFrame *in_picture,
535 int *frame_size)
536 {
537 int ret, format_video_sync;
538 AVPacket pkt;
539 AVCodecContext *enc = ost->enc_ctx;
540
541 *frame_size = 0;
542
543 format_video_sync = video_sync_method;
544 if (format_video_sync == VSYNC_AUTO)
545 format_video_sync = (of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
546 (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
547 if (format_video_sync != VSYNC_PASSTHROUGH &&
548 ost->frame_number &&
549 in_picture->pts != AV_NOPTS_VALUE &&
550 in_picture->pts < ost->sync_opts) {
551 nb_frames_drop++;
552 av_log(NULL, AV_LOG_WARNING,
553 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
554 ost->frame_number, ost->st->index, in_picture->pts);
555 return;
556 }
557
558 if (in_picture->pts == AV_NOPTS_VALUE)
559 in_picture->pts = ost->sync_opts;
560 ost->sync_opts = in_picture->pts;
561
562
563 if (!ost->frame_number)
564 ost->first_pts = in_picture->pts;
565
566 av_init_packet(&pkt);
567 pkt.data = NULL;
568 pkt.size = 0;
569
570 if (ost->frame_number >= ost->max_frames)
571 return;
572
573 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
574 ost->top_field_first >= 0)
575 in_picture->top_field_first = !!ost->top_field_first;
576
577 in_picture->quality = enc->global_quality;
578 in_picture->pict_type = 0;
579 if (ost->forced_kf_index < ost->forced_kf_count &&
580 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
581 in_picture->pict_type = AV_PICTURE_TYPE_I;
582 ost->forced_kf_index++;
583 }
584
585 ost->frames_encoded++;
586
587 ret = avcodec_send_frame(enc, in_picture);
588 if (ret < 0)
589 goto error;
590
591 /*
592 * For video, there may be reordering, so we can't throw away frames on
593 * encoder flush; we need to limit them here, before they go into the encoder.
594 */
595 ost->frame_number++;
596
597 while (1) {
598 ret = avcodec_receive_packet(enc, &pkt);
599 if (ret == AVERROR(EAGAIN))
600 break;
601 if (ret < 0)
602 goto error;
603
604 output_packet(of, &pkt, ost, 0);
605 *frame_size = pkt.size;
606
607 /* if two pass, output log */
608 if (ost->logfile && enc->stats_out) {
609 fprintf(ost->logfile, "%s", enc->stats_out);
610 }
611
612 ost->sync_opts++;
613 }
614
615 return;
616 error:
617 av_assert0(ret != AVERROR(EAGAIN) && ret != AVERROR_EOF);
618 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
619 exit_program(1);
620 }
621
622 #if FF_API_CODED_FRAME && FF_API_ERROR_FRAME
623 static double psnr(double d)
624 {
625 return -10.0 * log(d) / log(10.0);
626 }
627 #endif
628
629 static void do_video_stats(OutputStream *ost, int frame_size)
630 {
631 AVCodecContext *enc;
632 int frame_number;
633 double ti1, bitrate, avg_bitrate;
634
635 /* this is executed just the first time do_video_stats is called */
636 if (!vstats_file) {
637 vstats_file = fopen(vstats_filename, "w");
638 if (!vstats_file) {
639 perror("fopen");
640 exit_program(1);
641 }
642 }
643
644 enc = ost->enc_ctx;
645 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
646 frame_number = ost->frame_number;
647 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
648 ost->quality / (float)FF_QP2LAMBDA);
649
650 #if FF_API_CODED_FRAME && FF_API_ERROR_FRAME
651 FF_DISABLE_DEPRECATION_WARNINGS
652 if (enc->flags & AV_CODEC_FLAG_PSNR)
653 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
654 FF_ENABLE_DEPRECATION_WARNINGS
655 #endif
656
657 fprintf(vstats_file,"f_size= %6d ", frame_size);
658 /* compute pts value */
659 ti1 = ost->sync_opts * av_q2d(enc->time_base);
660 if (ti1 < 0.01)
661 ti1 = 0.01;
662
663 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
664 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
665 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
666 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
667 #if FF_API_CODED_FRAME
668 FF_DISABLE_DEPRECATION_WARNINGS
669 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
670 FF_ENABLE_DEPRECATION_WARNINGS
671 #endif
672 }
673 }
674
675 static int init_output_stream(OutputStream *ost, char *error, int error_len);
676
677 /*
678 * Read one frame from the lavfi output for ost and encode it.
679 */
680 static int poll_filter(OutputStream *ost)
681 {
682 OutputFile *of = output_files[ost->file_index];
683 AVFrame *filtered_frame = NULL;
684 int frame_size, ret;
685
686 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
687 return AVERROR(ENOMEM);
688 }
689 filtered_frame = ost->filtered_frame;
690
691 if (!ost->initialized) {
692 char error[1024];
693 ret = init_output_stream(ost, error, sizeof(error));
694 if (ret < 0) {
695 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
696 ost->file_index, ost->index, error);
697 exit_program(1);
698 }
699 }
700
701 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
702 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
703 ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame,
704 ost->enc_ctx->frame_size);
705 else
706 ret = av_buffersink_get_frame(ost->filter->filter, filtered_frame);
707
708 if (ret < 0)
709 return ret;
710
711 if (filtered_frame->pts != AV_NOPTS_VALUE) {
712 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
713 filtered_frame->pts = av_rescale_q(filtered_frame->pts,
714 ost->filter->filter->inputs[0]->time_base,
715 ost->enc_ctx->time_base) -
716 av_rescale_q(start_time,
717 AV_TIME_BASE_Q,
718 ost->enc_ctx->time_base);
719 }
720
721 switch (ost->filter->filter->inputs[0]->type) {
722 case AVMEDIA_TYPE_VIDEO:
723 if (!ost->frame_aspect_ratio)
724 ost->enc_ctx->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
725
726 do_video_out(of, ost, filtered_frame, &frame_size);
727 if (vstats_filename && frame_size)
728 do_video_stats(ost, frame_size);
729 break;
730 case AVMEDIA_TYPE_AUDIO:
731 do_audio_out(of, ost, filtered_frame);
732 break;
733 default:
734 // TODO support subtitle filters
735 av_assert0(0);
736 }
737
738 av_frame_unref(filtered_frame);
739
740 return 0;
741 }
742
743 static void finish_output_stream(OutputStream *ost)
744 {
745 OutputFile *of = output_files[ost->file_index];
746 int i;
747
748 ost->finished = 1;
749
750 if (of->shortest) {
751 for (i = 0; i < of->ctx->nb_streams; i++)
752 output_streams[of->ost_index + i]->finished = 1;
753 }
754 }
755
756 /*
757 * Read as many frames as possible from lavfi and encode them.
758 *
759 * Always read from the active stream with the lowest timestamp. If no frames
760 * are available for it then return EAGAIN and wait for more input. This way we
761 * can use lavfi sources that generate an unlimited number of frames without memory
762 * usage exploding.
763 */
764 static int poll_filters(void)
765 {
766 int i, ret = 0;
767
768 while (ret >= 0 && !received_sigterm) {
769 OutputStream *ost = NULL;
770 int64_t min_pts = INT64_MAX;
771
772 /* choose output stream with the lowest timestamp */
773 for (i = 0; i < nb_output_streams; i++) {
774 int64_t pts = output_streams[i]->sync_opts;
775
776 if (output_streams[i]->filter && !output_streams[i]->filter->graph->graph &&
777 !output_streams[i]->filter->graph->nb_inputs) {
778 ret = configure_filtergraph(output_streams[i]->filter->graph);
779 if (ret < 0) {
780 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
781 return ret;
782 }
783 }
784
785 if (!output_streams[i]->filter || output_streams[i]->finished ||
786 !output_streams[i]->filter->graph->graph)
787 continue;
788
789 pts = av_rescale_q(pts, output_streams[i]->enc_ctx->time_base,
790 AV_TIME_BASE_Q);
791 if (pts < min_pts) {
792 min_pts = pts;
793 ost = output_streams[i];
794 }
795 }
796
797 if (!ost)
798 break;
799
800 ret = poll_filter(ost);
801
802 if (ret == AVERROR_EOF) {
803 finish_output_stream(ost);
804 ret = 0;
805 } else if (ret == AVERROR(EAGAIN))
806 return 0;
807 }
808
809 return ret;
810 }
811
812 static void print_final_stats(int64_t total_size)
813 {
814 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
815 uint64_t data_size = 0;
816 float percent = -1.0;
817 int i, j;
818
819 for (i = 0; i < nb_output_streams; i++) {
820 OutputStream *ost = output_streams[i];
821 switch (ost->enc_ctx->codec_type) {
822 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
823 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
824 default: other_size += ost->data_size; break;
825 }
826 extra_size += ost->enc_ctx->extradata_size;
827 data_size += ost->data_size;
828 }
829
830 if (data_size && total_size >= data_size)
831 percent = 100.0 * (total_size - data_size) / data_size;
832
833 av_log(NULL, AV_LOG_INFO, "\n");
834 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
835 video_size / 1024.0,
836 audio_size / 1024.0,
837 other_size / 1024.0,
838 extra_size / 1024.0);
839 if (percent >= 0.0)
840 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
841 else
842 av_log(NULL, AV_LOG_INFO, "unknown");
843 av_log(NULL, AV_LOG_INFO, "\n");
844
845 /* print verbose per-stream stats */
846 for (i = 0; i < nb_input_files; i++) {
847 InputFile *f = input_files[i];
848 uint64_t total_packets = 0, total_size = 0;
849
850 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
851 i, f->ctx->filename);
852
853 for (j = 0; j < f->nb_streams; j++) {
854 InputStream *ist = input_streams[f->ist_index + j];
855 enum AVMediaType type = ist->dec_ctx->codec_type;
856
857 total_size += ist->data_size;
858 total_packets += ist->nb_packets;
859
860 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
861 i, j, media_type_string(type));
862 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
863 ist->nb_packets, ist->data_size);
864
865 if (ist->decoding_needed) {
866 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
867 ist->frames_decoded);
868 if (type == AVMEDIA_TYPE_AUDIO)
869 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
870 av_log(NULL, AV_LOG_VERBOSE, "; ");
871 }
872
873 av_log(NULL, AV_LOG_VERBOSE, "\n");
874 }
875
876 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
877 total_packets, total_size);
878 }
879
880 for (i = 0; i < nb_output_files; i++) {
881 OutputFile *of = output_files[i];
882 uint64_t total_packets = 0, total_size = 0;
883
884 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
885 i, of->ctx->filename);
886
887 for (j = 0; j < of->ctx->nb_streams; j++) {
888 OutputStream *ost = output_streams[of->ost_index + j];
889 enum AVMediaType type = ost->enc_ctx->codec_type;
890
891 total_size += ost->data_size;
892 total_packets += ost->packets_written;
893
894 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
895 i, j, media_type_string(type));
896 if (ost->encoding_needed) {
897 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
898 ost->frames_encoded);
899 if (type == AVMEDIA_TYPE_AUDIO)
900 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
901 av_log(NULL, AV_LOG_VERBOSE, "; ");
902 }
903
904 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
905 ost->packets_written, ost->data_size);
906
907 av_log(NULL, AV_LOG_VERBOSE, "\n");
908 }
909
910 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
911 total_packets, total_size);
912 }
913 }
914
915 static void print_report(int is_last_report, int64_t timer_start)
916 {
917 char buf[1024];
918 OutputStream *ost;
919 AVFormatContext *oc;
920 int64_t total_size = 0;
921 AVCodecContext *enc;
922 int frame_number, vid, i;
923 double bitrate, ti1, pts;
924 static int64_t last_time = -1;
925 static int qp_histogram[52];
926
927 if (!print_stats && !is_last_report)
928 return;
929
930 if (!is_last_report) {
931 int64_t cur_time;
932 /* display the report every 0.5 seconds */
933 cur_time = av_gettime_relative();
934 if (last_time == -1) {
935 last_time = cur_time;
936 return;
937 }
938 if ((cur_time - last_time) < 500000)
939 return;
940 last_time = cur_time;
941 }
942
943
944 oc = output_files[0]->ctx;
945 if (oc->pb) {
946 total_size = avio_size(oc->pb);
947 if (total_size <= 0) // FIXME improve avio_size() so it works with non-seekable output too
948 total_size = avio_tell(oc->pb);
949 if (total_size < 0) {
950 char errbuf[128];
951 av_strerror(total_size, errbuf, sizeof(errbuf));
952 av_log(NULL, AV_LOG_VERBOSE, "Bitrate not available, "
953 "avio_tell() failed: %s\n", errbuf);
954 total_size = 0;
955 }
956 }
957
958 buf[0] = '\0';
959 ti1 = 1e10;
960 vid = 0;
961 for (i = 0; i < nb_output_streams; i++) {
962 float q = -1;
963 ost = output_streams[i];
964 enc = ost->enc_ctx;
965 if (!ost->stream_copy)
966 q = ost->quality / (float) FF_QP2LAMBDA;
967
968 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
969 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
970 }
971 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
972 float t = (av_gettime_relative() - timer_start) / 1000000.0;
973
974 frame_number = ost->frame_number;
975 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
976 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
977 if (is_last_report)
978 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
979 if (qp_hist) {
980 int j;
981 int qp = lrintf(q);
982 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
983 qp_histogram[qp]++;
984 for (j = 0; j < 32; j++)
985 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
986 }
987
988 #if FF_API_CODED_FRAME && FF_API_ERROR_FRAME
989 FF_DISABLE_DEPRECATION_WARNINGS
990 if (enc->flags & AV_CODEC_FLAG_PSNR) {
991 int j;
992 double error, error_sum = 0;
993 double scale, scale_sum = 0;
994 char type[3] = { 'Y','U','V' };
995 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
996 for (j = 0; j < 3; j++) {
997 if (is_last_report) {
998 error = enc->error[j];
999 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1000 } else {
1001 error = enc->coded_frame->error[j];
1002 scale = enc->width * enc->height * 255.0 * 255.0;
1003 }
1004 if (j)
1005 scale /= 4;
1006 error_sum += error;
1007 scale_sum += scale;
1008 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
1009 }
1010 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1011 }
1012 FF_ENABLE_DEPRECATION_WARNINGS
1013 #endif
1014 vid = 1;
1015 }
1016 /* compute min output value */
1017 pts = (double)ost->last_mux_dts * av_q2d(ost->st->time_base);
1018 if ((pts < ti1) && (pts > 0))
1019 ti1 = pts;
1020 }
1021 if (ti1 < 0.01)
1022 ti1 = 0.01;
1023
1024 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1025
1026 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1027 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1028 (double)total_size / 1024, ti1, bitrate);
1029
1030 if (nb_frames_drop)
1031 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " drop=%d",
1032 nb_frames_drop);
1033
1034 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
1035
1036 fflush(stderr);
1037
1038 if (is_last_report)
1039 print_final_stats(total_size);
1040
1041 }
1042
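/*
 * Signal EOF to every encoder that is still needed and drain all the
 * packets it has buffered.  output_packet() is called with eof=1 on the
 * final AVERROR_EOF so the output bitstream filters are flushed as well.
 */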
1043 static void flush_encoders(void)
1044 {
1045 int i, ret;
1046
1047 for (i = 0; i < nb_output_streams; i++) {
1048 OutputStream *ost = output_streams[i];
1049 AVCodecContext *enc = ost->enc_ctx;
1050 OutputFile *of = output_files[ost->file_index];
1051 int stop_encoding = 0;
1052
1053 if (!ost->encoding_needed)
1054 continue;
1055
1056 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1057 continue;
1058
1059 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1060 continue;
1061
1062 avcodec_send_frame(enc, NULL);
1063
1064 for (;;) {
1065 const char *desc = NULL;
1066
1067 switch (enc->codec_type) {
1068 case AVMEDIA_TYPE_AUDIO:
1069 desc = "Audio";
1070 break;
1071 case AVMEDIA_TYPE_VIDEO:
1072 desc = "Video";
1073 break;
1074 default:
1075 av_assert0(0);
1076 }
1077
1078 if (1) {
1079 AVPacket pkt;
1080 av_init_packet(&pkt);
1081 pkt.data = NULL;
1082 pkt.size = 0;
1083
1084 ret = avcodec_receive_packet(enc, &pkt);
1085 if (ret < 0 && ret != AVERROR_EOF) {
1086 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1087 exit_program(1);
1088 }
1089 if (ost->logfile && enc->stats_out) {
1090 fprintf(ost->logfile, "%s", enc->stats_out);
1091 }
1092 output_packet(of, &pkt, ost, ret == AVERROR_EOF);
1093 if (ret == AVERROR_EOF) {
1094 stop_encoding = 1;
1095 break;
1096 }
1097 }
1098
1099 if (stop_encoding)
1100 break;
1101 }
1102 }
1103 }
1104
1105 /*
1106 * Check whether a packet from ist should be written into ost at this time
1107 */
1108 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1109 {
1110 OutputFile *of = output_files[ost->file_index];
1111 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1112
1113 if (ost->source_index != ist_index)
1114 return 0;
1115
1116 if (of->start_time != AV_NOPTS_VALUE && ist->last_dts < of->start_time)
1117 return 0;
1118
1119 return 1;
1120 }
1121
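/*
 * Copy one input packet to ost without re-encoding, rescaling its
 * timestamps from the input stream time base to the muxer time base and
 * honouring the configured recording_time/start_time limits.  A NULL pkt
 * means EOF; in that case only output_packet() is called with eof=1 so the
 * output bitstream filters get flushed.
 */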
1122 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1123 {
1124 OutputFile *of = output_files[ost->file_index];
1125 InputFile *f = input_files [ist->file_index];
1126 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1127 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1128 AVPacket opkt;
1129
1130 // EOF: flush output bitstream filters.
1131 if (!pkt) {
1132 output_packet(of, &opkt, ost, 1);
1133 return;
1134 }
1135
1136 av_init_packet(&opkt);
1137
1138 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1139 !ost->copy_initial_nonkeyframes)
1140 return;
1141
1142 if (of->recording_time != INT64_MAX &&
1143 ist->last_dts >= of->recording_time + start_time) {
1144 ost->finished = 1;
1145 return;
1146 }
1147
1148 if (f->recording_time != INT64_MAX) {
1149 start_time = f->ctx->start_time;
1150 if (f->start_time != AV_NOPTS_VALUE)
1151 start_time += f->start_time;
1152 if (ist->last_dts >= f->recording_time + start_time) {
1153 ost->finished = 1;
1154 return;
1155 }
1156 }
1157
1158 /* force the input stream PTS */
1159 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1160 ost->sync_opts++;
1161
1162 if (pkt->pts != AV_NOPTS_VALUE)
1163 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
1164 else
1165 opkt.pts = AV_NOPTS_VALUE;
1166
1167 if (pkt->dts == AV_NOPTS_VALUE)
1168 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->mux_timebase);
1169 else
1170 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
1171 opkt.dts -= ost_tb_start_time;
1172
1173 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
1174 opkt.flags = pkt->flags;
1175
1176 // FIXME remove the following 2 lines; they shall be replaced by the bitstream filters
1177 if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1178 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1179 && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1180 && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
1181 ) {
1182 if (av_parser_change(ost->parser, ost->parser_avctx,
1183 &opkt.data, &opkt.size,
1184 pkt->data, pkt->size,
1185 pkt->flags & AV_PKT_FLAG_KEY)) {
1186 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1187 if (!opkt.buf)
1188 exit_program(1);
1189 }
1190 } else {
1191 opkt.data = pkt->data;
1192 opkt.size = pkt->size;
1193 }
1194
1195 output_packet(of, &opkt, ost, 0);
1196 }
1197
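/*
 * Feed one decoded frame into ifilter.  If the frame's parameters changed
 * or the filtergraph has not been configured yet, the graph is
 * (re)configured first; frames arriving before all inputs have known
 * parameters are buffered in ifilter->frame_queue.
 */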
1198 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
1199 {
1200 FilterGraph *fg = ifilter->graph;
1201 int need_reinit, ret, i;
1202
1203 /* determine if the parameters for this input changed */
1204 need_reinit = ifilter->format != frame->format;
1205 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
1206 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
1207 need_reinit = 1;
1208
1209 switch (ifilter->ist->st->codecpar->codec_type) {
1210 case AVMEDIA_TYPE_AUDIO:
1211 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
1212 ifilter->channel_layout != frame->channel_layout;
1213 break;
1214 case AVMEDIA_TYPE_VIDEO:
1215 need_reinit |= ifilter->width != frame->width ||
1216 ifilter->height != frame->height;
1217 break;
1218 }
1219
1220 if (need_reinit) {
1221 ret = ifilter_parameters_from_frame(ifilter, frame);
1222 if (ret < 0)
1223 return ret;
1224 }
1225
1226 /* (re)init the graph if possible, otherwise buffer the frame and return */
1227 if (need_reinit || !fg->graph) {
1228 for (i = 0; i < fg->nb_inputs; i++) {
1229 if (fg->inputs[i]->format < 0) {
1230 AVFrame *tmp = av_frame_clone(frame);
1231 if (!tmp)
1232 return AVERROR(ENOMEM);
1233 av_frame_unref(frame);
1234
1235 if (!av_fifo_space(ifilter->frame_queue)) {
1236 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
1237 if (ret < 0)
1238 return ret;
1239 }
1240 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
1241 return 0;
1242 }
1243 }
1244
1245 ret = poll_filters();
1246 if (ret < 0 && ret != AVERROR_EOF) {
1247 char errbuf[128];
1248 av_strerror(ret, errbuf, sizeof(errbuf));
1249
1250 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
1251 return ret;
1252 }
1253
1254 ret = configure_filtergraph(fg);
1255 if (ret < 0) {
1256 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
1257 return ret;
1258 }
1259 }
1260
1261 ret = av_buffersrc_add_frame(ifilter->filter, frame);
1262 if (ret < 0) {
1263 av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
1264 return ret;
1265 }
1266
1267 return 0;
1268 }
1269
1270 static int ifilter_send_eof(InputFilter *ifilter)
1271 {
1272 int i, j, ret;
1273
1274 ifilter->eof = 1;
1275
1276 if (ifilter->filter) {
1277 ret = av_buffersrc_add_frame(ifilter->filter, NULL);
1278 if (ret < 0)
1279 return ret;
1280 } else {
1281 // the filtergraph was never configured
1282 FilterGraph *fg = ifilter->graph;
1283 for (i = 0; i < fg->nb_inputs; i++)
1284 if (!fg->inputs[i]->eof)
1285 break;
1286 if (i == fg->nb_inputs) {
1287 // All the input streams have finished without the filtergraph
1288 // ever being configured.
1289 // Mark the output streams as finished.
1290 for (j = 0; j < fg->nb_outputs; j++)
1291 finish_output_stream(fg->outputs[j]->ost);
1292 }
1293 }
1294
1295 return 0;
1296 }
1297
1298 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
1299 // There is the following difference: if you got a frame, you must call this
1300 // function again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
1301 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
1302 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
1303 {
1304 int ret;
1305
1306 *got_frame = 0;
1307
1308 if (pkt) {
1309 ret = avcodec_send_packet(avctx, pkt);
1310 // In particular, we don't expect AVERROR(EAGAIN), because we read all
1311 // decoded frames with avcodec_receive_frame() until done.
1312 if (ret < 0)
1313 return ret == AVERROR_EOF ? 0 : ret;
1314 }
1315
1316 ret = avcodec_receive_frame(avctx, frame);
1317 if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1318 return ret;
1319 if (ret >= 0)
1320 *got_frame = 1;
1321
1322 return 0;
1323 }
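/*
 * Typical caller pattern for the wrapper above, as used by
 * process_input_packet() through its 'repeating' flag (sketch only;
 * handle_frame() is a placeholder):
 *
 *     ret = decode(avctx, frame, &got_frame, pkt);      // feed the packet once
 *     while (ret >= 0 && got_frame) {
 *         handle_frame(frame);
 *         ret = decode(avctx, frame, &got_frame, NULL); // then only drain
 *     }
 */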
1324
1325 int guess_input_channel_layout(InputStream *ist)
1326 {
1327 AVCodecContext *dec = ist->dec_ctx;
1328
1329 if (!dec->channel_layout) {
1330 char layout_name[256];
1331
1332 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1333 if (!dec->channel_layout)
1334 return 0;
1335 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1336 dec->channels, dec->channel_layout);
1337 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1338 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1339 }
1340 return 1;
1341 }
1342
1343 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
1344 int *decode_failed)
1345 {
1346 AVFrame *decoded_frame, *f;
1347 AVCodecContext *avctx = ist->dec_ctx;
1348 int i, ret, err = 0;
1349
1350 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1351 return AVERROR(ENOMEM);
1352 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1353 return AVERROR(ENOMEM);
1354 decoded_frame = ist->decoded_frame;
1355
1356 ret = decode(avctx, decoded_frame, got_output, pkt);
1357 if (ret < 0)
1358 *decode_failed = 1;
1359 if (!*got_output || ret < 0)
1360 return ret;
1361
1362 ist->samples_decoded += decoded_frame->nb_samples;
1363 ist->frames_decoded++;
1364
1365 /* if the decoder provides a pts, use it instead of the last packet pts.
1366 the decoder could be delaying output by a packet or more. */
1367 if (decoded_frame->pts != AV_NOPTS_VALUE)
1368 ist->next_dts = av_rescale_q(decoded_frame->pts, ist->st->time_base, AV_TIME_BASE_Q);
1369 else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
1370 decoded_frame->pts = pkt->pts;
1371 }
1372
1373 if (decoded_frame->pts != AV_NOPTS_VALUE)
1374 decoded_frame->pts = av_rescale_q(decoded_frame->pts,
1375 ist->st->time_base,
1376 (AVRational){1, avctx->sample_rate});
1377 ist->nb_samples = decoded_frame->nb_samples;
1378 for (i = 0; i < ist->nb_filters; i++) {
1379 if (i < ist->nb_filters - 1) {
1380 f = ist->filter_frame;
1381 err = av_frame_ref(f, decoded_frame);
1382 if (err < 0)
1383 break;
1384 } else
1385 f = decoded_frame;
1386
1387 err = ifilter_send_frame(ist->filters[i], f);
1388 if (err < 0)
1389 break;
1390 }
1391
1392 av_frame_unref(ist->filter_frame);
1393 av_frame_unref(decoded_frame);
1394 return err < 0 ? err : ret;
1395 }
1396
1397 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output,
1398 int *decode_failed)
1399 {
1400 AVFrame *decoded_frame, *f;
1401 int i, ret = 0, err = 0;
1402
1403 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1404 return AVERROR(ENOMEM);
1405 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1406 return AVERROR(ENOMEM);
1407 decoded_frame = ist->decoded_frame;
1408
1409 ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
1410 if (ret < 0)
1411 *decode_failed = 1;
1412 if (!*got_output || ret < 0)
1413 return ret;
1414
1415 ist->frames_decoded++;
1416
1417 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
1418 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
1419 if (err < 0)
1420 goto fail;
1421 }
1422 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
1423
1424 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pts,
1425 decoded_frame->pkt_dts);
1426 if (ist->framerate.num)
1427 decoded_frame->pts = ist->cfr_next_pts++;
1428
1429 if (ist->st->sample_aspect_ratio.num)
1430 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
1431
1432 for (i = 0; i < ist->nb_filters; i++) {
1433 if (i < ist->nb_filters - 1) {
1434 f = ist->filter_frame;
1435 err = av_frame_ref(f, decoded_frame);
1436 if (err < 0)
1437 break;
1438 } else
1439 f = decoded_frame;
1440
1441 err = ifilter_send_frame(ist->filters[i], f);
1442 if (err < 0)
1443 break;
1444 }
1445
1446 fail:
1447 av_frame_unref(ist->filter_frame);
1448 av_frame_unref(decoded_frame);
1449 return err < 0 ? err : ret;
1450 }
1451
1452 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
1453 int *decode_failed)
1454 {
1455 AVSubtitle subtitle;
1456 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
1457 &subtitle, got_output, pkt);
1458 if (ret < 0) {
1459 *decode_failed = 1;
1460 return ret;
1461 }
1462 if (!*got_output)
1463 return ret;
1464
1465 ist->frames_decoded++;
1466
1467 for (i = 0; i < nb_output_streams; i++) {
1468 OutputStream *ost = output_streams[i];
1469
1470 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1471 continue;
1472
1473 do_subtitle_out(output_files[ost->file_index], ost, ist, &subtitle, pkt->pts);
1474 }
1475
1476 avsubtitle_free(&subtitle);
1477 return ret;
1478 }
1479
1480 static int send_filter_eof(InputStream *ist)
1481 {
1482 int i, ret;
1483 for (i = 0; i < ist->nb_filters; i++) {
1484 ret = ifilter_send_eof(ist->filters[i]);
1485 if (ret < 0)
1486 return ret;
1487 }
1488 return 0;
1489 }
1490
1491 /* pkt = NULL means EOF (needed to flush decoder buffers) */
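/* Streams that are decoded run the decode loop below and feed their filters;
 * for stream copy only the timestamp estimates are updated and
 * do_streamcopy() is called for every matching non-encoding output stream,
 * including once with pkt == NULL at EOF so the output BSFs are flushed. */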
1492 static void process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
1493 {
1494 int i;
1495 int repeating = 0;
1496 AVPacket avpkt;
1497
1498 if (ist->next_dts == AV_NOPTS_VALUE)
1499 ist->next_dts = ist->last_dts;
1500
1501 if (!pkt) {
1502 /* EOF handling */
1503 av_init_packet(&avpkt);
1504 avpkt.data = NULL;
1505 avpkt.size = 0;
1506 } else {
1507 avpkt = *pkt;
1508 }
1509
1510 if (pkt && pkt->dts != AV_NOPTS_VALUE)
1511 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
1512
1513 // while we have more to decode or while the decoder did output something on EOF
1514 while (ist->decoding_needed && (!pkt || avpkt.size > 0)) {
1515 int ret = 0;
1516 int got_output = 0;
1517 int decode_failed = 0;
1518
1519 if (!repeating)
1520 ist->last_dts = ist->next_dts;
1521
1522 switch (ist->dec_ctx->codec_type) {
1523 case AVMEDIA_TYPE_AUDIO:
1524 ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
1525 &decode_failed);
1526 break;
1527 case AVMEDIA_TYPE_VIDEO:
1528 ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output,
1529 &decode_failed);
1530 if (repeating && !got_output)
1531 ;
1532 else if (pkt && pkt->duration)
1533 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
1534 else if (ist->st->avg_frame_rate.num)
1535 ist->next_dts += av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate),
1536 AV_TIME_BASE_Q);
1537 else if (ist->dec_ctx->framerate.num != 0) {
1538 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
1539 ist->dec_ctx->ticks_per_frame;
1540 ist->next_dts += av_rescale_q(ticks, ist->dec_ctx->framerate, AV_TIME_BASE_Q);
1541 }
1542 break;
1543 case AVMEDIA_TYPE_SUBTITLE:
1544 if (repeating)
1545 break;
1546 ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
1547 break;
1548 default:
1549 return;
1550 }
1551
1552 if (ret < 0) {
1553 if (decode_failed) {
1554 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
1555 ist->file_index, ist->st->index);
1556 } else {
1557 av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
1558 "data for stream #%d:%d\n", ist->file_index, ist->st->index);
1559 }
1560 if (!decode_failed || exit_on_error)
1561 exit_program(1);
1562 break;
1563 }
1564
1565 if (!got_output)
1566 break;
1567
1568 repeating = 1;
1569 }
1570
1571 /* after flushing, send an EOF on all the filter inputs attached to the stream */
1572 /* except when looping, where we need to flush but not send an EOF */
1573 if (!pkt && ist->decoding_needed && !no_eof) {
1574 int ret = send_filter_eof(ist);
1575 if (ret < 0) {
1576 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
1577 exit_program(1);
1578 }
1579 }
1580
1581 /* handle stream copy */
1582 if (!ist->decoding_needed) {
1583 ist->last_dts = ist->next_dts;
1584 switch (ist->dec_ctx->codec_type) {
1585 case AVMEDIA_TYPE_AUDIO:
1586 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
1587 ist->dec_ctx->sample_rate;
1588 break;
1589 case AVMEDIA_TYPE_VIDEO:
1590 if (ist->dec_ctx->framerate.num != 0) {
1591 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
1592 ist->next_dts += ((int64_t)AV_TIME_BASE *
1593 ist->dec_ctx->framerate.den * ticks) /
1594 ist->dec_ctx->framerate.num;
1595 }
1596 break;
1597 }
1598 }
1599 for (i = 0; i < nb_output_streams; i++) {
1600 OutputStream *ost = output_streams[i];
1601
1602 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
1603 continue;
1604
1605 do_streamcopy(ist, ost, pkt);
1606 }
1607
1608 return;
1609 }
1610
1611 static void print_sdp(void)
1612 {
1613 char sdp[16384];
1614 int i;
1615 AVFormatContext **avc;
1616
1617 for (i = 0; i < nb_output_files; i++) {
1618 if (!output_files[i]->header_written)
1619 return;
1620 }
1621
1622 avc = av_malloc(sizeof(*avc) * nb_output_files);
1623 if (!avc)
1624 exit_program(1);
1625 for (i = 0; i < nb_output_files; i++)
1626 avc[i] = output_files[i]->ctx;
1627
1628 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
1629 printf("SDP:\n%s\n", sdp);
1630 fflush(stdout);
1631 av_freep(&avc);
1632 }
1633
1634 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
1635 {
1636 int i;
1637 for (i = 0; hwaccels[i].name; i++)
1638 if (hwaccels[i].pix_fmt == pix_fmt)
1639 return &hwaccels[i];
1640 return NULL;
1641 }
1642
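/*
 * get_format() callback for the decoder: scan the offered pixel formats,
 * try to initialize the first hwaccel that matches the user's -hwaccel
 * selection (or any of them with -hwaccel auto), and fall back to the
 * first software format if no hwaccel can be set up.
 */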
1643 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
1644 {
1645 InputStream *ist = s->opaque;
1646 const enum AVPixelFormat *p;
1647 int ret;
1648
1649 for (p = pix_fmts; *p != -1; p++) {
1650 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
1651 const HWAccel *hwaccel;
1652
1653 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1654 break;
1655
1656 hwaccel = get_hwaccel(*p);
1657 if (!hwaccel ||
1658 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
1659 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
1660 continue;
1661
1662 ret = hwaccel->init(s);
1663 if (ret < 0) {
1664 if (ist->hwaccel_id == hwaccel->id) {
1665 av_log(NULL, AV_LOG_FATAL,
1666 "%s hwaccel requested for input stream #%d:%d, "
1667 "but cannot be initialized.\n", hwaccel->name,
1668 ist->file_index, ist->st->index);
1669 return AV_PIX_FMT_NONE;
1670 }
1671 continue;
1672 }
1673
1674 if (ist->hw_frames_ctx) {
1675 s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
1676 if (!s->hw_frames_ctx)
1677 return AV_PIX_FMT_NONE;
1678 }
1679
1680 ist->active_hwaccel_id = hwaccel->id;
1681 ist->hwaccel_pix_fmt = *p;
1682 break;
1683 }
1684
1685 return *p;
1686 }
1687
1688 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
1689 {
1690 InputStream *ist = s->opaque;
1691
1692 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
1693 return ist->hwaccel_get_buffer(s, frame, flags);
1694
1695 return avcodec_default_get_buffer2(s, frame, flags);
1696 }
1697
1698 static int init_input_stream(int ist_index, char *error, int error_len)
1699 {
1700 int ret;
1701 InputStream *ist = input_streams[ist_index];
1702
1703 if (ist->decoding_needed) {
1704 AVCodec *codec = ist->dec;
1705 if (!codec) {
1706 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
1707 ist->dec_ctx->codec_id, ist->file_index, ist->st->index);
1708 return AVERROR(EINVAL);
1709 }
1710
1711 ist->dec_ctx->opaque = ist;
1712 ist->dec_ctx->get_format = get_format;
1713 ist->dec_ctx->get_buffer2 = get_buffer;
1714 ist->dec_ctx->thread_safe_callbacks = 1;
1715
1716 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
1717
1718 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
1719 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
1720
1721 ret = hw_device_setup_for_decode(ist);
1722 if (ret < 0) {
1723 char errbuf[128];
1724 av_strerror(ret, errbuf, sizeof(errbuf));
1725 snprintf(error, error_len, "Device setup failed for "
1726 "decoder on input stream #%d:%d : %s",
1727 ist->file_index, ist->st->index, errbuf);
1728 return ret;
1729 }
1730
1731 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
1732 char errbuf[128];
1733 if (ret == AVERROR_EXPERIMENTAL)
1734 abort_codec_experimental(codec, 0);
1735
1736 av_strerror(ret, errbuf, sizeof(errbuf));
1737
1738 snprintf(error, error_len,
1739 "Error while opening decoder for input stream "
1740 "#%d:%d : %s",
1741 ist->file_index, ist->st->index, errbuf);
1742 return ret;
1743 }
1744 assert_avoptions(ist->decoder_opts);
1745 }
1746
1747 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
1748 ist->next_dts = AV_NOPTS_VALUE;
1749 init_pts_correction(&ist->pts_ctx);
1750
1751 return 0;
1752 }
1753
1754 static InputStream *get_input_stream(OutputStream *ost)
1755 {
1756 if (ost->source_index >= 0)
1757 return input_streams[ost->source_index];
1758
1759 if (ost->filter) {
1760 FilterGraph *fg = ost->filter->graph;
1761 int i;
1762
1763 for (i = 0; i < fg->nb_inputs; i++)
1764 if (fg->inputs[i]->ist->dec_ctx->codec_type == ost->enc_ctx->codec_type)
1765 return fg->inputs[i]->ist;
1766 }
1767
1768 return NULL;
1769 }
1770
1771 /* open the muxer when all the streams are initialized */
1772 static int check_init_output_file(OutputFile *of, int file_index)
1773 {
1774 int ret, i;
1775
1776 for (i = 0; i < of->ctx->nb_streams; i++) {
1777 OutputStream *ost = output_streams[of->ost_index + i];
1778 if (!ost->initialized)
1779 return 0;
1780 }
1781
1782 of->ctx->interrupt_callback = int_cb;
1783
1784 ret = avformat_write_header(of->ctx, &of->opts);
1785 if (ret < 0) {
1786 char errbuf[128];
1787
1788 av_strerror(ret, errbuf, sizeof(errbuf));
1789
1790 av_log(NULL, AV_LOG_ERROR,
1791 "Could not write header for output file #%d "
1792 "(incorrect codec parameters ?): %s",
1793 file_index, errbuf);
1794 return ret;
1795 }
1796 assert_avoptions(of->opts);
1797 of->header_written = 1;
1798
1799 av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
1800
1801 if (want_sdp)
1802 print_sdp();
1803
1804 /* flush the muxing queues */
1805 for (i = 0; i < of->ctx->nb_streams; i++) {
1806 OutputStream *ost = output_streams[of->ost_index + i];
1807
1808 while (av_fifo_size(ost->muxing_queue)) {
1809 AVPacket pkt;
1810 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
1811 write_packet(of, &pkt, ost);
1812 }
1813 }
1814
1815 return 0;
1816 }
1817
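/*
 * Initialize ost's bitstream filter chain: each AVBSFContext gets the codec
 * parameters and time base of the previous filter (or of the output stream
 * for the first one), is opened with av_bsf_init(), and the parameters and
 * time base of the last filter are copied back into ost->st.
 */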
1818 static int init_output_bsfs(OutputStream *ost)
1819 {
1820 AVBSFContext *ctx;
1821 int i, ret;
1822
1823 if (!ost->nb_bitstream_filters)
1824 return 0;
1825
1826 for (i = 0; i < ost->nb_bitstream_filters; i++) {
1827 ctx = ost->bsf_ctx[i];
1828
1829 ret = avcodec_parameters_copy(ctx->par_in,
1830 i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
1831 if (ret < 0)
1832 return ret;
1833
1834 ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
1835
1836 ret = av_bsf_init(ctx);
1837 if (ret < 0) {
1838 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
1839 ctx->filter->name);
1840 return ret;
1841 }
1842 }
1843
1844 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
1845 if (ret < 0)
1846 return ret;
1847
1848 ost->st->time_base = ctx->time_base_out;
1849
1850 return 0;
1851 }
1852
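/*
 * Prepare ost for stream copy: pick a codec tag the output format accepts,
 * copy the codec parameters and stream side data from the input stream,
 * keep its time base and disposition, and set up the parser that
 * do_streamcopy() still uses for a few codecs.
 */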
1853 static int init_output_stream_streamcopy(OutputStream *ost)
1854 {
1855 OutputFile *of = output_files[ost->file_index];
1856 InputStream *ist = get_input_stream(ost);
1857 AVCodecParameters *par_dst = ost->st->codecpar;
1858 AVCodecParameters *par_src = ist->st->codecpar;
1859 AVRational sar;
1860 uint32_t codec_tag = par_dst->codec_tag;
1861 int i, ret;
1862
1863 if (!codec_tag) {
1864 if (!of->ctx->oformat->codec_tag ||
1865 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
1866 av_codec_get_tag(of->ctx->oformat->codec_tag, par_src->codec_id) <= 0)
1867 codec_tag = par_src->codec_tag;
1868 }
1869
1870 ret = avcodec_parameters_copy(par_dst, par_src);
1871 if (ret < 0)
1872 return ret;
1873
1874 par_dst->codec_tag = codec_tag;
1875
1876 ost->st->disposition = ist->st->disposition;
1877
1878 ost->st->time_base = ist->st->time_base;
1879
1880 if (ost->bitrate_override)
1881 par_dst->bit_rate = ost->bitrate_override;
1882
1883 if (ist->st->nb_side_data) {
1884 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
1885 sizeof(*ist->st->side_data));
1886 if (!ost->st->side_data)
1887 return AVERROR(ENOMEM);
1888
1889 for (i = 0; i < ist->st->nb_side_data; i++) {
1890 const AVPacketSideData *sd_src = &ist->st->side_data[i];
1891 AVPacketSideData *sd_dst = &ost->st->side_data[i];
1892
1893 sd_dst->data = av_malloc(sd_src->size);
1894 if (!sd_dst->data)
1895 return AVERROR(ENOMEM);
1896 memcpy(sd_dst->data, sd_src->data, sd_src->size);
1897 sd_dst->size = sd_src->size;
1898 sd_dst->type = sd_src->type;
1899 ost->st->nb_side_data++;
1900 }
1901 }
1902
1903 ost->parser = av_parser_init(par_dst->codec_id);
1904 ost->parser_avctx = avcodec_alloc_context3(NULL);
1905 if (!ost->parser_avctx)
1906 return AVERROR(ENOMEM);
1907
1908 if (par_dst->codec_type == AVMEDIA_TYPE_VIDEO) {
1909 if (ost->frame_aspect_ratio)
1910 sar = av_d2q(ost->frame_aspect_ratio * par_dst->height / par_dst->width, 255);
1911 else if (ist->st->sample_aspect_ratio.num)
1912 sar = ist->st->sample_aspect_ratio;
1913 else
1914 sar = par_src->sample_aspect_ratio;
1915 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
1916 }
1917
1918 return 0;
1919 }
1920
1921 static void set_encoder_id(OutputFile *of, OutputStream *ost)
1922 {
1923 AVDictionaryEntry *e;
1924
1925 uint8_t *encoder_string;
1926 int encoder_string_len;
1927 int format_flags = 0;
1928
1929 e = av_dict_get(of->opts, "fflags", NULL, 0);
1930 if (e) {
1931 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
1932 if (!o)
1933 return;
1934 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
1935 }
1936
1937 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
1938 encoder_string = av_mallocz(encoder_string_len);
1939 if (!encoder_string)
1940 exit_program(1);
1941
1942 if (!(format_flags & AVFMT_FLAG_BITEXACT))
1943 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
1944 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
1945 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
1946 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
1947 }
1948
1949 static void parse_forced_key_frames(char *kf, OutputStream *ost,
1950 AVCodecContext *avctx)
1951 {
1952 char *p;
1953 int n = 1, i;
1954 int64_t t;
1955
1956 for (p = kf; *p; p++)
1957 if (*p == ',')
1958 n++;
1959 ost->forced_kf_count = n;
1960 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
1961 if (!ost->forced_kf_pts) {
1962 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
1963 exit_program(1);
1964 }
1965
1966 p = kf;
1967 for (i = 0; i < n; i++) {
1968 char *next = strchr(p, ',');
1969
1970 if (next)
1971 *next++ = 0;
1972
1973 t = parse_time_or_die("force_key_frames", p, 1);
1974 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
1975
1976 p = next;
1977 }
1978 }
1979
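/*
 * Worked example (hypothetical command line): "-force_key_frames 0,10,20"
 * with an encoder time base of 1/25 leaves forced_kf_pts = { 0, 250, 500 }:
 * each time is parsed into AV_TIME_BASE (microsecond) units and then
 * rescaled to the encoder time base, so 10 s becomes 10 * 25 = 250 ticks.
 */
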
1980 static int init_output_stream_encode(OutputStream *ost)
1981 {
1982 InputStream *ist = get_input_stream(ost);
1983 AVCodecContext *enc_ctx = ost->enc_ctx;
1984 AVCodecContext *dec_ctx = NULL;
1985
1986 set_encoder_id(output_files[ost->file_index], ost);
1987
1988 if (ist) {
1989 ost->st->disposition = ist->st->disposition;
1990
1991 dec_ctx = ist->dec_ctx;
1992
1993 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
1994 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
1995 }
1996
1997 switch (enc_ctx->codec_type) {
1998 case AVMEDIA_TYPE_AUDIO:
1999 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
2000 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2001 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2002 enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
2003 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
2004 break;
2005 case AVMEDIA_TYPE_VIDEO:
2006 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
2007
2008 enc_ctx->width = ost->filter->filter->inputs[0]->w;
2009 enc_ctx->height = ost->filter->filter->inputs[0]->h;
2010 enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2011 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
2012 av_d2q(ost->frame_aspect_ratio * enc_ctx->height/enc_ctx->width, 255) :
2013 ost->filter->filter->inputs[0]->sample_aspect_ratio;
2014 enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
2015
2016 enc_ctx->framerate = ost->frame_rate;
2017
2018 ost->st->avg_frame_rate = ost->frame_rate;
2019
2020 if (dec_ctx &&
2021 (enc_ctx->width != dec_ctx->width ||
2022 enc_ctx->height != dec_ctx->height ||
2023 enc_ctx->pix_fmt != dec_ctx->pix_fmt)) {
2024 enc_ctx->bits_per_raw_sample = 0;
2025 }
2026
2027 if (ost->forced_keyframes)
2028 parse_forced_key_frames(ost->forced_keyframes, ost,
2029 ost->enc_ctx);
2030 break;
2031 case AVMEDIA_TYPE_SUBTITLE:
2032 enc_ctx->time_base = (AVRational){1, 1000};
2033 break;
2034 default:
2035 abort();
2036 break;
2037 }
2038
2039 return 0;
2040 }
2041
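/*
 * Example for the audio branch above (hypothetical figures): pulling 44100 Hz
 * audio from the filtergraph sets enc_ctx->time_base to 1/44100, so an
 * encoded frame of 1152 samples (a typical MP2/MP3 frame) spans exactly 1152
 * ticks and timestamps stay sample-accurate without rounding.
 */
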
2042 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2043 {
2044 int ret = 0;
2045
2046 if (ost->encoding_needed) {
2047 AVCodec *codec = ost->enc;
2048 AVCodecContext *dec = NULL;
2049 InputStream *ist;
2050
2051 ret = init_output_stream_encode(ost);
2052 if (ret < 0)
2053 return ret;
2054
2055 if ((ist = get_input_stream(ost)))
2056 dec = ist->dec_ctx;
2057 if (dec && dec->subtitle_header) {
2058 ost->enc_ctx->subtitle_header = av_malloc(dec->subtitle_header_size);
2059 if (!ost->enc_ctx->subtitle_header)
2060 return AVERROR(ENOMEM);
2061 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2062 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2063 }
2064 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2065 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2066
2067 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx &&
2068 ((AVHWFramesContext*)ost->filter->filter->inputs[0]->hw_frames_ctx->data)->format ==
2069 ost->filter->filter->inputs[0]->format) {
2070 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2071 if (!ost->enc_ctx->hw_frames_ctx)
2072 return AVERROR(ENOMEM);
2073 } else {
2074 ret = hw_device_setup_for_encode(ost);
2075 if (ret < 0) {
2076 char errbuf[128];
2077 av_strerror(ret, errbuf, sizeof(errbuf));
2078 snprintf(error, error_len, "Device setup failed for "
2079 "encoder on output stream #%d:%d : %s",
2080 ost->file_index, ost->index, errbuf);
2081 return ret;
2082 }
2083 }
2084
2085 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2086 if (ret == AVERROR_EXPERIMENTAL)
2087 abort_codec_experimental(codec, 1);
2088 snprintf(error, error_len,
2089 "Error while opening encoder for output stream #%d:%d - "
2090 "maybe incorrect parameters such as bit_rate, rate, width or height",
2091 ost->file_index, ost->index);
2092 return ret;
2093 }
2094 assert_avoptions(ost->encoder_opts);
2095 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2096 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low. "
2097 "It takes bits/s as argument, not kbits/s\n");
2098
2099 ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
2100 if (ret < 0) {
2101 av_log(NULL, AV_LOG_FATAL,
2102 "Error initializing the output stream codec context.\n");
2103 exit_program(1);
2104 }
2105
2106 if (ost->enc_ctx->nb_coded_side_data) {
2107 int i;
2108
2109 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2110 sizeof(*ost->st->side_data));
2111 if (!ost->st->side_data)
2112 return AVERROR(ENOMEM);
2113
2114 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2115 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2116 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2117
2118 sd_dst->data = av_malloc(sd_src->size);
2119 if (!sd_dst->data)
2120 return AVERROR(ENOMEM);
2121 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2122 sd_dst->size = sd_src->size;
2123 sd_dst->type = sd_src->type;
2124 ost->st->nb_side_data++;
2125 }
2126 }
2127
2128 ost->st->time_base = ost->enc_ctx->time_base;
2129 } else if (ost->stream_copy) {
2130 ret = init_output_stream_streamcopy(ost);
2131 if (ret < 0)
2132 return ret;
2133
2134 /*
2135 * FIXME: the codec context used by the parser during streamcopy is only
2136 * needed because of the old parser API; this should go away with the new parser API.
2137 */
2138 ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
2139 if (ret < 0)
2140 return ret;
2141 }
2142
2143 /* initialize bitstream filters for the output stream
2144 * needs to be done here, because the codec id for streamcopy is not
2145 * known until now */
2146 ret = init_output_bsfs(ost);
2147 if (ret < 0)
2148 return ret;
2149
2150 ost->mux_timebase = ost->st->time_base;
2151
2152 ost->initialized = 1;
2153
2154 ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
2155 if (ret < 0)
2156 return ret;
2157
2158 return ret;
2159 }
2160
2161 static int transcode_init(void)
2162 {
2163 int ret = 0, i, j, k;
2164 OutputStream *ost;
2165 InputStream *ist;
2166 char error[1024];
2167
2168 /* init framerate emulation */
2169 for (i = 0; i < nb_input_files; i++) {
2170 InputFile *ifile = input_files[i];
2171 if (ifile->rate_emu)
2172 for (j = 0; j < ifile->nb_streams; j++)
2173 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2174 }
2175
2176 /* init input streams */
2177 for (i = 0; i < nb_input_streams; i++)
2178 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2179 goto dump_format;
2180
2181 /* open each encoder */
2182 for (i = 0; i < nb_output_streams; i++) {
2183 // skip streams fed from filtergraphs until we have a frame for them
2184 if (output_streams[i]->filter)
2185 continue;
2186
2187 ret = init_output_stream(output_streams[i], error, sizeof(error));
2188 if (ret < 0)
2189 goto dump_format;
2190 }
2191
2192
2193 /* discard unused programs */
2194 for (i = 0; i < nb_input_files; i++) {
2195 InputFile *ifile = input_files[i];
2196 for (j = 0; j < ifile->ctx->nb_programs; j++) {
2197 AVProgram *p = ifile->ctx->programs[j];
2198 int discard = AVDISCARD_ALL;
2199
2200 for (k = 0; k < p->nb_stream_indexes; k++)
2201 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2202 discard = AVDISCARD_DEFAULT;
2203 break;
2204 }
2205 p->discard = discard;
2206 }
2207 }
2208
2209 dump_format:
2210 /* dump the stream mapping */
2211 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2212 for (i = 0; i < nb_input_streams; i++) {
2213 ist = input_streams[i];
2214
2215 for (j = 0; j < ist->nb_filters; j++) {
2216 if (!filtergraph_is_simple(ist->filters[j]->graph)) {
2217 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2218 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2219 ist->filters[j]->name);
2220 if (nb_filtergraphs > 1)
2221 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2222 av_log(NULL, AV_LOG_INFO, "\n");
2223 }
2224 }
2225 }
2226
2227 for (i = 0; i < nb_output_streams; i++) {
2228 ost = output_streams[i];
2229
2230 if (ost->attachment_filename) {
2231 /* an attached file */
2232 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2233 ost->attachment_filename, ost->file_index, ost->index);
2234 continue;
2235 }
2236
2237 if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
2238 /* output from a complex graph */
2239 av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
2240 if (nb_filtergraphs > 1)
2241 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2242
2243 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2244 ost->index, ost->enc ? ost->enc->name : "?");
2245 continue;
2246 }
2247
2248 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2249 input_streams[ost->source_index]->file_index,
2250 input_streams[ost->source_index]->st->index,
2251 ost->file_index,
2252 ost->index);
2253 if (ost->sync_ist != input_streams[ost->source_index])
2254 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2255 ost->sync_ist->file_index,
2256 ost->sync_ist->st->index);
2257 if (ost->stream_copy)
2258 av_log(NULL, AV_LOG_INFO, " (copy)");
2259 else {
2260 const AVCodec *in_codec = input_streams[ost->source_index]->dec;
2261 const AVCodec *out_codec = ost->enc;
2262 const char *decoder_name = "?";
2263 const char *in_codec_name = "?";
2264 const char *encoder_name = "?";
2265 const char *out_codec_name = "?";
2266 const AVCodecDescriptor *desc;
2267
2268 if (in_codec) {
2269 decoder_name = in_codec->name;
2270 desc = avcodec_descriptor_get(in_codec->id);
2271 if (desc)
2272 in_codec_name = desc->name;
2273 if (!strcmp(decoder_name, in_codec_name))
2274 decoder_name = "native";
2275 }
2276
2277 if (out_codec) {
2278 encoder_name = out_codec->name;
2279 desc = avcodec_descriptor_get(out_codec->id);
2280 if (desc)
2281 out_codec_name = desc->name;
2282 if (!strcmp(encoder_name, out_codec_name))
2283 encoder_name = "native";
2284 }
2285
2286 av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
2287 in_codec_name, decoder_name,
2288 out_codec_name, encoder_name);
2289 }
2290 av_log(NULL, AV_LOG_INFO, "\n");
2291 }
2292
2293 if (ret) {
2294 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2295 return ret;
2296 }
2297
2298 return 0;
2299 }
2300
2301 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
2302 static int need_output(void)
2303 {
2304 int i;
2305
2306 for (i = 0; i < nb_output_streams; i++) {
2307 OutputStream *ost = output_streams[i];
2308 OutputFile *of = output_files[ost->file_index];
2309 AVFormatContext *os = output_files[ost->file_index]->ctx;
2310
2311 if (ost->finished ||
2312 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
2313 continue;
2314 if (ost->frame_number >= ost->max_frames) {
2315 int j;
2316 for (j = 0; j < of->ctx->nb_streams; j++)
2317 output_streams[of->ost_index + j]->finished = 1;
2318 continue;
2319 }
2320
2321 return 1;
2322 }
2323
2324 return 0;
2325 }
2326
2327 static InputFile *select_input_file(void)
2328 {
2329 InputFile *ifile = NULL;
2330 int64_t ipts_min = INT64_MAX;
2331 int i;
2332
2333 for (i = 0; i < nb_input_streams; i++) {
2334 InputStream *ist = input_streams[i];
2335 int64_t ipts = ist->last_dts;
2336
2337 if (ist->discard || input_files[ist->file_index]->eagain)
2338 continue;
2339 if (!input_files[ist->file_index]->eof_reached) {
2340 if (ipts < ipts_min) {
2341 ipts_min = ipts;
2342 ifile = input_files[ist->file_index];
2343 }
2344 }
2345 }
2346
2347 return ifile;
2348 }
2349
2350 #if HAVE_PTHREADS
2351 static void *input_thread(void *arg)
2352 {
2353 InputFile *f = arg;
2354 int ret = 0;
2355
2356 while (!transcoding_finished && ret >= 0) {
2357 AVPacket pkt;
2358 ret = av_read_frame(f->ctx, &pkt);
2359
2360 if (ret == AVERROR(EAGAIN)) {
2361 av_usleep(10000);
2362 ret = 0;
2363 continue;
2364 } else if (ret < 0)
2365 break;
2366
2367 pthread_mutex_lock(&f->fifo_lock);
2368 while (!av_fifo_space(f->fifo))
2369 pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);
2370
2371 av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);
2372
2373 pthread_mutex_unlock(&f->fifo_lock);
2374 }
2375
2376 f->finished = 1;
2377 return NULL;
2378 }
2379
2380 static void free_input_threads(void)
2381 {
2382 int i;
2383
2384 if (nb_input_files == 1)
2385 return;
2386
2387 transcoding_finished = 1;
2388
2389 for (i = 0; i < nb_input_files; i++) {
2390 InputFile *f = input_files[i];
2391 AVPacket pkt;
2392
2393 if (!f->fifo || f->joined)
2394 continue;
2395
2396 pthread_mutex_lock(&f->fifo_lock);
2397 while (av_fifo_size(f->fifo)) {
2398 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2399 av_packet_unref(&pkt);
2400 }
2401 pthread_cond_signal(&f->fifo_cond);
2402 pthread_mutex_unlock(&f->fifo_lock);
2403
2404 pthread_join(f->thread, NULL);
2405 f->joined = 1;
2406
2407 while (av_fifo_size(f->fifo)) {
2408 av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
2409 av_packet_unref(&pkt);
2410 }
2411 av_fifo_free(f->fifo);
2412 }
2413 }
2414
2415 static int init_input_threads(void)
2416 {
2417 int i, ret;
2418
2419 if (nb_input_files == 1)
2420 return 0;
2421
2422 for (i = 0; i < nb_input_files; i++) {
2423 InputFile *f = input_files[i];
2424
2425 if (!(f->fifo = av_fifo_alloc(8*sizeof(AVPacket))))
2426 return AVERROR(ENOMEM);
2427
2428 pthread_mutex_init(&f->fifo_lock, NULL);
2429 pthread_cond_init (&f->fifo_cond, NULL);
2430
2431 if ((ret = pthread_create(&f->thread, NULL, input_thread, f)))
2432 return AVERROR(ret);
2433 }
2434 return 0;
2435 }
2436
2437 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
2438 {
2439 int ret = 0;
2440
2441 pthread_mutex_lock(&f->fifo_lock);
2442
2443 if (av_fifo_size(f->fifo)) {
2444 av_fifo_generic_read(f->fifo, pkt, sizeof(*pkt), NULL);
2445 pthread_cond_signal(&f->fifo_cond);
2446 } else {
2447 if (f->finished)
2448 ret = AVERROR_EOF;
2449 else
2450 ret = AVERROR(EAGAIN);
2451 }
2452
2453 pthread_mutex_unlock(&f->fifo_lock);
2454
2455 return ret;
2456 }
2457 #endif
2458
2459 static int get_input_packet(InputFile *f, AVPacket *pkt)
2460 {
2461 if (f->rate_emu) {
2462 int i;
2463 for (i = 0; i < f->nb_streams; i++) {
2464 InputStream *ist = input_streams[f->ist_index + i];
2465 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2466 int64_t now = av_gettime_relative() - ist->start;
2467 if (pts > now)
2468 return AVERROR(EAGAIN);
2469 }
2470 }
2471
2472 #if HAVE_PTHREADS
2473 if (nb_input_files > 1)
2474 return get_input_packet_mt(f, pkt);
2475 #endif
2476 return av_read_frame(f->ctx, pkt);
2477 }
2478
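/*
 * Rate-emulation example for the check above (numbers are made up): with
 * "-re", if a stream's last decoded DTS corresponds to 5 s (5000000 in
 * AV_TIME_BASE units) but only 4.2 s of wall-clock time have elapsed since
 * ist->start, get_input_packet() returns AVERROR(EAGAIN) and the caller
 * retries later, so reading never runs ahead of real time.
 */
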
2479 static int got_eagain(void)
2480 {
2481 int i;
2482 for (i = 0; i < nb_input_files; i++)
2483 if (input_files[i]->eagain)
2484 return 1;
2485 return 0;
2486 }
2487
2488 static void reset_eagain(void)
2489 {
2490 int i;
2491 for (i = 0; i < nb_input_files; i++)
2492 input_files[i]->eagain = 0;
2493 }
2494
2495 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
2496 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
2497 AVRational time_base)
2498 {
2499 int ret;
2500
2501 if (!*duration) {
2502 *duration = tmp;
2503 return tmp_time_base;
2504 }
2505
2506 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
2507 if (ret < 0) {
2508 *duration = tmp;
2509 return tmp_time_base;
2510 }
2511
2512 return time_base;
2513 }
2514
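/*
 * Worked example for duration_max() (hypothetical values): if *duration is
 * 90000 in a 1/90000 time base (1 s) and tmp is 100 in a 1/25 time base
 * (4 s), av_compare_ts() reports the stored duration as smaller, so
 * *duration becomes 100 and the returned time base is 1/25. A zero *duration
 * is treated as unset and is always replaced by tmp.
 */
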
2515 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
2516 {
2517 InputStream *ist;
2518 AVCodecContext *avctx;
2519 int i, ret, has_audio = 0;
2520 int64_t duration = 0;
2521
2522 ret = av_seek_frame(is, -1, is->start_time, 0);
2523 if (ret < 0)
2524 return ret;
2525
2526 for (i = 0; i < ifile->nb_streams; i++) {
2527 ist = input_streams[ifile->ist_index + i];
2528 avctx = ist->dec_ctx;
2529
2530 // flush decoders
2531 if (ist->decoding_needed) {
2532 process_input_packet(ist, NULL, 1);
2533 avcodec_flush_buffers(avctx);
2534 }
2535
2536 /* duration is the length of the last frame in a stream;
2537 * when an audio stream is present we do not care about the
2538 * last video frame's length because it is not defined exactly */
2539 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
2540 has_audio = 1;
2541 }
2542
2543 for (i = 0; i < ifile->nb_streams; i++) {
2544 ist = input_streams[ifile->ist_index + i];
2545 avctx = ist->dec_ctx;
2546
2547 if (has_audio) {
2548 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
2549 AVRational sample_rate = {1, avctx->sample_rate};
2550
2551 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
2552 } else
2553 continue;
2554 } else {
2555 if (ist->framerate.num) {
2556 duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
2557 } else if (ist->st->avg_frame_rate.num) {
2558 duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
2559 } else duration = 1;
2560 }
2561 if (!ifile->duration)
2562 ifile->time_base = ist->st->time_base;
2563 /* max_pts - min_pts is the duration of the stream without the last
2564 * frame, so add the last frame's duration to get the total */
2565 duration += ist->max_pts - ist->min_pts;
2566 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
2567 ifile->time_base);
2568 }
2569
2570 if (ifile->loop > 0)
2571 ifile->loop--;
2572
2573 return ret;
2574 }
2575
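/*
 * Example of the duration bookkeeping above for input looping (hypothetical
 * numbers): a 48 kHz audio stream whose last decoded frame held 1024 samples
 * contributes a last-frame duration of
 * av_rescale_q(1024, (AVRational){1, 48000}, ist->st->time_base); adding
 * max_pts - min_pts to it gives the length of one loop iteration, which
 * process_input() later adds to the timestamps of the next pass so the
 * iterations play back-to-back.
 */
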
2576 /*
2577 * Read one packet from an input file and send it for
2578 * - decoding -> lavfi (audio/video)
2579 * - decoding -> encoding -> muxing (subtitles)
2580 * - muxing (streamcopy)
2581 *
2582 * Return
2583 * - 0 -- one packet was read and processed
2584 * - AVERROR(EAGAIN) -- no packets were available for selected file,
2585 * this function should be called again
2586 * - AVERROR_EOF -- this function should not be called again
2587 */
2588 static int process_input(void)
2589 {
2590 InputFile *ifile;
2591 AVFormatContext *is;
2592 InputStream *ist;
2593 AVPacket pkt;
2594 int ret, i, j;
2595 int64_t duration;
2596
2597 /* select the input file to read from now */
2598 ifile = select_input_file();
2599 /* if none, all inputs are finished */
2600 if (!ifile) {
2601 if (got_eagain()) {
2602 reset_eagain();
2603 av_usleep(10000);
2604 return AVERROR(EAGAIN);
2605 }
2606 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from.\n");
2607 return AVERROR_EOF;
2608 }
2609
2610 is = ifile->ctx;
2611 ret = get_input_packet(ifile, &pkt);
2612
2613 if (ret == AVERROR(EAGAIN)) {
2614 ifile->eagain = 1;
2615 return ret;
2616 }
2617 if (ret < 0 && ifile->loop) {
2618 if ((ret = seek_to_start(ifile, is)) < 0)
2619 return ret;
2620 ret = get_input_packet(ifile, &pkt);
2621 }
2622 if (ret < 0) {
2623 if (ret != AVERROR_EOF) {
2624 print_error(is->filename, ret);
2625 if (exit_on_error)
2626 exit_program(1);
2627 }
2628 ifile->eof_reached = 1;
2629
2630 for (i = 0; i < ifile->nb_streams; i++) {
2631 ist = input_streams[ifile->ist_index + i];
2632 if (ist->decoding_needed)
2633 process_input_packet(ist, NULL, 0);
2634
2635 /* mark all outputs that don't go through lavfi as finished */
2636 for (j = 0; j < nb_output_streams; j++) {
2637 OutputStream *ost = output_streams[j];
2638
2639 if (ost->source_index == ifile->ist_index + i &&
2640 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
2641 finish_output_stream(ost);
2642 }
2643 }
2644
2645 return AVERROR(EAGAIN);
2646 }
2647
2648 reset_eagain();
2649
2650 if (do_pkt_dump) {
2651 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2652 is->streams[pkt.stream_index]);
2653 }
2654 /* the following test is needed in case new streams appear
2655 dynamically in the stream; we ignore them */
2656 if (pkt.stream_index >= ifile->nb_streams)
2657 goto discard_packet;
2658
2659 ist = input_streams[ifile->ist_index + pkt.stream_index];
2660
2661 ist->data_size += pkt.size;
2662 ist->nb_packets++;
2663
2664 if (ist->discard)
2665 goto discard_packet;
2666
2667 /* add the stream-global side data to the first packet */
2668 if (ist->nb_packets == 1)
2669 for (i = 0; i < ist->st->nb_side_data; i++) {
2670 AVPacketSideData *src_sd = &ist->st->side_data[i];
2671 uint8_t *dst_data;
2672
2673 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
2674 continue;
2675 if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
2676 continue;
2677
2678 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
2679 if (!dst_data)
2680 exit_program(1);
2681
2682 memcpy(dst_data, src_sd->data, src_sd->size);
2683 }
2684
2685 if (pkt.dts != AV_NOPTS_VALUE)
2686 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2687 if (pkt.pts != AV_NOPTS_VALUE)
2688 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2689
2690 if (pkt.pts != AV_NOPTS_VALUE)
2691 pkt.pts *= ist->ts_scale;
2692 if (pkt.dts != AV_NOPTS_VALUE)
2693 pkt.dts *= ist->ts_scale;
2694
2695 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2696 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
2697 pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
2698 (is->iformat->flags & AVFMT_TS_DISCONT)) {
2699 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2700 int64_t delta = pkt_dts - ist->next_dts;
2701
2702 if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
2703 ifile->ts_offset -= delta;
2704 av_log(NULL, AV_LOG_DEBUG,
2705 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2706 delta, ifile->ts_offset);
2707 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2708 if (pkt.pts != AV_NOPTS_VALUE)
2709 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2710 }
2711 }
2712 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
2713 if (pkt.pts != AV_NOPTS_VALUE) {
2714 pkt.pts += duration;
2715 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
2716 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
2717 }
2718
2719 if (pkt.dts != AV_NOPTS_VALUE)
2720 pkt.dts += duration;
2721
2722 process_input_packet(ist, &pkt, 0);
2723
2724 discard_packet:
2725 av_packet_unref(&pkt);
2726
2727 return 0;
2728 }
2729
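/*
 * Discontinuity example for the AVFMT_TS_DISCONT handling in process_input()
 * (hypothetical numbers, copy_ts not set): if a packet's DTS corresponds to
 * 100 s while the predicted next DTS is 10 s, delta is 90 s, well above
 * dts_delta_threshold, so 90 s is folded into ifile->ts_offset and subtracted
 * from the packet's own timestamps; playback continues at 10 s instead of
 * jumping, which is what MPEG-TS timestamp wraps and similar breaks require.
 */
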
2730 /*
2731 * The following code is the main loop of the file converter
2732 */
2733 static int transcode(void)
2734 {
2735 int ret, i, need_input = 1;
2736 AVFormatContext *os;
2737 OutputStream *ost;
2738 InputStream *ist;
2739 int64_t timer_start;
2740
2741 ret = transcode_init();
2742 if (ret < 0)
2743 goto fail;
2744
2745 av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2746 term_init();
2747
2748 timer_start = av_gettime_relative();
2749
2750 #if HAVE_PTHREADS
2751 if ((ret = init_input_threads()) < 0)
2752 goto fail;
2753 #endif
2754
2755 while (!received_sigterm) {
2756 /* check if there's any stream where output is still needed */
2757 if (!need_output()) {
2758 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
2759 break;
2760 }
2761
2762 /* read and process one input packet if needed */
2763 if (need_input) {
2764 ret = process_input();
2765 if (ret == AVERROR_EOF)
2766 need_input = 0;
2767 }
2768
2769 ret = poll_filters();
2770 if (ret < 0 && ret != AVERROR_EOF) {
2771 char errbuf[128];
2772 av_strerror(ret, errbuf, sizeof(errbuf));
2773
2774 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2775 break;
2776 }
2777
2778 /* dump report using the first video and audio output streams */
2779 print_report(0, timer_start);
2780 }
2781 #if HAVE_PTHREADS
2782 free_input_threads();
2783 #endif
2784
2785 /* at the end of stream, we must flush the decoder buffers */
2786 for (i = 0; i < nb_input_streams; i++) {
2787 ist = input_streams[i];
2788 if (!input_files[ist->file_index]->eof_reached) {
2789 process_input_packet(ist, NULL, 0);
2790 }
2791 }
2792 poll_filters();
2793 flush_encoders();
2794
2795 term_exit();
2796
2797 /* write the trailer if needed and close file */
2798 for (i = 0; i < nb_output_files; i++) {
2799 os = output_files[i]->ctx;
2800 if (!output_files[i]->header_written) {
2801 av_log(NULL, AV_LOG_ERROR,
2802 "Nothing was written into output file %d (%s), because "
2803 "at least one of its streams received no packets.\n",
2804 i, os->filename);
2805 continue;
2806 }
2807 av_write_trailer(os);
2808 }
2809
2810 /* dump report by using the first video and audio streams */
2811 print_report(1, timer_start);
2812
2813 /* close each encoder */
2814 for (i = 0; i < nb_output_streams; i++) {
2815 ost = output_streams[i];
2816 if (ost->encoding_needed) {
2817 av_freep(&ost->enc_ctx->stats_in);
2818 }
2819 }
2820
2821 /* close each decoder */
2822 for (i = 0; i < nb_input_streams; i++) {
2823 ist = input_streams[i];
2824 if (ist->decoding_needed) {
2825 avcodec_close(ist->dec_ctx);
2826 if (ist->hwaccel_uninit)
2827 ist->hwaccel_uninit(ist->dec_ctx);
2828 }
2829 }
2830
2831 av_buffer_unref(&hw_device_ctx);
2832 hw_device_free_all();
2833
2834 /* finished ! */
2835 ret = 0;
2836
2837 fail:
2838 #if HAVE_PTHREADS
2839 free_input_threads();
2840 #endif
2841
2842 if (output_streams) {
2843 for (i = 0; i < nb_output_streams; i++) {
2844 ost = output_streams[i];
2845 if (ost) {
2846 if (ost->logfile) {
2847 fclose(ost->logfile);
2848 ost->logfile = NULL;
2849 }
2850 av_free(ost->forced_kf_pts);
2851 av_dict_free(&ost->encoder_opts);
2852 av_dict_free(&ost->resample_opts);
2853 }
2854 }
2855 }
2856 return ret;
2857 }
2858
2859 static int64_t getutime(void)
2860 {
2861 #if HAVE_GETRUSAGE
2862 struct rusage rusage;
2863
2864 getrusage(RUSAGE_SELF, &rusage);
2865 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
2866 #elif HAVE_GETPROCESSTIMES
2867 HANDLE proc;
2868 FILETIME c, e, k, u;
2869 proc = GetCurrentProcess();
2870 GetProcessTimes(proc, &c, &e, &k, &u);
2871 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
2872 #else
2873 return av_gettime_relative();
2874 #endif
2875 }
2876
2877 static int64_t getmaxrss(void)
2878 {
2879 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
2880 struct rusage rusage;
2881 getrusage(RUSAGE_SELF, &rusage);
2882 return (int64_t)rusage.ru_maxrss * 1024;
2883 #elif HAVE_GETPROCESSMEMORYINFO
2884 HANDLE proc;
2885 PROCESS_MEMORY_COUNTERS memcounters;
2886 proc = GetCurrentProcess();
2887 memcounters.cb = sizeof(memcounters);
2888 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
2889 return memcounters.PeakPagefileUsage;
2890 #else
2891 return 0;
2892 #endif
2893 }
2894
2895 int main(int argc, char **argv)
2896 {
2897 int i, ret;
2898 int64_t ti;
2899
2900 register_exit(avconv_cleanup);
2901
2902 av_log_set_flags(AV_LOG_SKIP_REPEATED);
2903 parse_loglevel(argc, argv, options);
2904
2905 avcodec_register_all();
2906 #if CONFIG_AVDEVICE
2907 avdevice_register_all();
2908 #endif
2909 avfilter_register_all();
2910 av_register_all();
2911 avformat_network_init();
2912
2913 show_banner();
2914
2915 /* parse options and open all input/output files */
2916 ret = avconv_parse_options(argc, argv);
2917 if (ret < 0)
2918 exit_program(1);
2919
2920 if (nb_output_files <= 0 && nb_input_files == 0) {
2921 show_usage();
2922 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2923 exit_program(1);
2924 }
2925
2926 /* file converter / grab */
2927 if (nb_output_files <= 0) {
2928 fprintf(stderr, "At least one output file must be specified\n");
2929 exit_program(1);
2930 }
2931
2932 for (i = 0; i < nb_output_files; i++) {
2933 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
2934 want_sdp = 0;
2935 }
2936
2937 ti = getutime();
2938 if (transcode() < 0)
2939 exit_program(1);
2940 ti = getutime() - ti;
2941 if (do_benchmark) {
2942 int maxrss = getmaxrss() / 1024;
2943 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
2944 }
2945
2946 exit_program(0);
2947 return 0;
2948 }