1 /*
2 * Copyright (c) 2000-2003 Fabrice Bellard
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * multimedia converter based on the FFmpeg libraries
24 */
25
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34
35 #if HAVE_IO_H
36 #include <io.h>
37 #endif
38 #if HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
44 #include "libswresample/swresample.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/channel_layout.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
61 #include "libavutil/threadmessage.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
64
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
68
69 #if HAVE_SYS_RESOURCE_H
70 #include <sys/time.h>
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
74 #include <windows.h>
75 #endif
76 #if HAVE_GETPROCESSMEMORYINFO
77 #include <windows.h>
78 #include <psapi.h>
79 #endif
80 #if HAVE_SETCONSOLECTRLHANDLER
81 #include <windows.h>
82 #endif
83
84
85 #if HAVE_SYS_SELECT_H
86 #include <sys/select.h>
87 #endif
88
89 #if HAVE_TERMIOS_H
90 #include <fcntl.h>
91 #include <sys/ioctl.h>
92 #include <sys/time.h>
93 #include <termios.h>
94 #elif HAVE_KBHIT
95 #include <conio.h>
96 #endif
97
98 #if HAVE_PTHREADS
99 #include <pthread.h>
100 #endif
101
102 #include <time.h>
103
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106
107 #include "libavutil/avassert.h"
108
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111
112 static FILE *vstats_file;
113
114 const char *const forced_keyframes_const_names[] = {
115 "n",
116 "n_forced",
117 "prev_forced_n",
118 "prev_forced_t",
119 "t",
120 NULL
121 };
122
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
131
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
134
135 static uint8_t *subtitle_out;
136
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
141
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
146
147 FilterGraph **filtergraphs;
148 int nb_filtergraphs;
149
150 #if HAVE_TERMIOS_H
151
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
154 static int restore_tty;
155 #endif
156
157 #if HAVE_PTHREADS
158 static void free_input_threads(void);
159 #endif
160
161 /* sub2video hack:
162 Convert subtitles to video with alpha to insert them in filter graphs.
163 This is a temporary solution until libavfilter gets real subtitles support.
164 */
165
166 static int sub2video_get_blank_frame(InputStream *ist)
167 {
168 int ret;
169 AVFrame *frame = ist->sub2video.frame;
170
171 av_frame_unref(frame);
172 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
176 return ret;
177 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
178 return 0;
179 }
180
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
182 AVSubtitleRect *r)
183 {
184 uint32_t *pal, *dst2;
185 uint8_t *src, *src2;
186 int x, y;
187
188 if (r->type != SUBTITLE_BITMAP) {
189 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
190 return;
191 }
192 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194 r->x, r->y, r->w, r->h, w, h
195 );
196 return;
197 }
198
199 dst += r->y * dst_linesize + r->x * 4;
200 src = r->pict.data[0];
201 pal = (uint32_t *)r->pict.data[1];
202 for (y = 0; y < r->h; y++) {
203 dst2 = (uint32_t *)dst;
204 src2 = src;
205 for (x = 0; x < r->w; x++)
206 *(dst2++) = pal[*(src2++)];
207 dst += dst_linesize;
208 src += r->pict.linesize[0];
209 }
210 }
211
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
213 {
214 AVFrame *frame = ist->sub2video.frame;
215 int i;
216
217 av_assert1(frame->data[0]);
218 ist->sub2video.last_pts = frame->pts = pts;
219 for (i = 0; i < ist->nb_filters; i++)
220 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221 AV_BUFFERSRC_FLAG_KEEP_REF |
222 AV_BUFFERSRC_FLAG_PUSH);
223 }
224
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
226 {
227 AVFrame *frame = ist->sub2video.frame;
228     uint8_t *dst;
229 int dst_linesize;
230 int num_rects, i;
231 int64_t pts, end_pts;
232
233 if (!frame)
234 return;
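    /* With a subtitle, draw its rectangles from its start to its end display
       time; without one, emit a blank frame that clears the canvas until a
       new subtitle arrives. */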
235 if (sub) {
236 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237 AV_TIME_BASE_Q, ist->st->time_base);
238 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239 AV_TIME_BASE_Q, ist->st->time_base);
240 num_rects = sub->num_rects;
241 } else {
242 pts = ist->sub2video.end_pts;
243 end_pts = INT64_MAX;
244 num_rects = 0;
245 }
246 if (sub2video_get_blank_frame(ist) < 0) {
247 av_log(ist->dec_ctx, AV_LOG_ERROR,
248 "Impossible to get a blank canvas.\n");
249 return;
250 }
251 dst = frame->data [0];
252 dst_linesize = frame->linesize[0];
253 for (i = 0; i < num_rects; i++)
254 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255 sub2video_push_ref(ist, pts);
256 ist->sub2video.end_pts = end_pts;
257 }
258
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
260 {
261 InputFile *infile = input_files[ist->file_index];
262 int i, j, nb_reqs;
263 int64_t pts2;
264
265 /* When a frame is read from a file, examine all sub2video streams in
266 the same file and send the sub2video frame again. Otherwise, decoded
267 video frames could be accumulating in the filter graph while a filter
268 (possibly overlay) is desperately waiting for a subtitle frame. */
269 for (i = 0; i < infile->nb_streams; i++) {
270 InputStream *ist2 = input_streams[infile->ist_index + i];
271 if (!ist2->sub2video.frame)
272 continue;
273 /* subtitles seem to be usually muxed ahead of other streams;
274 if not, subtracting a larger time here is necessary */
275 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276 /* do not send the heartbeat frame if the subtitle is already ahead */
277 if (pts2 <= ist2->sub2video.last_pts)
278 continue;
279 if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280 sub2video_update(ist2, NULL);
281 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
283 if (nb_reqs)
284 sub2video_push_ref(ist2, pts2);
285 }
286 }
287
288 static void sub2video_flush(InputStream *ist)
289 {
290 int i;
291
292 if (ist->sub2video.end_pts < INT64_MAX)
293 sub2video_update(ist, NULL);
294 for (i = 0; i < ist->nb_filters; i++)
295 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
296 }
297
298 /* end of sub2video hack */
299
300 static void term_exit_sigsafe(void)
301 {
302 #if HAVE_TERMIOS_H
303 if(restore_tty)
304 tcsetattr (0, TCSANOW, &oldtty);
305 #endif
306 }
307
308 void term_exit(void)
309 {
310 av_log(NULL, AV_LOG_QUIET, "%s", "");
311 term_exit_sigsafe();
312 }
313
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
319
320 static void
321 sigterm_handler(int sig)
322 {
323 received_sigterm = sig;
324 received_nb_signals++;
325 term_exit_sigsafe();
326 if(received_nb_signals > 3) {
327 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328 strlen("Received > 3 system signals, hard exiting\n"));
329
330 exit(123);
331 }
332 }
333
334 #if HAVE_SETCONSOLECTRLHANDLER
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
336 {
337 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
338
339 switch (fdwCtrlType)
340 {
341 case CTRL_C_EVENT:
342 case CTRL_BREAK_EVENT:
343 sigterm_handler(SIGINT);
344 return TRUE;
345
346 case CTRL_CLOSE_EVENT:
347 case CTRL_LOGOFF_EVENT:
348 case CTRL_SHUTDOWN_EVENT:
349 sigterm_handler(SIGTERM);
350 /* Basically, with these 3 events, when we return from this method the
351 process is hard terminated, so stall as long as we need to
352 to try and let the main thread(s) clean up and gracefully terminate
353 (we have at most 5 seconds, but should be done far before that). */
354 while (!ffmpeg_exited) {
355 Sleep(0);
356 }
357 return TRUE;
358
359 default:
360 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
361 return FALSE;
362 }
363 }
364 #endif
365
366 void term_init(void)
367 {
368 #if HAVE_TERMIOS_H
369 if(!run_as_daemon){
370 struct termios tty;
371 if (tcgetattr (0, &tty) == 0) {
372 oldtty = tty;
373 restore_tty = 1;
374
375 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376 |INLCR|IGNCR|ICRNL|IXON);
377 tty.c_oflag |= OPOST;
378 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379 tty.c_cflag &= ~(CSIZE|PARENB);
380 tty.c_cflag |= CS8;
381 tty.c_cc[VMIN] = 1;
382 tty.c_cc[VTIME] = 0;
383
384 tcsetattr (0, TCSANOW, &tty);
385 }
386 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
387 }
388 #endif
389
390 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
392 #ifdef SIGXCPU
393 signal(SIGXCPU, sigterm_handler);
394 #endif
395 #if HAVE_SETCONSOLECTRLHANDLER
396 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
397 #endif
398 }
399
400 /* read a key without blocking */
401 static int read_key(void)
402 {
403 unsigned char ch;
404 #if HAVE_TERMIOS_H
405 int n = 1;
406 struct timeval tv;
407 fd_set rfds;
408
409 FD_ZERO(&rfds);
410 FD_SET(0, &rfds);
411 tv.tv_sec = 0;
412 tv.tv_usec = 0;
413 n = select(1, &rfds, NULL, NULL, &tv);
414 if (n > 0) {
415 n = read(0, &ch, 1);
416 if (n == 1)
417 return ch;
418
419 return n;
420 }
421 #elif HAVE_KBHIT
422 # if HAVE_PEEKNAMEDPIPE
423 static int is_pipe;
424 static HANDLE input_handle;
425 DWORD dw, nchars;
426 if(!input_handle){
427 input_handle = GetStdHandle(STD_INPUT_HANDLE);
428 is_pipe = !GetConsoleMode(input_handle, &dw);
429 }
430
431 if (is_pipe) {
432         /* When running under a GUI, you will end up here. */
433 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434 // input pipe may have been closed by the program that ran ffmpeg
435 return -1;
436 }
437 //Read it
438 if(nchars != 0) {
439 read(0, &ch, 1);
440 return ch;
441 }else{
442 return -1;
443 }
444 }
445 # endif
446 if(kbhit())
447 return(getch());
448 #endif
449 return -1;
450 }
451
452 static int decode_interrupt_cb(void *ctx)
453 {
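    /* During initialization any signal interrupts blocking I/O; once
       transcoding has started, the first signal is left to the main loop and
       only a further signal interrupts. */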
454 return received_nb_signals > transcode_init_done;
455 }
456
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
458
459 static void ffmpeg_cleanup(int ret)
460 {
461 int i, j;
462
463 if (do_benchmark) {
464 int maxrss = getmaxrss() / 1024;
465 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
466 }
467
468 for (i = 0; i < nb_filtergraphs; i++) {
469 FilterGraph *fg = filtergraphs[i];
470 avfilter_graph_free(&fg->graph);
471 for (j = 0; j < fg->nb_inputs; j++) {
472 av_freep(&fg->inputs[j]->name);
473 av_freep(&fg->inputs[j]);
474 }
475 av_freep(&fg->inputs);
476 for (j = 0; j < fg->nb_outputs; j++) {
477 av_freep(&fg->outputs[j]->name);
478 av_freep(&fg->outputs[j]);
479 }
480 av_freep(&fg->outputs);
481 av_freep(&fg->graph_desc);
482
483 av_freep(&filtergraphs[i]);
484 }
485 av_freep(&filtergraphs);
486
487 av_freep(&subtitle_out);
488
489 /* close files */
490 for (i = 0; i < nb_output_files; i++) {
491 OutputFile *of = output_files[i];
492 AVFormatContext *s;
493 if (!of)
494 continue;
495 s = of->ctx;
496 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
497 avio_closep(&s->pb);
498 avformat_free_context(s);
499 av_dict_free(&of->opts);
500
501 av_freep(&output_files[i]);
502 }
503 for (i = 0; i < nb_output_streams; i++) {
504 OutputStream *ost = output_streams[i];
505 AVBitStreamFilterContext *bsfc;
506
507 if (!ost)
508 continue;
509
510 bsfc = ost->bitstream_filters;
511 while (bsfc) {
512 AVBitStreamFilterContext *next = bsfc->next;
513 av_bitstream_filter_close(bsfc);
514 bsfc = next;
515 }
516 ost->bitstream_filters = NULL;
517 av_frame_free(&ost->filtered_frame);
518 av_frame_free(&ost->last_frame);
519
520 av_parser_close(ost->parser);
521
522 av_freep(&ost->forced_keyframes);
523 av_expr_free(ost->forced_keyframes_pexpr);
524 av_freep(&ost->avfilter);
525 av_freep(&ost->logfile_prefix);
526
527 av_freep(&ost->audio_channels_map);
528 ost->audio_channels_mapped = 0;
529
530 av_dict_free(&ost->sws_dict);
531
532 avcodec_free_context(&ost->enc_ctx);
533
534 av_freep(&output_streams[i]);
535 }
536 #if HAVE_PTHREADS
537 free_input_threads();
538 #endif
539 for (i = 0; i < nb_input_files; i++) {
540 avformat_close_input(&input_files[i]->ctx);
541 av_freep(&input_files[i]);
542 }
543 for (i = 0; i < nb_input_streams; i++) {
544 InputStream *ist = input_streams[i];
545
546 av_frame_free(&ist->decoded_frame);
547 av_frame_free(&ist->filter_frame);
548 av_dict_free(&ist->decoder_opts);
549 avsubtitle_free(&ist->prev_sub.subtitle);
550 av_frame_free(&ist->sub2video.frame);
551 av_freep(&ist->filters);
552 av_freep(&ist->hwaccel_device);
553
554 avcodec_free_context(&ist->dec_ctx);
555
556 av_freep(&input_streams[i]);
557 }
558
559 if (vstats_file) {
560 if (fclose(vstats_file))
561 av_log(NULL, AV_LOG_ERROR,
562 "Error closing vstats file, loss of information possible: %s\n",
563 av_err2str(AVERROR(errno)));
564 }
565 av_freep(&vstats_filename);
566
567 av_freep(&input_streams);
568 av_freep(&input_files);
569 av_freep(&output_streams);
570 av_freep(&output_files);
571
572 uninit_opts();
573
574 avformat_network_deinit();
575
576 if (received_sigterm) {
577 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578 (int) received_sigterm);
579 } else if (ret && transcode_init_done) {
580 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
581 }
582 term_exit();
583 ffmpeg_exited = 1;
584 }
585
586 void remove_avoptions(AVDictionary **a, AVDictionary *b)
587 {
588 AVDictionaryEntry *t = NULL;
589
590 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
591 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
592 }
593 }
594
595 void assert_avoptions(AVDictionary *m)
596 {
597 AVDictionaryEntry *t;
598 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
600 exit_program(1);
601 }
602 }
603
604 static void abort_codec_experimental(AVCodec *c, int encoder)
605 {
606 exit_program(1);
607 }
608
609 static void update_benchmark(const char *fmt, ...)
610 {
611 if (do_benchmark_all) {
612 int64_t t = getutime();
613 va_list va;
614 char buf[1024];
615
616 if (fmt) {
617 va_start(va, fmt);
618 vsnprintf(buf, sizeof(buf), fmt, va);
619 va_end(va);
620 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
621 }
622 current_time = t;
623 }
624 }
625
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
627 {
628 int i;
629 for (i = 0; i < nb_output_streams; i++) {
630 OutputStream *ost2 = output_streams[i];
631 ost2->finished |= ost == ost2 ? this_stream : others;
632 }
633 }
634
635 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
636 {
637 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
638 AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
639 int ret;
640
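    /* If the encoder produced extradata after the output stream was set up,
       copy it into the stream's codec context so the muxer can write it. */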
641 if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642 ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643 if (ost->st->codec->extradata) {
644 memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645 ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
646 }
647 }
648
649 if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
650 (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
651 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
652
653 /*
654 * Audio encoders may split the packets -- #frames in != #packets out.
655 * But there is no reordering, so we can limit the number of output packets
656 * by simply dropping them here.
657 * Counting encoded video frames needs to be done separately because of
658 * reordering, see do_video_out()
659 */
660 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661 if (ost->frame_number >= ost->max_frames) {
662 av_packet_unref(pkt);
663 return;
664 }
665 ost->frame_number++;
666 }
667 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
668 int i;
669 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
670 NULL);
671 ost->quality = sd ? AV_RL32(sd) : -1;
672 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
673
674 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
675 if (sd && i < sd[5])
676 ost->error[i] = AV_RL64(sd + 8 + 8*i);
677 else
678 ost->error[i] = -1;
679 }
680
681 if (ost->frame_rate.num && ost->is_cfr) {
682 if (pkt->duration > 0)
683 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
684 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
685 ost->st->time_base);
686 }
687 }
688
689 if (bsfc)
690 av_packet_split_side_data(pkt);
691
692 if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
693 print_error("", ret);
694 if (exit_on_error)
695 exit_program(1);
696 }
697 if (pkt->size == 0 && pkt->side_data_elems == 0)
698 return;
699
700 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
701 if (pkt->dts != AV_NOPTS_VALUE &&
702 pkt->pts != AV_NOPTS_VALUE &&
703 pkt->dts > pkt->pts) {
704 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
705 pkt->dts, pkt->pts,
706 ost->file_index, ost->st->index);
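            /* Guess: take the median of pts, dts and last_mux_dts + 1
               (their sum minus their min and their max). */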
707 pkt->pts =
708 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
709 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
710 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
711 }
712 if(
713 (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
714 pkt->dts != AV_NOPTS_VALUE &&
715 !(avctx->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
716 ost->last_mux_dts != AV_NOPTS_VALUE) {
717 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
718 if (pkt->dts < max) {
719 int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
720 av_log(s, loglevel, "Non-monotonous DTS in output stream "
721 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
722 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
723 if (exit_on_error) {
724 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
725 exit_program(1);
726 }
727 av_log(s, loglevel, "changing to %"PRId64". This may result "
728 "in incorrect timestamps in the output file.\n",
729 max);
730 if(pkt->pts >= pkt->dts)
731 pkt->pts = FFMAX(pkt->pts, max);
732 pkt->dts = max;
733 }
734 }
735 }
736 ost->last_mux_dts = pkt->dts;
737
738 ost->data_size += pkt->size;
739 ost->packets_written++;
740
741 pkt->stream_index = ost->index;
742
743 if (debug_ts) {
744 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
745 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
746 av_get_media_type_string(ost->enc_ctx->codec_type),
747 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
748 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
749 pkt->size
750 );
751 }
752
753 ret = av_interleaved_write_frame(s, pkt);
754 if (ret < 0) {
755 print_error("av_interleaved_write_frame()", ret);
756 main_return_code = 1;
757 close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
758 }
759 av_packet_unref(pkt);
760 }
761
762 static void close_output_stream(OutputStream *ost)
763 {
764 OutputFile *of = output_files[ost->file_index];
765
766 ost->finished |= ENCODER_FINISHED;
767 if (of->shortest) {
768 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
769 of->recording_time = FFMIN(of->recording_time, end);
770 }
771 }
772
773 static int check_recording_time(OutputStream *ost)
774 {
775 OutputFile *of = output_files[ost->file_index];
776
777 if (of->recording_time != INT64_MAX &&
778 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
779 AV_TIME_BASE_Q) >= 0) {
780 close_output_stream(ost);
781 return 0;
782 }
783 return 1;
784 }
785
786 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
787 AVFrame *frame)
788 {
789 AVCodecContext *enc = ost->enc_ctx;
790 AVPacket pkt;
791 int got_packet = 0;
792
793 av_init_packet(&pkt);
794 pkt.data = NULL;
795 pkt.size = 0;
796
797 if (!check_recording_time(ost))
798 return;
799
800 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
801 frame->pts = ost->sync_opts;
802 ost->sync_opts = frame->pts + frame->nb_samples;
803 ost->samples_encoded += frame->nb_samples;
804 ost->frames_encoded++;
805
806 av_assert0(pkt.size || !pkt.data);
807 update_benchmark(NULL);
808 if (debug_ts) {
809 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
810 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
811 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
812 enc->time_base.num, enc->time_base.den);
813 }
814
815 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
816 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
817 exit_program(1);
818 }
819 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
820
821 if (got_packet) {
822 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
823
824 if (debug_ts) {
825 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
826 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
827 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
828 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
829 }
830
831 write_frame(s, &pkt, ost);
832 }
833 }
834
835 static void do_subtitle_out(AVFormatContext *s,
836 OutputStream *ost,
837 InputStream *ist,
838 AVSubtitle *sub)
839 {
840 int subtitle_out_max_size = 1024 * 1024;
841 int subtitle_out_size, nb, i;
842 AVCodecContext *enc;
843 AVPacket pkt;
844 int64_t pts;
845
846 if (sub->pts == AV_NOPTS_VALUE) {
847 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
848 if (exit_on_error)
849 exit_program(1);
850 return;
851 }
852
853 enc = ost->enc_ctx;
854
855 if (!subtitle_out) {
856 subtitle_out = av_malloc(subtitle_out_max_size);
857 if (!subtitle_out) {
858 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
859 exit_program(1);
860 }
861 }
862
863     /* Note: DVB subtitles need one packet to draw them and one other
864 packet to clear them */
865 /* XXX: signal it in the codec context ? */
866 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
867 nb = 2;
868 else
869 nb = 1;
870
871 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
872 pts = sub->pts;
873 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
874 pts -= output_files[ost->file_index]->start_time;
875 for (i = 0; i < nb; i++) {
876 unsigned save_num_rects = sub->num_rects;
877
878 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
879 if (!check_recording_time(ost))
880 return;
881
882 sub->pts = pts;
883 // start_display_time is required to be 0
884 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
885 sub->end_display_time -= sub->start_display_time;
886 sub->start_display_time = 0;
887 if (i == 1)
888 sub->num_rects = 0;
889
890 ost->frames_encoded++;
891
892 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
893 subtitle_out_max_size, sub);
894 if (i == 1)
895 sub->num_rects = save_num_rects;
896 if (subtitle_out_size < 0) {
897 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
898 exit_program(1);
899 }
900
901 av_init_packet(&pkt);
902 pkt.data = subtitle_out;
903 pkt.size = subtitle_out_size;
904 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
905 pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
906 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
907 /* XXX: the pts correction is handled here. Maybe handling
908 it in the codec would be better */
909 if (i == 0)
910 pkt.pts += 90 * sub->start_display_time;
911 else
912 pkt.pts += 90 * sub->end_display_time;
913 }
914 pkt.dts = pkt.pts;
915 write_frame(s, &pkt, ost);
916 }
917 }
918
919 static void do_video_out(AVFormatContext *s,
920 OutputStream *ost,
921 AVFrame *next_picture,
922 double sync_ipts)
923 {
924 int ret, format_video_sync;
925 AVPacket pkt;
926 AVCodecContext *enc = ost->enc_ctx;
927 AVCodecContext *mux_enc = ost->st->codec;
928 int nb_frames, nb0_frames, i;
929 double delta, delta0;
930 double duration = 0;
931 int frame_size = 0;
932 InputStream *ist = NULL;
933 AVFilterContext *filter = ost->filter->filter;
934
935 if (ost->source_index >= 0)
936 ist = input_streams[ost->source_index];
937
938 if (filter->inputs[0]->frame_rate.num > 0 &&
939 filter->inputs[0]->frame_rate.den > 0)
940 duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
941
942 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
943 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
944
945 if (!ost->filters_script &&
946 !ost->filters &&
947 next_picture &&
948 ist &&
949 lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
950 duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
951 }
952
953 if (!next_picture) {
954 //end, flushing
955 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
956 ost->last_nb0_frames[1],
957 ost->last_nb0_frames[2]);
958 } else {
959 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
960 delta = delta0 + duration;
961
962 /* by default, we output a single frame */
963 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
964 nb_frames = 1;
965
966 format_video_sync = video_sync_method;
967 if (format_video_sync == VSYNC_AUTO) {
968 if(!strcmp(s->oformat->name, "avi")) {
969 format_video_sync = VSYNC_VFR;
970 } else
971 format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
972 if ( ist
973 && format_video_sync == VSYNC_CFR
974 && input_files[ist->file_index]->ctx->nb_streams == 1
975 && input_files[ist->file_index]->input_ts_offset == 0) {
976 format_video_sync = VSYNC_VSCFR;
977 }
978 if (format_video_sync == VSYNC_CFR && copy_ts) {
979 format_video_sync = VSYNC_VSCFR;
980 }
981 }
982 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
983
984 if (delta0 < 0 &&
985 delta > 0 &&
986 format_video_sync != VSYNC_PASSTHROUGH &&
987 format_video_sync != VSYNC_DROP) {
988 if (delta0 < -0.6) {
989 av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
990 } else
991 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
992 sync_ipts = ost->sync_opts;
993 duration += delta0;
994 delta0 = 0;
995 }
996
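        /* Decide how many output frames this input picture becomes:
           0 drops it, more than 1 duplicates it; nb0_frames of those
           repeat the previous frame instead. */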
997 switch (format_video_sync) {
998 case VSYNC_VSCFR:
999 if (ost->frame_number == 0 && delta0 >= 0.5) {
1000 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1001 delta = duration;
1002 delta0 = 0;
1003 ost->sync_opts = lrint(sync_ipts);
1004 }
1005 case VSYNC_CFR:
1006 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1007 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1008 nb_frames = 0;
1009 } else if (delta < -1.1)
1010 nb_frames = 0;
1011 else if (delta > 1.1) {
1012 nb_frames = lrintf(delta);
1013 if (delta0 > 1.1)
1014 nb0_frames = lrintf(delta0 - 0.6);
1015 }
1016 break;
1017 case VSYNC_VFR:
1018 if (delta <= -0.6)
1019 nb_frames = 0;
1020 else if (delta > 0.6)
1021 ost->sync_opts = lrint(sync_ipts);
1022 break;
1023 case VSYNC_DROP:
1024 case VSYNC_PASSTHROUGH:
1025 ost->sync_opts = lrint(sync_ipts);
1026 break;
1027 default:
1028 av_assert0(0);
1029 }
1030 }
1031
1032 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1033 nb0_frames = FFMIN(nb0_frames, nb_frames);
1034
1035 memmove(ost->last_nb0_frames + 1,
1036 ost->last_nb0_frames,
1037 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1038 ost->last_nb0_frames[0] = nb0_frames;
1039
1040 if (nb0_frames == 0 && ost->last_dropped) {
1041 nb_frames_drop++;
1042 av_log(NULL, AV_LOG_VERBOSE,
1043 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1044 ost->frame_number, ost->st->index, ost->last_frame->pts);
1045 }
1046 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1047 if (nb_frames > dts_error_threshold * 30) {
1048 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1049 nb_frames_drop++;
1050 return;
1051 }
1052 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1053 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1054 }
1055 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1056
1057 /* duplicates frame if needed */
1058 for (i = 0; i < nb_frames; i++) {
1059 AVFrame *in_picture;
1060 av_init_packet(&pkt);
1061 pkt.data = NULL;
1062 pkt.size = 0;
1063
1064 if (i < nb0_frames && ost->last_frame) {
1065 in_picture = ost->last_frame;
1066 } else
1067 in_picture = next_picture;
1068
1069 if (!in_picture)
1070 return;
1071
1072 in_picture->pts = ost->sync_opts;
1073
1074 #if 1
1075 if (!check_recording_time(ost))
1076 #else
1077 if (ost->frame_number >= ost->max_frames)
1078 #endif
1079 return;
1080
1081 #if FF_API_LAVF_FMT_RAWPICTURE
1082 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1083 enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1084 /* raw pictures are written as AVPicture structure to
1085            avoid any copies. We temporarily support the older
1086 method. */
1087 if (in_picture->interlaced_frame)
1088 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1089 else
1090 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1091 pkt.data = (uint8_t *)in_picture;
1092 pkt.size = sizeof(AVPicture);
1093 pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1094 pkt.flags |= AV_PKT_FLAG_KEY;
1095
1096 write_frame(s, &pkt, ost);
1097 } else
1098 #endif
1099 {
1100 int got_packet, forced_keyframe = 0;
1101 double pts_time;
1102
1103 if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1104 ost->top_field_first >= 0)
1105 in_picture->top_field_first = !!ost->top_field_first;
1106
1107 if (in_picture->interlaced_frame) {
1108 if (enc->codec->id == AV_CODEC_ID_MJPEG)
1109 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1110 else
1111 mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1112 } else
1113 mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1114
1115 in_picture->quality = enc->global_quality;
1116 in_picture->pict_type = 0;
1117
1118 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1119 in_picture->pts * av_q2d(enc->time_base) : NAN;
1120 if (ost->forced_kf_index < ost->forced_kf_count &&
1121 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1122 ost->forced_kf_index++;
1123 forced_keyframe = 1;
1124 } else if (ost->forced_keyframes_pexpr) {
1125 double res;
1126 ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1127 res = av_expr_eval(ost->forced_keyframes_pexpr,
1128 ost->forced_keyframes_expr_const_values, NULL);
1129 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1130 ost->forced_keyframes_expr_const_values[FKF_N],
1131 ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1132 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1133 ost->forced_keyframes_expr_const_values[FKF_T],
1134 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1135 res);
1136 if (res) {
1137 forced_keyframe = 1;
1138 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1139 ost->forced_keyframes_expr_const_values[FKF_N];
1140 ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1141 ost->forced_keyframes_expr_const_values[FKF_T];
1142 ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1143 }
1144
1145 ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1146 } else if ( ost->forced_keyframes
1147 && !strncmp(ost->forced_keyframes, "source", 6)
1148 && in_picture->key_frame==1) {
1149 forced_keyframe = 1;
1150 }
1151
1152 if (forced_keyframe) {
1153 in_picture->pict_type = AV_PICTURE_TYPE_I;
1154 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1155 }
1156
1157 update_benchmark(NULL);
1158 if (debug_ts) {
1159 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1160 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1161 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1162 enc->time_base.num, enc->time_base.den);
1163 }
1164
1165 ost->frames_encoded++;
1166
1167 ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1168 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1169 if (ret < 0) {
1170 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1171 exit_program(1);
1172 }
1173
1174 if (got_packet) {
1175 if (debug_ts) {
1176 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1177 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1178 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1179 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1180 }
1181
1182 if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1183 pkt.pts = ost->sync_opts;
1184
1185 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1186
1187 if (debug_ts) {
1188 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1189 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1190 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1191 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1192 }
1193
1194 frame_size = pkt.size;
1195 write_frame(s, &pkt, ost);
1196
1197 /* if two pass, output log */
1198 if (ost->logfile && enc->stats_out) {
1199 fprintf(ost->logfile, "%s", enc->stats_out);
1200 }
1201 }
1202 }
1203 ost->sync_opts++;
1204 /*
1205 * For video, number of frames in == number of packets out.
1206 * But there may be reordering, so we can't throw away frames on encoder
1207 * flush, we need to limit them here, before they go into encoder.
1208 */
1209 ost->frame_number++;
1210
1211 if (vstats_filename && frame_size)
1212 do_video_stats(ost, frame_size);
1213 }
1214
1215 if (!ost->last_frame)
1216 ost->last_frame = av_frame_alloc();
1217 av_frame_unref(ost->last_frame);
1218 if (next_picture && ost->last_frame)
1219 av_frame_ref(ost->last_frame, next_picture);
1220 else
1221 av_frame_free(&ost->last_frame);
1222 }
1223
1224 static double psnr(double d)
1225 {
1226 return -10.0 * log10(d);
1227 }
1228
1229 static void do_video_stats(OutputStream *ost, int frame_size)
1230 {
1231 AVCodecContext *enc;
1232 int frame_number;
1233 double ti1, bitrate, avg_bitrate;
1234
1235 /* this is executed just the first time do_video_stats is called */
1236 if (!vstats_file) {
1237 vstats_file = fopen(vstats_filename, "w");
1238 if (!vstats_file) {
1239 perror("fopen");
1240 exit_program(1);
1241 }
1242 }
1243
1244 enc = ost->enc_ctx;
1245 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1246 frame_number = ost->st->nb_frames;
1247 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1248 ost->quality / (float)FF_QP2LAMBDA);
1249
1250 if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1251 fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1252
1253 fprintf(vstats_file,"f_size= %6d ", frame_size);
1254 /* compute pts value */
1255 ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1256 if (ti1 < 0.01)
1257 ti1 = 0.01;
1258
1259 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1260 avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1261 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1262 (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1263 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1264 }
1265 }
1266
1267 static void finish_output_stream(OutputStream *ost)
1268 {
1269 OutputFile *of = output_files[ost->file_index];
1270 int i;
1271
1272 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1273
1274 if (of->shortest) {
1275 for (i = 0; i < of->ctx->nb_streams; i++)
1276 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1277 }
1278 }
1279
1280 /**
1281 * Get and encode new output from any of the filtergraphs, without causing
1282 * activity.
1283 *
1284 * @return 0 for success, <0 for severe errors
1285 */
1286 static int reap_filters(int flush)
1287 {
1288 AVFrame *filtered_frame = NULL;
1289 int i;
1290
1291 /* Reap all buffers present in the buffer sinks */
1292 for (i = 0; i < nb_output_streams; i++) {
1293 OutputStream *ost = output_streams[i];
1294 OutputFile *of = output_files[ost->file_index];
1295 AVFilterContext *filter;
1296 AVCodecContext *enc = ost->enc_ctx;
1297 int ret = 0;
1298
1299 if (!ost->filter)
1300 continue;
1301 filter = ost->filter->filter;
1302
1303 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1304 return AVERROR(ENOMEM);
1305 }
1306 filtered_frame = ost->filtered_frame;
1307
1308 while (1) {
1309 double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1310 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1311 AV_BUFFERSINK_FLAG_NO_REQUEST);
1312 if (ret < 0) {
1313 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1314 av_log(NULL, AV_LOG_WARNING,
1315 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1316 } else if (flush && ret == AVERROR_EOF) {
1317 if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1318 do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1319 }
1320 break;
1321 }
1322 if (ost->finished) {
1323 av_frame_unref(filtered_frame);
1324 continue;
1325 }
1326 if (filtered_frame->pts != AV_NOPTS_VALUE) {
1327 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1328 AVRational tb = enc->time_base;
1329 int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1330
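                /* Rescale in a temporarily finer time base so float_pts keeps
                   fractional precision; it is scaled back down below. */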
1331 tb.den <<= extra_bits;
1332 float_pts =
1333 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1334 av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1335 float_pts /= 1 << extra_bits;
1336                // avoid exact midpoints to reduce the chance of rounding differences; this can be removed in case the fps code is changed to work with integers
1337 float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1338
1339 filtered_frame->pts =
1340 av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1341 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1342 }
1343 //if (ost->source_index >= 0)
1344 // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1345
1346 switch (filter->inputs[0]->type) {
1347 case AVMEDIA_TYPE_VIDEO:
1348 if (!ost->frame_aspect_ratio.num)
1349 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1350
1351 if (debug_ts) {
1352 av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1353 av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1354 float_pts,
1355 enc->time_base.num, enc->time_base.den);
1356 }
1357
1358 do_video_out(of->ctx, ost, filtered_frame, float_pts);
1359 break;
1360 case AVMEDIA_TYPE_AUDIO:
1361 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1362 enc->channels != av_frame_get_channels(filtered_frame)) {
1363 av_log(NULL, AV_LOG_ERROR,
1364 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1365 break;
1366 }
1367 do_audio_out(of->ctx, ost, filtered_frame);
1368 break;
1369 default:
1370 // TODO support subtitle filters
1371 av_assert0(0);
1372 }
1373
1374 av_frame_unref(filtered_frame);
1375 }
1376 }
1377
1378 return 0;
1379 }
1380
1381 static void print_final_stats(int64_t total_size)
1382 {
1383 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1384 uint64_t subtitle_size = 0;
1385 uint64_t data_size = 0;
1386 float percent = -1.0;
1387 int i, j;
1388 int pass1_used = 1;
1389
1390 for (i = 0; i < nb_output_streams; i++) {
1391 OutputStream *ost = output_streams[i];
1392 switch (ost->enc_ctx->codec_type) {
1393 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1394 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1395 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1396 default: other_size += ost->data_size; break;
1397 }
1398 extra_size += ost->enc_ctx->extradata_size;
1399 data_size += ost->data_size;
1400        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1401 != AV_CODEC_FLAG_PASS1)
1402 pass1_used = 0;
1403 }
1404
1405 if (data_size && total_size>0 && total_size >= data_size)
1406 percent = 100.0 * (total_size - data_size) / data_size;
1407
1408 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1409 video_size / 1024.0,
1410 audio_size / 1024.0,
1411 subtitle_size / 1024.0,
1412 other_size / 1024.0,
1413 extra_size / 1024.0);
1414 if (percent >= 0.0)
1415 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1416 else
1417 av_log(NULL, AV_LOG_INFO, "unknown");
1418 av_log(NULL, AV_LOG_INFO, "\n");
1419
1420 /* print verbose per-stream stats */
1421 for (i = 0; i < nb_input_files; i++) {
1422 InputFile *f = input_files[i];
1423 uint64_t total_packets = 0, total_size = 0;
1424
1425 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1426 i, f->ctx->filename);
1427
1428 for (j = 0; j < f->nb_streams; j++) {
1429 InputStream *ist = input_streams[f->ist_index + j];
1430 enum AVMediaType type = ist->dec_ctx->codec_type;
1431
1432 total_size += ist->data_size;
1433 total_packets += ist->nb_packets;
1434
1435 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1436 i, j, media_type_string(type));
1437 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1438 ist->nb_packets, ist->data_size);
1439
1440 if (ist->decoding_needed) {
1441 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1442 ist->frames_decoded);
1443 if (type == AVMEDIA_TYPE_AUDIO)
1444 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1445 av_log(NULL, AV_LOG_VERBOSE, "; ");
1446 }
1447
1448 av_log(NULL, AV_LOG_VERBOSE, "\n");
1449 }
1450
1451 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1452 total_packets, total_size);
1453 }
1454
1455 for (i = 0; i < nb_output_files; i++) {
1456 OutputFile *of = output_files[i];
1457 uint64_t total_packets = 0, total_size = 0;
1458
1459 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1460 i, of->ctx->filename);
1461
1462 for (j = 0; j < of->ctx->nb_streams; j++) {
1463 OutputStream *ost = output_streams[of->ost_index + j];
1464 enum AVMediaType type = ost->enc_ctx->codec_type;
1465
1466 total_size += ost->data_size;
1467 total_packets += ost->packets_written;
1468
1469 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1470 i, j, media_type_string(type));
1471 if (ost->encoding_needed) {
1472 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1473 ost->frames_encoded);
1474 if (type == AVMEDIA_TYPE_AUDIO)
1475 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1476 av_log(NULL, AV_LOG_VERBOSE, "; ");
1477 }
1478
1479 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1480 ost->packets_written, ost->data_size);
1481
1482 av_log(NULL, AV_LOG_VERBOSE, "\n");
1483 }
1484
1485 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1486 total_packets, total_size);
1487 }
1488 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1489 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1490 if (pass1_used) {
1491 av_log(NULL, AV_LOG_WARNING, "\n");
1492 } else {
1493 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1494 }
1495 }
1496 }
1497
1498 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1499 {
1500 char buf[1024];
1501 AVBPrint buf_script;
1502 OutputStream *ost;
1503 AVFormatContext *oc;
1504 int64_t total_size;
1505 AVCodecContext *enc;
1506 int frame_number, vid, i;
1507 double bitrate;
1508 double speed;
1509 int64_t pts = INT64_MIN + 1;
1510 static int64_t last_time = -1;
1511 static int qp_histogram[52];
1512 int hours, mins, secs, us;
1513 int ret;
1514 float t;
1515
1516 if (!print_stats && !is_last_report && !progress_avio)
1517 return;
1518
1519 if (!is_last_report) {
1520 if (last_time == -1) {
1521 last_time = cur_time;
1522 return;
1523 }
1524 if ((cur_time - last_time) < 500000)
1525 return;
1526 last_time = cur_time;
1527 }
1528
1529 t = (cur_time-timer_start) / 1000000.0;
1530
1531
1532 oc = output_files[0]->ctx;
1533
1534 total_size = avio_size(oc->pb);
1535 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1536 total_size = avio_tell(oc->pb);
1537
1538 buf[0] = '\0';
1539 vid = 0;
1540 av_bprint_init(&buf_script, 0, 1);
1541 for (i = 0; i < nb_output_streams; i++) {
1542 float q = -1;
1543 ost = output_streams[i];
1544 enc = ost->enc_ctx;
1545 if (!ost->stream_copy)
1546 q = ost->quality / (float) FF_QP2LAMBDA;
1547
1548 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1549 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1550 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1551 ost->file_index, ost->index, q);
1552 }
1553 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1554 float fps;
1555
1556 frame_number = ost->frame_number;
1557 fps = t > 1 ? frame_number / t : 0;
1558 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1559 frame_number, fps < 9.95, fps, q);
1560 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1561 av_bprintf(&buf_script, "fps=%.1f\n", fps);
1562 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1563 ost->file_index, ost->index, q);
1564 if (is_last_report)
1565 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1566 if (qp_hist) {
1567 int j;
1568 int qp = lrintf(q);
1569 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1570 qp_histogram[qp]++;
1571 for (j = 0; j < 32; j++)
1572 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1573 }
1574
1575 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1576 int j;
1577 double error, error_sum = 0;
1578 double scale, scale_sum = 0;
1579 double p;
1580 char type[3] = { 'Y','U','V' };
1581 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1582 for (j = 0; j < 3; j++) {
1583 if (is_last_report) {
1584 error = enc->error[j];
1585 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1586 } else {
1587 error = ost->error[j];
1588 scale = enc->width * enc->height * 255.0 * 255.0;
1589 }
1590 if (j)
1591 scale /= 4;
1592 error_sum += error;
1593 scale_sum += scale;
1594 p = psnr(error / scale);
1595 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1596 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1597 ost->file_index, ost->index, type[j] | 32, p);
1598 }
1599 p = psnr(error_sum / scale_sum);
1600 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1601 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1602 ost->file_index, ost->index, p);
1603 }
1604 vid = 1;
1605 }
1606 /* compute min output value */
1607 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1608 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1609 ost->st->time_base, AV_TIME_BASE_Q));
1610 if (is_last_report)
1611 nb_frames_drop += ost->last_dropped;
1612 }
1613
1614 secs = FFABS(pts) / AV_TIME_BASE;
1615 us = FFABS(pts) % AV_TIME_BASE;
1616 mins = secs / 60;
1617 secs %= 60;
1618 hours = mins / 60;
1619 mins %= 60;
1620
1621 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1622 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1623
1624 if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1625 "size=N/A time=");
1626 else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1627 "size=%8.0fkB time=", total_size / 1024.0);
1628 if (pts < 0)
1629 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1630 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1631 "%02d:%02d:%02d.%02d ", hours, mins, secs,
1632 (100 * us) / AV_TIME_BASE);
1633
1634 if (bitrate < 0) {
1635 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1636 av_bprintf(&buf_script, "bitrate=N/A\n");
1637 }else{
1638 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1639 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1640 }
1641
1642 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1643 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1644 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1645 av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1646 hours, mins, secs, us);
1647
1648 if (nb_frames_dup || nb_frames_drop)
1649 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1650 nb_frames_dup, nb_frames_drop);
1651 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1652 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1653
1654 if (speed < 0) {
1655 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1656 av_bprintf(&buf_script, "speed=N/A\n");
1657 } else {
1658 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1659 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1660 }
1661
1662 if (print_stats || is_last_report) {
1663 const char end = is_last_report ? '\n' : '\r';
1664 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1665 fprintf(stderr, "%s %c", buf, end);
1666 } else
1667 av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1668
1669 fflush(stderr);
1670 }
1671
1672 if (progress_avio) {
1673 av_bprintf(&buf_script, "progress=%s\n",
1674 is_last_report ? "end" : "continue");
1675 avio_write(progress_avio, buf_script.str,
1676 FFMIN(buf_script.len, buf_script.size - 1));
1677 avio_flush(progress_avio);
1678 av_bprint_finalize(&buf_script, NULL);
1679 if (is_last_report) {
1680 if ((ret = avio_closep(&progress_avio)) < 0)
1681 av_log(NULL, AV_LOG_ERROR,
1682 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1683 }
1684 }
1685
1686 if (is_last_report)
1687 print_final_stats(total_size);
1688 }
1689
1690 static void flush_encoders(void)
1691 {
1692 int i, ret;
1693
1694 for (i = 0; i < nb_output_streams; i++) {
1695 OutputStream *ost = output_streams[i];
1696 AVCodecContext *enc = ost->enc_ctx;
1697 AVFormatContext *os = output_files[ost->file_index]->ctx;
1698 int stop_encoding = 0;
1699
1700 if (!ost->encoding_needed)
1701 continue;
1702
1703 if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1704 continue;
1705 #if FF_API_LAVF_FMT_RAWPICTURE
1706 if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1707 continue;
1708 #endif
1709
1710 for (;;) {
1711 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1712 const char *desc;
1713
1714 switch (enc->codec_type) {
1715 case AVMEDIA_TYPE_AUDIO:
1716 encode = avcodec_encode_audio2;
1717 desc = "audio";
1718 break;
1719 case AVMEDIA_TYPE_VIDEO:
1720 encode = avcodec_encode_video2;
1721 desc = "video";
1722 break;
1723 default:
1724 stop_encoding = 1;
1725 }
1726
1727 if (encode) {
1728 AVPacket pkt;
1729 int pkt_size;
1730 int got_packet;
1731 av_init_packet(&pkt);
1732 pkt.data = NULL;
1733 pkt.size = 0;
1734
1735 update_benchmark(NULL);
1736 ret = encode(enc, &pkt, NULL, &got_packet);
1737 update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1738 if (ret < 0) {
1739 av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1740 desc,
1741 av_err2str(ret));
1742 exit_program(1);
1743 }
1744 if (ost->logfile && enc->stats_out) {
1745 fprintf(ost->logfile, "%s", enc->stats_out);
1746 }
1747 if (!got_packet) {
1748 stop_encoding = 1;
1749 break;
1750 }
1751 if (ost->finished & MUXER_FINISHED) {
1752 av_packet_unref(&pkt);
1753 continue;
1754 }
1755 av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1756 pkt_size = pkt.size;
1757 write_frame(os, &pkt, ost);
1758 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1759 do_video_stats(ost, pkt_size);
1760 }
1761 }
1762
1763 if (stop_encoding)
1764 break;
1765 }
1766 }
1767 }
1768
1769 /*
1770 * Check whether a packet from ist should be written into ost at this time
1771 */
1772 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1773 {
1774 OutputFile *of = output_files[ost->file_index];
1775 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1776
1777 if (ost->source_index != ist_index)
1778 return 0;
1779
1780 if (ost->finished)
1781 return 0;
1782
1783 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1784 return 0;
1785
1786 return 1;
1787 }
1788
1789 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1790 {
1791 OutputFile *of = output_files[ost->file_index];
1792 InputFile *f = input_files [ist->file_index];
1793 int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1794 int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1795 AVPicture pict;
1796 AVPacket opkt;
1797
1798 av_init_packet(&opkt);
1799
1800 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1801 !ost->copy_initial_nonkeyframes)
1802 return;
1803
1804 if (!ost->frame_number && !ost->copy_prior_start) {
1805 int64_t comp_start = start_time;
1806 if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1807 comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1808 if (pkt->pts == AV_NOPTS_VALUE ?
1809 ist->pts < comp_start :
1810 pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1811 return;
1812 }
1813
1814 if (of->recording_time != INT64_MAX &&
1815 ist->pts >= of->recording_time + start_time) {
1816 close_output_stream(ost);
1817 return;
1818 }
1819
1820 if (f->recording_time != INT64_MAX) {
1821 start_time = f->ctx->start_time;
1822 if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1823 start_time += f->start_time;
1824 if (ist->pts >= f->recording_time + start_time) {
1825 close_output_stream(ost);
1826 return;
1827 }
1828 }
1829
1830 /* force the input stream PTS */
1831 if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1832 ost->sync_opts++;
1833
1834 if (pkt->pts != AV_NOPTS_VALUE)
1835 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1836 else
1837 opkt.pts = AV_NOPTS_VALUE;
1838
1839 if (pkt->dts == AV_NOPTS_VALUE)
1840 opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1841 else
1842 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1843 opkt.dts -= ost_tb_start_time;
1844
1845 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1846 int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1847 if(!duration)
1848 duration = ist->dec_ctx->frame_size;
1849 opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1850 (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1851 ost->st->time_base) - ost_tb_start_time;
1852 }
1853
1854 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1855 opkt.flags = pkt->flags;
1856 // FIXME: remove the following 2 lines; they should be replaced by the bitstream filters
1857 if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1858 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1859 && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1860 && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1861 ) {
1862 int ret = av_parser_change(ost->parser, ost->st->codec,
1863 &opkt.data, &opkt.size,
1864 pkt->data, pkt->size,
1865 pkt->flags & AV_PKT_FLAG_KEY);
1866 if (ret < 0) {
1867 av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1868 av_err2str(ret));
1869 exit_program(1);
1870 }
1871 if (ret) {
1872 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1873 if (!opkt.buf)
1874 exit_program(1);
1875 }
1876 } else {
1877 opkt.data = pkt->data;
1878 opkt.size = pkt->size;
1879 }
1880 av_copy_packet_side_data(&opkt, pkt);
1881
1882 #if FF_API_LAVF_FMT_RAWPICTURE
1883 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1884 ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1885 (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1886 /* store AVPicture in AVPacket, as expected by the output format */
1887 int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1888 if (ret < 0) {
1889 av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1890 av_err2str(ret));
1891 exit_program(1);
1892 }
1893 opkt.data = (uint8_t *)&pict;
1894 opkt.size = sizeof(AVPicture);
1895 opkt.flags |= AV_PKT_FLAG_KEY;
1896 }
1897 #endif
1898
1899 write_frame(of->ctx, &opkt, ost);
1900 }
1901
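/*
 * If the decoder did not report a channel layout, fall back to the default
 * layout for the channel count. Returns 0 if no layout could be guessed,
 * 1 otherwise.
 */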
1902 int guess_input_channel_layout(InputStream *ist)
1903 {
1904 AVCodecContext *dec = ist->dec_ctx;
1905
1906 if (!dec->channel_layout) {
1907 char layout_name[256];
1908
1909 if (dec->channels > ist->guess_layout_max)
1910 return 0;
1911 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1912 if (!dec->channel_layout)
1913 return 0;
1914 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1915 dec->channels, dec->channel_layout);
1916 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1917 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1918 }
1919 return 1;
1920 }
1921
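/*
 * Update the decode error statistics and, if exit_on_error is set, abort on a
 * decoding error or a corrupt decoded frame.
 */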
1922 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1923 {
1924 if (*got_output || ret<0)
1925 decode_error_stat[ret<0] ++;
1926
1927 if (ret < 0 && exit_on_error)
1928 exit_program(1);
1929
1930 if (exit_on_error && *got_output && ist) {
1931 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1932 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
1933 exit_program(1);
1934 }
1935 }
1936 }
1937
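/*
 * Decode one audio packet, derive a timestamp for the decoded frame, detect
 * sample rate/format/layout changes (reconfiguring the affected filtergraphs)
 * and push the frame into all filters fed by this input stream.
 */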
1938 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1939 {
1940 AVFrame *decoded_frame, *f;
1941 AVCodecContext *avctx = ist->dec_ctx;
1942 int i, ret, err = 0, resample_changed;
1943 AVRational decoded_frame_tb;
1944
1945 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1946 return AVERROR(ENOMEM);
1947 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1948 return AVERROR(ENOMEM);
1949 decoded_frame = ist->decoded_frame;
1950
1951 update_benchmark(NULL);
1952 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1953 update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1954
1955 if (ret >= 0 && avctx->sample_rate <= 0) {
1956 av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1957 ret = AVERROR_INVALIDDATA;
1958 }
1959
1960 check_decode_result(ist, got_output, ret);
1961
1962 if (!*got_output || ret < 0)
1963 return ret;
1964
1965 ist->samples_decoded += decoded_frame->nb_samples;
1966 ist->frames_decoded++;
1967
1968 #if 1
1969 /* increment next_pts and next_dts for the case where the input stream does not
1970 have timestamps or there are multiple frames in the packet */
1971 ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1972 avctx->sample_rate;
1973 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1974 avctx->sample_rate;
1975 #endif
1976
1977 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1978 ist->resample_channels != avctx->channels ||
1979 ist->resample_channel_layout != decoded_frame->channel_layout ||
1980 ist->resample_sample_rate != decoded_frame->sample_rate;
1981 if (resample_changed) {
1982 char layout1[64], layout2[64];
1983
1984 if (!guess_input_channel_layout(ist)) {
1985 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1986 "layout for Input Stream #%d.%d\n", ist->file_index,
1987 ist->st->index);
1988 exit_program(1);
1989 }
1990 decoded_frame->channel_layout = avctx->channel_layout;
1991
1992 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1993 ist->resample_channel_layout);
1994 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1995 decoded_frame->channel_layout);
1996
1997 av_log(NULL, AV_LOG_INFO,
1998 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1999 ist->file_index, ist->st->index,
2000 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2001 ist->resample_channels, layout1,
2002 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2003 avctx->channels, layout2);
2004
2005 ist->resample_sample_fmt = decoded_frame->format;
2006 ist->resample_sample_rate = decoded_frame->sample_rate;
2007 ist->resample_channel_layout = decoded_frame->channel_layout;
2008 ist->resample_channels = avctx->channels;
2009
2010 for (i = 0; i < nb_filtergraphs; i++)
2011 if (ist_in_filtergraph(filtergraphs[i], ist)) {
2012 FilterGraph *fg = filtergraphs[i];
2013 if (configure_filtergraph(fg) < 0) {
2014 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2015 exit_program(1);
2016 }
2017 }
2018 }
2019
2020 /* if the decoder provides a pts, use it instead of the last packet pts.
2021 the decoder could be delaying output by a packet or more. */
2022 if (decoded_frame->pts != AV_NOPTS_VALUE) {
2023 ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2024 decoded_frame_tb = avctx->time_base;
2025 } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2026 decoded_frame->pts = decoded_frame->pkt_pts;
2027 decoded_frame_tb = ist->st->time_base;
2028 } else if (pkt->pts != AV_NOPTS_VALUE) {
2029 decoded_frame->pts = pkt->pts;
2030 decoded_frame_tb = ist->st->time_base;
2031 }else {
2032 decoded_frame->pts = ist->dts;
2033 decoded_frame_tb = AV_TIME_BASE_Q;
2034 }
2035 pkt->pts = AV_NOPTS_VALUE;
2036 if (decoded_frame->pts != AV_NOPTS_VALUE)
2037 decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2038 (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2039 (AVRational){1, avctx->sample_rate});
2040 ist->nb_samples = decoded_frame->nb_samples;
2041 for (i = 0; i < ist->nb_filters; i++) {
2042 if (i < ist->nb_filters - 1) {
2043 f = ist->filter_frame;
2044 err = av_frame_ref(f, decoded_frame);
2045 if (err < 0)
2046 break;
2047 } else
2048 f = decoded_frame;
2049 err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2050 AV_BUFFERSRC_FLAG_PUSH);
2051 if (err == AVERROR_EOF)
2052 err = 0; /* ignore */
2053 if (err < 0)
2054 break;
2055 }
2056 decoded_frame->pts = AV_NOPTS_VALUE;
2057
2058 av_frame_unref(ist->filter_frame);
2059 av_frame_unref(decoded_frame);
2060 return err < 0 ? err : ret;
2061 }
2062
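/*
 * Decode one video packet, pick the best effort timestamp for the decoded
 * frame, retrieve data from hardware frames if needed, handle resolution or
 * pixel format changes (reconfiguring the affected filtergraphs) and push the
 * frame into all filters fed by this input stream.
 */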
2063 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2064 {
2065 AVFrame *decoded_frame, *f;
2066 int i, ret = 0, err = 0, resample_changed;
2067 int64_t best_effort_timestamp;
2068 AVRational *frame_sample_aspect;
2069
2070 if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2071 return AVERROR(ENOMEM);
2072 if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2073 return AVERROR(ENOMEM);
2074 decoded_frame = ist->decoded_frame;
2075 pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2076
2077 update_benchmark(NULL);
2078 ret = avcodec_decode_video2(ist->dec_ctx,
2079 decoded_frame, got_output, pkt);
2080 update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2081
2082 // The following check may be required in some cases where there is no parser
2083 // or the parser does not set has_b_frames correctly
2084 if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2085 if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2086 ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2087 } else
2088 av_log(ist->dec_ctx, AV_LOG_WARNING,
2089 "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2090 "If you want to help, upload a sample "
2091 "of this file to ftp://upload.ffmpeg.org/incoming/ "
2092 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2093 ist->dec_ctx->has_b_frames,
2094 ist->st->codec->has_b_frames);
2095 }
2096
2097 check_decode_result(ist, got_output, ret);
2098
2099 if (*got_output && ret >= 0) {
2100 if (ist->dec_ctx->width != decoded_frame->width ||
2101 ist->dec_ctx->height != decoded_frame->height ||
2102 ist->dec_ctx->pix_fmt != decoded_frame->format) {
2103 av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2104 decoded_frame->width,
2105 decoded_frame->height,
2106 decoded_frame->format,
2107 ist->dec_ctx->width,
2108 ist->dec_ctx->height,
2109 ist->dec_ctx->pix_fmt);
2110 }
2111 }
2112
2113 if (!*got_output || ret < 0)
2114 return ret;
2115
2116 if(ist->top_field_first>=0)
2117 decoded_frame->top_field_first = ist->top_field_first;
2118
2119 ist->frames_decoded++;
2120
2121 if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2122 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2123 if (err < 0)
2124 goto fail;
2125 }
2126 ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2127
2128 best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2129 if(best_effort_timestamp != AV_NOPTS_VALUE) {
2130 int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2131
2132 if (ts != AV_NOPTS_VALUE)
2133 ist->next_pts = ist->pts = ts;
2134 }
2135
2136 if (debug_ts) {
2137 av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2138 "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2139 ist->st->index, av_ts2str(decoded_frame->pts),
2140 av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2141 best_effort_timestamp,
2142 av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2143 decoded_frame->key_frame, decoded_frame->pict_type,
2144 ist->st->time_base.num, ist->st->time_base.den);
2145 }
2146
2147 pkt->size = 0;
2148
2149 if (ist->st->sample_aspect_ratio.num)
2150 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2151
2152 resample_changed = ist->resample_width != decoded_frame->width ||
2153 ist->resample_height != decoded_frame->height ||
2154 ist->resample_pix_fmt != decoded_frame->format;
2155 if (resample_changed) {
2156 av_log(NULL, AV_LOG_INFO,
2157 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2158 ist->file_index, ist->st->index,
2159 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2160 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2161
2162 ist->resample_width = decoded_frame->width;
2163 ist->resample_height = decoded_frame->height;
2164 ist->resample_pix_fmt = decoded_frame->format;
2165
2166 for (i = 0; i < nb_filtergraphs; i++) {
2167 if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2168 configure_filtergraph(filtergraphs[i]) < 0) {
2169 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2170 exit_program(1);
2171 }
2172 }
2173 }
2174
2175 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2176 for (i = 0; i < ist->nb_filters; i++) {
2177 if (!frame_sample_aspect->num)
2178 *frame_sample_aspect = ist->st->sample_aspect_ratio;
2179
2180 if (i < ist->nb_filters - 1) {
2181 f = ist->filter_frame;
2182 err = av_frame_ref(f, decoded_frame);
2183 if (err < 0)
2184 break;
2185 } else
2186 f = decoded_frame;
2187 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2188 if (ret == AVERROR_EOF) {
2189 ret = 0; /* ignore */
2190 } else if (ret < 0) {
2191 av_log(NULL, AV_LOG_FATAL,
2192 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2193 exit_program(1);
2194 }
2195 }
2196
2197 fail:
2198 av_frame_unref(ist->filter_frame);
2199 av_frame_unref(decoded_frame);
2200 return err < 0 ? err : ret;
2201 }
2202
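/*
 * Decode one subtitle packet, optionally fix the duration of the previous
 * subtitle, update the sub2video state and encode the subtitle for every
 * matching subtitle output stream.
 */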
2203 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2204 {
2205 AVSubtitle subtitle;
2206 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2207 &subtitle, got_output, pkt);
2208
2209 check_decode_result(NULL, got_output, ret);
2210
2211 if (ret < 0 || !*got_output) {
2212 if (!pkt->size)
2213 sub2video_flush(ist);
2214 return ret;
2215 }
2216
2217 if (ist->fix_sub_duration) {
2218 int end = 1;
2219 if (ist->prev_sub.got_output) {
2220 end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2221 1000, AV_TIME_BASE);
2222 if (end < ist->prev_sub.subtitle.end_display_time) {
2223 av_log(ist->dec_ctx, AV_LOG_DEBUG,
2224 "Subtitle duration reduced from %d to %d%s\n",
2225 ist->prev_sub.subtitle.end_display_time, end,
2226 end <= 0 ? ", dropping it" : "");
2227 ist->prev_sub.subtitle.end_display_time = end;
2228 }
2229 }
2230 FFSWAP(int, *got_output, ist->prev_sub.got_output);
2231 FFSWAP(int, ret, ist->prev_sub.ret);
2232 FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2233 if (end <= 0)
2234 goto out;
2235 }
2236
2237 if (!*got_output)
2238 return ret;
2239
2240 sub2video_update(ist, &subtitle);
2241
2242 if (!subtitle.num_rects)
2243 goto out;
2244
2245 ist->frames_decoded++;
2246
2247 for (i = 0; i < nb_output_streams; i++) {
2248 OutputStream *ost = output_streams[i];
2249
2250 if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2251 || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2252 continue;
2253
2254 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2255 }
2256
2257 out:
2258 avsubtitle_free(&subtitle);
2259 return ret;
2260 }
2261
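/* Signal EOF to all filtergraph inputs fed by this input stream. */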
2262 static int send_filter_eof(InputStream *ist)
2263 {
2264 int i, ret;
2265 for (i = 0; i < ist->nb_filters; i++) {
2266 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2267 if (ret < 0)
2268 return ret;
2269 }
2270 return 0;
2271 }
2272
2273 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2274 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2275 {
2276 int ret = 0, i;
2277 int got_output = 0;
2278
2279 AVPacket avpkt;
2280 if (!ist->saw_first_ts) {
2281 ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2282 ist->pts = 0;
2283 if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2284 ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2285 ist->pts = ist->dts; // unused, but better to set it to a value that's not totally wrong
2286 }
2287 ist->saw_first_ts = 1;
2288 }
2289
2290 if (ist->next_dts == AV_NOPTS_VALUE)
2291 ist->next_dts = ist->dts;
2292 if (ist->next_pts == AV_NOPTS_VALUE)
2293 ist->next_pts = ist->pts;
2294
2295 if (!pkt) {
2296 /* EOF handling */
2297 av_init_packet(&avpkt);
2298 avpkt.data = NULL;
2299 avpkt.size = 0;
2300 goto handle_eof;
2301 } else {
2302 avpkt = *pkt;
2303 }
2304
2305 if (pkt->dts != AV_NOPTS_VALUE) {
2306 ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2307 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2308 ist->next_pts = ist->pts = ist->dts;
2309 }
2310
2311 // while we have more to decode or while the decoder did output something on EOF
2312 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2313 int duration;
2314 handle_eof:
2315
2316 ist->pts = ist->next_pts;
2317 ist->dts = ist->next_dts;
2318
2319 if (avpkt.size && avpkt.size != pkt->size &&
2320 !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2321 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2322 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2323 ist->showed_multi_packet_warning = 1;
2324 }
2325
2326 switch (ist->dec_ctx->codec_type) {
2327 case AVMEDIA_TYPE_AUDIO:
2328 ret = decode_audio (ist, &avpkt, &got_output);
2329 break;
2330 case AVMEDIA_TYPE_VIDEO:
2331 ret = decode_video (ist, &avpkt, &got_output);
2332 if (avpkt.duration) {
2333 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2334 } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2335 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2336 duration = ((int64_t)AV_TIME_BASE *
2337 ist->dec_ctx->framerate.den * ticks) /
2338 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2339 } else
2340 duration = 0;
2341
2342 if(ist->dts != AV_NOPTS_VALUE && duration) {
2343 ist->next_dts += duration;
2344 }else
2345 ist->next_dts = AV_NOPTS_VALUE;
2346
2347 if (got_output)
2348 ist->next_pts += duration; //FIXME the duration is not correct in some cases
2349 break;
2350 case AVMEDIA_TYPE_SUBTITLE:
2351 ret = transcode_subtitles(ist, &avpkt, &got_output);
2352 break;
2353 default:
2354 return -1;
2355 }
2356
2357 if (ret < 0) {
2358 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2359 ist->file_index, ist->st->index, av_err2str(ret));
2360 if (exit_on_error)
2361 exit_program(1);
2362 break;
2363 }
2364
2365 avpkt.dts=
2366 avpkt.pts= AV_NOPTS_VALUE;
2367
2368 // touch data and size only if not EOF
2369 if (pkt) {
2370 if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2371 ret = avpkt.size;
2372 avpkt.data += ret;
2373 avpkt.size -= ret;
2374 }
2375 if (!got_output) {
2376 continue;
2377 }
2378 if (got_output && !pkt)
2379 break;
2380 }
2381
2382 /* after flushing, send an EOF on all the filter inputs attached to the stream */
2383 /* except when looping, where we need to flush but not send an EOF */
2384 if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2385 int ret = send_filter_eof(ist);
2386 if (ret < 0) {
2387 av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2388 exit_program(1);
2389 }
2390 }
2391
2392 /* handle stream copy */
2393 if (!ist->decoding_needed) {
2394 ist->dts = ist->next_dts;
2395 switch (ist->dec_ctx->codec_type) {
2396 case AVMEDIA_TYPE_AUDIO:
2397 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2398 ist->dec_ctx->sample_rate;
2399 break;
2400 case AVMEDIA_TYPE_VIDEO:
2401 if (ist->framerate.num) {
2402 // TODO: Remove work-around for c99-to-c89 issue 7
2403 AVRational time_base_q = AV_TIME_BASE_Q;
2404 int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2405 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2406 } else if (pkt->duration) {
2407 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2408 } else if(ist->dec_ctx->framerate.num != 0) {
2409 int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2410 ist->next_dts += ((int64_t)AV_TIME_BASE *
2411 ist->dec_ctx->framerate.den * ticks) /
2412 ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2413 }
2414 break;
2415 }
2416 ist->pts = ist->dts;
2417 ist->next_pts = ist->next_dts;
2418 }
2419 for (i = 0; pkt && i < nb_output_streams; i++) {
2420 OutputStream *ost = output_streams[i];
2421
2422 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2423 continue;
2424
2425 do_streamcopy(ist, ost, pkt);
2426 }
2427
2428 return got_output;
2429 }
2430
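/*
 * Create an SDP description covering all RTP output files and print it to
 * stdout, or write it to sdp_filename when one was requested.
 */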
2431 static void print_sdp(void)
2432 {
2433 char sdp[16384];
2434 int i;
2435 int j;
2436 AVIOContext *sdp_pb;
2437 AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2438
2439 if (!avc)
2440 exit_program(1);
2441 for (i = 0, j = 0; i < nb_output_files; i++) {
2442 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2443 avc[j] = output_files[i]->ctx;
2444 j++;
2445 }
2446 }
2447
2448 if (!j)
2449 goto fail;
2450
2451 av_sdp_create(avc, j, sdp, sizeof(sdp));
2452
2453 if (!sdp_filename) {
2454 printf("SDP:\n%s\n", sdp);
2455 fflush(stdout);
2456 } else {
2457 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2458 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2459 } else {
2460 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2461 avio_closep(&sdp_pb);
2462 av_freep(&sdp_filename);
2463 }
2464 }
2465
2466 fail:
2467 av_freep(&avc);
2468 }
2469
2470 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2471 {
2472 int i;
2473 for (i = 0; hwaccels[i].name; i++)
2474 if (hwaccels[i].pix_fmt == pix_fmt)
2475 return &hwaccels[i];
2476 return NULL;
2477 }
2478
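/*
 * Pixel format negotiation callback: of the formats offered by the decoder,
 * pick the first hardware format for which the requested (or already active)
 * hwaccel can be initialized, otherwise fall back to the first software format.
 */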
2479 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2480 {
2481 InputStream *ist = s->opaque;
2482 const enum AVPixelFormat *p;
2483 int ret;
2484
2485 for (p = pix_fmts; *p != -1; p++) {
2486 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2487 const HWAccel *hwaccel;
2488
2489 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2490 break;
2491
2492 hwaccel = get_hwaccel(*p);
2493 if (!hwaccel ||
2494 (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2495 (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2496 continue;
2497
2498 ret = hwaccel->init(s);
2499 if (ret < 0) {
2500 if (ist->hwaccel_id == hwaccel->id) {
2501 av_log(NULL, AV_LOG_FATAL,
2502 "%s hwaccel requested for input stream #%d:%d, "
2503 "but cannot be initialized.\n", hwaccel->name,
2504 ist->file_index, ist->st->index);
2505 return AV_PIX_FMT_NONE;
2506 }
2507 continue;
2508 }
2509 ist->active_hwaccel_id = hwaccel->id;
2510 ist->hwaccel_pix_fmt = *p;
2511 break;
2512 }
2513
2514 return *p;
2515 }
2516
2517 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2518 {
2519 InputStream *ist = s->opaque;
2520
2521 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2522 return ist->hwaccel_get_buffer(s, frame, flags);
2523
2524 return avcodec_default_get_buffer2(s, frame, flags);
2525 }
2526
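/*
 * Open the decoder for an input stream that needs decoding and install the
 * hwaccel-aware get_format/get_buffer callbacks; on failure a message is
 * written into the supplied error buffer.
 */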
2527 static int init_input_stream(int ist_index, char *error, int error_len)
2528 {
2529 int ret;
2530 InputStream *ist = input_streams[ist_index];
2531
2532 if (ist->decoding_needed) {
2533 AVCodec *codec = ist->dec;
2534 if (!codec) {
2535 snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2536 avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2537 return AVERROR(EINVAL);
2538 }
2539
2540 ist->dec_ctx->opaque = ist;
2541 ist->dec_ctx->get_format = get_format;
2542 ist->dec_ctx->get_buffer2 = get_buffer;
2543 ist->dec_ctx->thread_safe_callbacks = 1;
2544
2545 av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2546 if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2547 (ist->decoding_needed & DECODING_FOR_OST)) {
2548 av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2549 if (ist->decoding_needed & DECODING_FOR_FILTER)
2550 av_log(NULL, AV_LOG_WARNING, "Warning: using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2551 }
2552
2553 av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2554
2555 if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2556 av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2557 if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2558 if (ret == AVERROR_EXPERIMENTAL)
2559 abort_codec_experimental(codec, 0);
2560
2561 snprintf(error, error_len,
2562 "Error while opening decoder for input stream "
2563 "#%d:%d : %s",
2564 ist->file_index, ist->st->index, av_err2str(ret));
2565 return ret;
2566 }
2567 assert_avoptions(ist->decoder_opts);
2568 }
2569
2570 ist->next_pts = AV_NOPTS_VALUE;
2571 ist->next_dts = AV_NOPTS_VALUE;
2572
2573 return 0;
2574 }
2575
2576 static InputStream *get_input_stream(OutputStream *ost)
2577 {
2578 if (ost->source_index >= 0)
2579 return input_streams[ost->source_index];
2580 return NULL;
2581 }
2582
2583 static int compare_int64(const void *a, const void *b)
2584 {
2585 return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2586 }
2587
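/*
 * Open the encoder for an output stream that needs encoding (propagating the
 * subtitle header and the hardware frames context of its filter when present)
 * and copy the resulting parameters back to the muxer stream; for streamcopy
 * only the codec options and the stream time base are applied.
 */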
2588 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2589 {
2590 int ret = 0;
2591
2592 if (ost->encoding_needed) {
2593 AVCodec *codec = ost->enc;
2594 AVCodecContext *dec = NULL;
2595 InputStream *ist;
2596
2597 if ((ist = get_input_stream(ost)))
2598 dec = ist->dec_ctx;
2599 if (dec && dec->subtitle_header) {
2600 /* ASS code assumes this buffer is null-terminated, so add an extra byte. */
2601 ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2602 if (!ost->enc_ctx->subtitle_header)
2603 return AVERROR(ENOMEM);
2604 memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2605 ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2606 }
2607 if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2608 av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2609 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2610 !codec->defaults &&
2611 !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2612 !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2613 av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2614
2615 if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2616 ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2617 if (!ost->enc_ctx->hw_frames_ctx)
2618 return AVERROR(ENOMEM);
2619 }
2620
2621 if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2622 if (ret == AVERROR_EXPERIMENTAL)
2623 abort_codec_experimental(codec, 1);
2624 snprintf(error, error_len,
2625 "Error while opening encoder for output stream #%d:%d - "
2626 "maybe incorrect parameters such as bit_rate, rate, width or height",
2627 ost->file_index, ost->index);
2628 return ret;
2629 }
2630 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2631 !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2632 av_buffersink_set_frame_size(ost->filter->filter,
2633 ost->enc_ctx->frame_size);
2634 assert_avoptions(ost->encoder_opts);
2635 if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2636 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2637 " It takes bits/s as argument, not kbits/s\n");
2638
2639 ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2640 if (ret < 0) {
2641 av_log(NULL, AV_LOG_FATAL,
2642 "Error initializing the output stream codec context.\n");
2643 exit_program(1);
2644 }
2645
2646 if (ost->enc_ctx->nb_coded_side_data) {
2647 int i;
2648
2649 ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2650 sizeof(*ost->st->side_data));
2651 if (!ost->st->side_data)
2652 return AVERROR(ENOMEM);
2653
2654 for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2655 const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2656 AVPacketSideData *sd_dst = &ost->st->side_data[i];
2657
2658 sd_dst->data = av_malloc(sd_src->size);
2659 if (!sd_dst->data)
2660 return AVERROR(ENOMEM);
2661 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2662 sd_dst->size = sd_src->size;
2663 sd_dst->type = sd_src->type;
2664 ost->st->nb_side_data++;
2665 }
2666 }
2667
2668 // copy timebase while removing common factors
2669 ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2670 ost->st->codec->codec= ost->enc_ctx->codec;
2671 } else {
2672 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2673 if (ret < 0) {
2674 av_log(NULL, AV_LOG_FATAL,
2675 "Error setting up codec context options.\n");
2676 return ret;
2677 }
2678 // copy timebase while removing common factors
2679 ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2680 }
2681
2682 return ret;
2683 }
2684
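/*
 * Parse the -force_key_frames value: build a sorted array of forced keyframe
 * timestamps from a comma-separated list of times, expanding the "chapters"
 * syntax into the chapter start times of the output file.
 */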
2685 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2686 AVCodecContext *avctx)
2687 {
2688 char *p;
2689 int n = 1, i, size, index = 0;
2690 int64_t t, *pts;
2691
2692 for (p = kf; *p; p++)
2693 if (*p == ',')
2694 n++;
2695 size = n;
2696 pts = av_malloc_array(size, sizeof(*pts));
2697 if (!pts) {
2698 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2699 exit_program(1);
2700 }
2701
2702 p = kf;
2703 for (i = 0; i < n; i++) {
2704 char *next = strchr(p, ',');
2705
2706 if (next)
2707 *next++ = 0;
2708
2709 if (!memcmp(p, "chapters", 8)) {
2710
2711 AVFormatContext *avf = output_files[ost->file_index]->ctx;
2712 int j;
2713
2714 if (avf->nb_chapters > INT_MAX - size ||
2715 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2716 sizeof(*pts)))) {
2717 av_log(NULL, AV_LOG_FATAL,
2718 "Could not allocate forced key frames array.\n");
2719 exit_program(1);
2720 }
2721 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2722 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2723
2724 for (j = 0; j < avf->nb_chapters; j++) {
2725 AVChapter *c = avf->chapters[j];
2726 av_assert1(index < size);
2727 pts[index++] = av_rescale_q(c->start, c->time_base,
2728 avctx->time_base) + t;
2729 }
2730
2731 } else {
2732
2733 t = parse_time_or_die("force_key_frames", p, 1);
2734 av_assert1(index < size);
2735 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2736
2737 }
2738
2739 p = next;
2740 }
2741
2742 av_assert0(index == size);
2743 qsort(pts, size, sizeof(*pts), compare_int64);
2744 ost->forced_kf_count = size;
2745 ost->forced_kf_pts = pts;
2746 }
2747
2748 static void report_new_stream(int input_index, AVPacket *pkt)
2749 {
2750 InputFile *file = input_files[input_index];
2751 AVStream *st = file->ctx->streams[pkt->stream_index];
2752
2753 if (pkt->stream_index < file->nb_streams_warn)
2754 return;
2755 av_log(file->ctx, AV_LOG_WARNING,
2756 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2757 av_get_media_type_string(st->codec->codec_type),
2758 input_index, pkt->stream_index,
2759 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2760 file->nb_streams_warn = pkt->stream_index + 1;
2761 }
2762
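/*
 * Set the "encoder" metadata tag on the output stream to the libavcodec
 * identification string plus the encoder name, unless the tag is already
 * present or bitexact output was requested.
 */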
2763 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2764 {
2765 AVDictionaryEntry *e;
2766
2767 uint8_t *encoder_string;
2768 int encoder_string_len;
2769 int format_flags = 0;
2770 int codec_flags = 0;
2771
2772 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2773 return;
2774
2775 e = av_dict_get(of->opts, "fflags", NULL, 0);
2776 if (e) {
2777 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2778 if (!o)
2779 return;
2780 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2781 }
2782 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2783 if (e) {
2784 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2785 if (!o)
2786 return;
2787 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2788 }
2789
2790 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2791 encoder_string = av_mallocz(encoder_string_len);
2792 if (!encoder_string)
2793 exit_program(1);
2794
2795 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2796 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2797 else
2798 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2799 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2800 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2801 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2802 }
2803
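/*
 * Prepare all streams before the main transcoding loop: bind filtergraph
 * outputs to output streams, set up frame rate emulation, and derive the
 * encoding or streamcopy parameters for every output stream.
 */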
2804 static int transcode_init(void)
2805 {
2806 int ret = 0, i, j, k;
2807 AVFormatContext *oc;
2808 OutputStream *ost;
2809 InputStream *ist;
2810 char error[1024] = {0};
2811 int want_sdp = 1;
2812
2813 for (i = 0; i < nb_filtergraphs; i++) {
2814 FilterGraph *fg = filtergraphs[i];
2815 for (j = 0; j < fg->nb_outputs; j++) {
2816 OutputFilter *ofilter = fg->outputs[j];
2817 if (!ofilter->ost || ofilter->ost->source_index >= 0)
2818 continue;
2819 if (fg->nb_inputs != 1)
2820 continue;
2821 for (k = nb_input_streams-1; k >= 0 ; k--)
2822 if (fg->inputs[0]->ist == input_streams[k])
2823 break;
2824 ofilter->ost->source_index = k;
2825 }
2826 }
2827
2828 /* init framerate emulation */
2829 for (i = 0; i < nb_input_files; i++) {
2830 InputFile *ifile = input_files[i];
2831 if (ifile->rate_emu)
2832 for (j = 0; j < ifile->nb_streams; j++)
2833 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2834 }
2835
2836 /* for each output stream, we compute the right encoding parameters */
2837 for (i = 0; i < nb_output_streams; i++) {
2838 AVCodecContext *enc_ctx;
2839 AVCodecContext *dec_ctx = NULL;
2840 ost = output_streams[i];
2841 oc = output_files[ost->file_index]->ctx;
2842 ist = get_input_stream(ost);
2843
2844 if (ost->attachment_filename)
2845 continue;
2846
2847 enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2848
2849 if (ist) {
2850 dec_ctx = ist->dec_ctx;
2851
2852 ost->st->disposition = ist->st->disposition;
2853 enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2854 enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2855 } else {
2856 for (j=0; j<oc->nb_streams; j++) {
2857 AVStream *st = oc->streams[j];
2858 if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2859 break;
2860 }
2861 if (j == oc->nb_streams)
2862 if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2863 ost->st->disposition = AV_DISPOSITION_DEFAULT;
2864 }
2865
2866 if (ost->stream_copy) {
2867 AVRational sar;
2868 uint64_t extra_size;
2869
2870 av_assert0(ist && !ost->filter);
2871
2872 extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2873
2874 if (extra_size > INT_MAX) {
2875 return AVERROR(EINVAL);
2876 }
2877
2878 /* if stream_copy is selected, no need to decode or encode */
2879 enc_ctx->codec_id = dec_ctx->codec_id;
2880 enc_ctx->codec_type = dec_ctx->codec_type;
2881
2882 if (!enc_ctx->codec_tag) {
2883 unsigned int codec_tag;
2884 if (!oc->oformat->codec_tag ||
2885 av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2886 !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2887 enc_ctx->codec_tag = dec_ctx->codec_tag;
2888 }
2889
2890 enc_ctx->bit_rate = dec_ctx->bit_rate;
2891 enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2892 enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2893 enc_ctx->field_order = dec_ctx->field_order;
2894 if (dec_ctx->extradata_size) {
2895 enc_ctx->extradata = av_mallocz(extra_size);
2896 if (!enc_ctx->extradata) {
2897 return AVERROR(ENOMEM);
2898 }
2899 memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2900 }
2901 enc_ctx->extradata_size= dec_ctx->extradata_size;
2902 enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2903
2904 enc_ctx->time_base = ist->st->time_base;
2905 /*
2906 * AVI is a special case here because it supports variable fps, but
2907 * having the fps and timebase differ significantly adds quite some
2908 * overhead
2909 */
2910 if(!strcmp(oc->oformat->name, "avi")) {
2911 if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2912 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2913 && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2914 && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2915 || copy_tb==2){
2916 enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2917 enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2918 enc_ctx->ticks_per_frame = 2;
2919 } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2920 && av_q2d(ist->st->time_base) < 1.0/500
2921 || copy_tb==0){
2922 enc_ctx->time_base = dec_ctx->time_base;
2923 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2924 enc_ctx->time_base.den *= 2;
2925 enc_ctx->ticks_per_frame = 2;
2926 }
2927 } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2928 && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2929 && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2930 && strcmp(oc->oformat->name, "f4v")
2931 ) {
2932 if( copy_tb<0 && dec_ctx->time_base.den
2933 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2934 && av_q2d(ist->st->time_base) < 1.0/500
2935 || copy_tb==0){
2936 enc_ctx->time_base = dec_ctx->time_base;
2937 enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2938 }
2939 }
2940 if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2941 && dec_ctx->time_base.num < dec_ctx->time_base.den
2942 && dec_ctx->time_base.num > 0
2943 && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2944 enc_ctx->time_base = dec_ctx->time_base;
2945 }
2946
2947 if (!ost->frame_rate.num)
2948 ost->frame_rate = ist->framerate;
2949 if(ost->frame_rate.num)
2950 enc_ctx->time_base = av_inv_q(ost->frame_rate);
2951
2952 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2953 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2954
2955 if (ist->st->nb_side_data) {
2956 ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2957 sizeof(*ist->st->side_data));
2958 if (!ost->st->side_data)
2959 return AVERROR(ENOMEM);
2960
2961 ost->st->nb_side_data = 0;
2962 for (j = 0; j < ist->st->nb_side_data; j++) {
2963 const AVPacketSideData *sd_src = &ist->st->side_data[j];
2964 AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2965
2966 if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2967 continue;
2968
2969 sd_dst->data = av_malloc(sd_src->size);
2970 if (!sd_dst->data)
2971 return AVERROR(ENOMEM);
2972 memcpy(sd_dst->data, sd_src->data, sd_src->size);
2973 sd_dst->size = sd_src->size;
2974 sd_dst->type = sd_src->type;
2975 ost->st->nb_side_data++;
2976 }
2977 }
2978
2979 ost->parser = av_parser_init(enc_ctx->codec_id);
2980
2981 switch (enc_ctx->codec_type) {
2982 case AVMEDIA_TYPE_AUDIO:
2983 if (audio_volume != 256) {
2984 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2985 exit_program(1);
2986 }
2987 enc_ctx->channel_layout = dec_ctx->channel_layout;
2988 enc_ctx->sample_rate = dec_ctx->sample_rate;
2989 enc_ctx->channels = dec_ctx->channels;
2990 enc_ctx->frame_size = dec_ctx->frame_size;
2991 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2992 enc_ctx->block_align = dec_ctx->block_align;
2993 enc_ctx->initial_padding = dec_ctx->delay;
2994 enc_ctx->profile = dec_ctx->profile;
2995 #if FF_API_AUDIOENC_DELAY
2996 enc_ctx->delay = dec_ctx->delay;
2997 #endif
2998 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2999 enc_ctx->block_align= 0;
3000 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
3001 enc_ctx->block_align= 0;
3002 break;
3003 case AVMEDIA_TYPE_VIDEO:
3004 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
3005 enc_ctx->width = dec_ctx->width;
3006 enc_ctx->height = dec_ctx->height;
3007 enc_ctx->has_b_frames = dec_ctx->has_b_frames;
3008 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3009 sar =
3010 av_mul_q(ost->frame_aspect_ratio,
3011 (AVRational){ enc_ctx->height, enc_ctx->width });
3012 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3013 "with stream copy may produce invalid files\n");
3014 }
3015 else if (ist->st->sample_aspect_ratio.num)
3016 sar = ist->st->sample_aspect_ratio;
3017 else
3018 sar = dec_ctx->sample_aspect_ratio;
3019 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3020 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3021 ost->st->r_frame_rate = ist->st->r_frame_rate;
3022 break;
3023 case AVMEDIA_TYPE_SUBTITLE:
3024 enc_ctx->width = dec_ctx->width;
3025 enc_ctx->height = dec_ctx->height;
3026 break;
3027 case AVMEDIA_TYPE_UNKNOWN:
3028 case AVMEDIA_TYPE_DATA:
3029 case AVMEDIA_TYPE_ATTACHMENT:
3030 break;
3031 default:
3032 abort();
3033 }
3034 } else {
3035 if (!ost->enc)
3036 ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3037 if (!ost->enc) {
3038 /* should only happen when a default codec is not present. */
3039 snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3040 avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3041 ret = AVERROR(EINVAL);
3042 goto dump_format;
3043 }
3044
3045 set_encoder_id(output_files[ost->file_index], ost);
3046
3047 #if CONFIG_LIBMFX
3048 if (qsv_transcode_init(ost))
3049 exit_program(1);
3050 #endif
3051
3052 if (!ost->filter &&
3053 (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3054 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3055 FilterGraph *fg;
3056 fg = init_simple_filtergraph(ist, ost);
3057 if (configure_filtergraph(fg)) {
3058 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3059 exit_program(1);
3060 }
3061 }
3062
3063 if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3064 if (!ost->frame_rate.num)
3065 ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3066 if (ist && !ost->frame_rate.num)
3067 ost->frame_rate = ist->framerate;
3068 if (ist && !ost->frame_rate.num)
3069 ost->frame_rate = ist->st->r_frame_rate;
3070 if (ist && !ost->frame_rate.num) {
3071 ost->frame_rate = (AVRational){25, 1};
3072 av_log(NULL, AV_LOG_WARNING,
3073 "No information "
3074 "about the input framerate is available. Falling "
3075 "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3076 "if you want a different framerate.\n",
3077 ost->file_index, ost->index);
3078 }
3079 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3080 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3081 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3082 ost->frame_rate = ost->enc->supported_framerates[idx];
3083 }
3084 // reduce frame rate for mpeg4 to be within the spec limits
3085 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3086 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3087 ost->frame_rate.num, ost->frame_rate.den, 65535);
3088 }
3089 }
3090
3091 switch (enc_ctx->codec_type) {
3092 case AVMEDIA_TYPE_AUDIO:
3093 enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3094 enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3095 enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3096 enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3097 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3098 break;
3099 case AVMEDIA_TYPE_VIDEO:
3100 enc_ctx->time_base = av_inv_q(ost->frame_rate);
3101 if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3102 enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3103 if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3104 && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3105 av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3106 "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3107 }
3108 for (j = 0; j < ost->forced_kf_count; j++)
3109 ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3110 AV_TIME_BASE_Q,
3111 enc_ctx->time_base);
3112
3113 enc_ctx->width = ost->filter->filter->inputs[0]->w;
3114 enc_ctx->height = ost->filter->filter->inputs[0]->h;