/*
 * Copyright (c) 2013 Vittorio Giovara
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Generate a frame-packed video by combining two views into a single surface.
 */

#include <string.h>

#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/rational.h"
#include "libavutil/stereo3d.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#define LEFT  0
#define RIGHT 1

typedef struct FramepackContext {
    const AVClass *class;

    const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format

    enum AVStereo3DType format;         ///< output frame packing format

    AVFrame *input_views[2];            ///< input frames

    int64_t double_pts;                 ///< new pts for frameseq mode
} FramepackContext;

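// only 8-bit planar YUV(A) formats are listed: the column-interleaving path
// below copies one byte per sample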
static const enum AVPixelFormat formats_supported[] = {
    AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_NONE
};

static int query_formats(AVFilterContext *ctx)
{
    // this will ensure that formats are the same on all pads
    return ff_set_common_formats(ctx, ff_make_format_list(formats_supported));
}

static av_cold void framepack_uninit(AVFilterContext *ctx)
{
    FramepackContext *s = ctx->priv;

    // clean any leftover frame
    av_frame_free(&s->input_views[LEFT]);
    av_frame_free(&s->input_views[RIGHT]);
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = outlink->src->priv;

    int width  = ctx->inputs[LEFT]->w;
    int height = ctx->inputs[LEFT]->h;
    AVRational time_base  = ctx->inputs[LEFT]->time_base;
    AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;

    // check size and fps match on the other input
    if (width  != ctx->inputs[RIGHT]->w ||
        height != ctx->inputs[RIGHT]->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right sizes differ (%dx%d vs %dx%d).\n",
               width, height,
               ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right time bases differ (%d/%d vs %d/%d).\n",
               time_base.num, time_base.den,
               ctx->inputs[RIGHT]->time_base.num,
               ctx->inputs[RIGHT]->time_base.den);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right framerates differ (%d/%d vs %d/%d).\n",
               frame_rate.num, frame_rate.den,
               ctx->inputs[RIGHT]->frame_rate.num,
               ctx->inputs[RIGHT]->frame_rate.den);
        return AVERROR_INVALIDDATA;
    }

    s->pix_desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->pix_desc)
        return AVERROR_BUG;

    // modify output properties as needed
    switch (s->format) {
    case AV_STEREO3D_FRAMESEQUENCE:
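        // both views are emitted as separate output frames, so the output
        // ticks twice as fast and runs at twice the input frame rate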
        time_base.den  *= 2;
        frame_rate.num *= 2;

        s->double_pts = AV_NOPTS_VALUE;
        break;
    case AV_STEREO3D_COLUMNS:
    case AV_STEREO3D_SIDEBYSIDE:
        width *= 2;
        break;
    case AV_STEREO3D_LINES:
    case AV_STEREO3D_TOPBOTTOM:
        height *= 2;
        break;
    default:
        av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.\n");
        return AVERROR_INVALIDDATA;
    }

    outlink->w          = width;
    outlink->h          = height;
    outlink->time_base  = time_base;
    outlink->frame_rate = frame_rate;

    return 0;
}

static void horizontal_frame_pack(AVFilterLink *outlink,
                                  AVFrame *out,
                                  int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    int i, plane;

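    /* Two layouts are produced here: with interleaved set (columns mode) the
     * two views are woven together pixel by pixel within each row, averaging
     * chroma samples where the format is subsampled; otherwise (side by side)
     * each view is copied wholesale into its own half of the surface. */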
    if (interleaved) {
        const uint8_t *leftp  = s->input_views[LEFT]->data[0];
        const uint8_t *rightp = s->input_views[RIGHT]->data[0];
        uint8_t *dstp         = out->data[0];
        int length = out->width / 2;
        int lines  = out->height;

        for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
            if (plane == 1 || plane == 2) {
                length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
                lines  = AV_CEIL_RSHIFT(out->height,    s->pix_desc->log2_chroma_h);
            } else {
                // luma (and alpha, when present) planes are not subsampled
                length = out->width / 2;
                lines  = out->height;
            }
            for (i = 0; i < lines; i++) {
                int j;
                leftp  = s->input_views[LEFT]->data[plane] +
                         s->input_views[LEFT]->linesize[plane] * i;
                rightp = s->input_views[RIGHT]->data[plane] +
                         s->input_views[RIGHT]->linesize[plane] * i;
                dstp   = out->data[plane] + out->linesize[plane] * i;
                for (j = 0; j < length; j++) {
                    // interpolate chroma as necessary
                    if ((s->pix_desc->log2_chroma_w ||
                         s->pix_desc->log2_chroma_h) &&
                        (plane == 1 || plane == 2)) {
                        *dstp++ = (*leftp + *rightp) / 2;
                        *dstp++ = (*leftp + *rightp) / 2;
                    } else {
                        *dstp++ = *leftp;
                        *dstp++ = *rightp;
                    }
                    leftp  += 1;
                    rightp += 1;
                }
            }
        }
    } else {
        for (i = 0; i < 2; i++) {
            const uint8_t *src[4] = { NULL };
            uint8_t *dst[4]       = { NULL };
            int sub_w = s->input_views[i]->width >> s->pix_desc->log2_chroma_w;

            src[0] = s->input_views[i]->data[0];
            src[1] = s->input_views[i]->data[1];
            src[2] = s->input_views[i]->data[2];

            dst[0] = out->data[0] + i * s->input_views[i]->width;
            dst[1] = out->data[1] + i * sub_w;
            dst[2] = out->data[2] + i * sub_w;

            // the alpha plane, when present, has full luma resolution
            if (s->pix_desc->nb_components > 3) {
                src[3] = s->input_views[i]->data[3];
                dst[3] = out->data[3] + i * s->input_views[i]->width;
            }

            av_image_copy(dst, out->linesize, src, s->input_views[i]->linesize,
                          s->input_views[i]->format,
                          s->input_views[i]->width,
                          s->input_views[i]->height);
        }
    }
}

static void vertical_frame_pack(AVFilterLink *outlink,
                                AVFrame *out,
                                int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    int i;

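    /* When interleaved is set (lines mode) the two views start on consecutive
     * rows and the destination linesize is doubled, so each copy fills every
     * other line; otherwise (top-bottom) the second view simply starts after
     * the full height of the first one. */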
    for (i = 0; i < 2; i++) {
        const uint8_t *src[4] = { NULL };
        uint8_t *dst[4]       = { NULL };
        int linesizes[4]      = { 0 };
        int sub_h = s->input_views[i]->height >> s->pix_desc->log2_chroma_h;

        src[0] = s->input_views[i]->data[0];
        src[1] = s->input_views[i]->data[1];
        src[2] = s->input_views[i]->data[2];

        dst[0] = out->data[0] + i * out->linesize[0] *
                 (interleaved + s->input_views[i]->height * (1 - interleaved));
        dst[1] = out->data[1] + i * out->linesize[1] *
                 (interleaved + sub_h * (1 - interleaved));
        dst[2] = out->data[2] + i * out->linesize[2] *
                 (interleaved + sub_h * (1 - interleaved));

        linesizes[0] = out->linesize[0] +
                       interleaved * out->linesize[0];
        linesizes[1] = out->linesize[1] +
                       interleaved * out->linesize[1];
        linesizes[2] = out->linesize[2] +
                       interleaved * out->linesize[2];

        // the alpha plane, when present, has full luma resolution
        if (s->pix_desc->nb_components > 3) {
            src[3]       = s->input_views[i]->data[3];
            dst[3]       = out->data[3] + i * out->linesize[3] *
                           (interleaved + s->input_views[i]->height * (1 - interleaved));
            linesizes[3] = out->linesize[3] +
                           interleaved * out->linesize[3];
        }

        av_image_copy(dst, linesizes, src, s->input_views[i]->linesize,
                      s->input_views[i]->format,
                      s->input_views[i]->width,
                      s->input_views[i]->height);
    }
}

static av_always_inline void spatial_frame_pack(AVFilterLink *outlink,
                                                AVFrame *dst)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
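    // dispatch to the packing routine; the flag selects per-column or
    // per-line interleaving within the same orientation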
    switch (s->format) {
    case AV_STEREO3D_SIDEBYSIDE:
        horizontal_frame_pack(outlink, dst, 0);
        break;
    case AV_STEREO3D_COLUMNS:
        horizontal_frame_pack(outlink, dst, 1);
        break;
    case AV_STEREO3D_TOPBOTTOM:
        vertical_frame_pack(outlink, dst, 0);
        break;
    case AV_STEREO3D_LINES:
        vertical_frame_pack(outlink, dst, 1);
        break;
    }
}

static int filter_frame_left(AVFilterLink *inlink, AVFrame *frame)
{
    FramepackContext *s = inlink->dst->priv;
    s->input_views[LEFT] = frame;
    return 0;
}

static int filter_frame_right(AVFilterLink *inlink, AVFrame *frame)
{
    FramepackContext *s = inlink->dst->priv;
    s->input_views[RIGHT] = frame;
    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    AVStereo3D *stereo;
    int ret, i;

    /* get a frame from either input; stop as soon as a video ends */
    for (i = 0; i < 2; i++) {
        if (!s->input_views[i]) {
            ret = ff_request_frame(ctx->inputs[i]);
            if (ret < 0)
                return ret;
        }
    }

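    /* In frame-sequence mode each call emits two output frames, left then
     * right, with consecutive timestamps in the doubled time base set up in
     * config_output; the other modes pack both views into one new frame. */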
    if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
        if (s->double_pts == AV_NOPTS_VALUE)
            s->double_pts = s->input_views[LEFT]->pts;

        for (i = 0; i < 2; i++) {
            // set correct timestamps
            s->input_views[i]->pts = s->double_pts++;

            // set stereo3d side data
            stereo = av_stereo3d_create_side_data(s->input_views[i]);
            if (!stereo)
                return AVERROR(ENOMEM);
            stereo->type = s->format;
            stereo->view = i == LEFT ? AV_STEREO3D_VIEW_LEFT
                                     : AV_STEREO3D_VIEW_RIGHT;

            // filter the frame and immediately relinquish its pointer
            ret = ff_filter_frame(outlink, s->input_views[i]);
            s->input_views[i] = NULL;
            if (ret < 0)
                return ret;
        }
        return ret;
    } else {
        AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!dst)
            return AVERROR(ENOMEM);

        spatial_frame_pack(outlink, dst);

        // copy the frame properties from the left view
        ret = av_frame_copy_props(dst, s->input_views[LEFT]);
        if (ret < 0) {
            av_frame_free(&dst);
            return ret;
        }

        for (i = 0; i < 2; i++)
            av_frame_free(&s->input_views[i]);

        // set stereo3d side data
        stereo = av_stereo3d_create_side_data(dst);
        if (!stereo) {
            av_frame_free(&dst);
            return AVERROR(ENOMEM);
        }
        stereo->type = s->format;

        return ff_filter_frame(outlink, dst);
    }
}

#define OFFSET(x) offsetof(FramepackContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
static const AVOption options[] = {
    { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = V, .unit = "format" },
    { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { NULL },
};

static const AVClass framepack_class = {
    .class_name = "framepack",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

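// a FIFO is requested on both inputs so that frames arriving on one view are
// buffered until the matching view is available on the other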
static const AVFilterPad framepack_inputs[] = {
    {
        .name         = "left",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_left,
        .needs_fifo   = 1,
    },
    {
        .name         = "right",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_right,
        .needs_fifo   = 1,
    },
    { NULL }
};

static const AVFilterPad framepack_outputs[] = {
    {
        .name          = "packed",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_framepack = {
    .name          = "framepack",
    .description   = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."),
    .priv_size     = sizeof(FramepackContext),
    .priv_class    = &framepack_class,
    .query_formats = query_formats,
    .inputs        = framepack_inputs,
    .outputs       = framepack_outputs,
    .uninit        = framepack_uninit,
};