/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include <va/va.h>
#include <va/va_vpp.h>

#include "libavutil/avassert.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_vaapi.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
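
/*
 * Per-instance state for the scale_vaapi filter: a reference to the VAAPI
 * device, the input and output hardware frame pools, and the libva
 * config/context pair through which the video-processing (scaling) calls
 * are issued.
 */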
typedef struct ScaleVAAPIContext {
    const AVClass *class;

    AVVAAPIDeviceContext *hwctx;
    AVBufferRef *device_ref;

    int valid_ids;
    VAConfigID  va_config;
    VAContextID va_context;

    AVBufferRef       *input_frames_ref;
    AVHWFramesContext *input_frames;

    AVBufferRef       *output_frames_ref;
    AVHWFramesContext *output_frames;

    char *output_format_string;
    enum AVPixelFormat output_format;
    int output_width;
    int output_height;
} ScaleVAAPIContext;

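/*
 * Both pads carry only VAAPI hardware frames; the software pixel formats
 * inside those frames are described by the hw_frames_ctx on each link
 * rather than by normal format negotiation.
 */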
static int scale_vaapi_query_formats(AVFilterContext *avctx)
{
    enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_VAAPI, AV_PIX_FMT_NONE,
    };

    ff_formats_ref(ff_make_format_list(pix_fmts),
                   &avctx->inputs[0]->out_formats);
    ff_formats_ref(ff_make_format_list(pix_fmts),
                   &avctx->outputs[0]->in_formats);

    return 0;
}

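/*
 * Tear the processing pipeline down again: destroy the VA context and
 * config, and drop the output frame pool and the device reference.  Safe
 * to call repeatedly; invoked both when links are reconfigured and from
 * uninit.
 */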
static int scale_vaapi_pipeline_uninit(ScaleVAAPIContext *ctx)
{
    if (ctx->va_context != VA_INVALID_ID) {
        vaDestroyContext(ctx->hwctx->display, ctx->va_context);
        ctx->va_context = VA_INVALID_ID;
    }

    if (ctx->va_config != VA_INVALID_ID) {
        vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
        ctx->va_config = VA_INVALID_ID;
    }

    av_buffer_unref(&ctx->output_frames_ref);
    av_buffer_unref(&ctx->device_ref);
    ctx->hwctx = NULL;

    return 0;
}

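/*
 * Input link configuration: only take a reference to the incoming hardware
 * frames context here.  The VA pipeline itself is built in config_output()
 * once the output size and format are known.
 */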
static int scale_vaapi_config_input(AVFilterLink *inlink)
{
    AVFilterContext *avctx = inlink->dst;
    ScaleVAAPIContext *ctx = avctx->priv;

    scale_vaapi_pipeline_uninit(ctx);

    if (!inlink->hw_frames_ctx) {
        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the processing device.\n");
        return AVERROR(EINVAL);
    }

    ctx->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx);
    if (!ctx->input_frames_ref)
        return AVERROR(ENOMEM);
    ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;

    return 0;
}

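/*
 * Output link configuration builds the whole pipeline: create a VA config
 * for the VAEntrypointVideoProc entrypoint, query the frame constraints it
 * imposes, validate the requested output format and size against them,
 * then allocate the output frame pool and create the VA context which will
 * run the scaling operations.
 */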
static int scale_vaapi_config_output(AVFilterLink *outlink)
{
    AVFilterContext *avctx = outlink->src;
    ScaleVAAPIContext *ctx = avctx->priv;
    AVVAAPIHWConfig *hwconfig = NULL;
    AVHWFramesConstraints *constraints = NULL;
    AVVAAPIFramesContext *va_frames;
    VAStatus vas;
    int err, i;

    scale_vaapi_pipeline_uninit(ctx);

    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
    if (!ctx->device_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->hwctx = ((AVHWDeviceContext*)ctx->device_ref->data)->hwctx;

    av_assert0(ctx->va_config == VA_INVALID_ID);
    vas = vaCreateConfig(ctx->hwctx->display, VAProfileNone,
                         VAEntrypointVideoProc, 0, 0, &ctx->va_config);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create processing pipeline "
               "config: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
    if (!hwconfig) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    hwconfig->config_id = ctx->va_config;

    constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
                                                      hwconfig);
    if (!constraints) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    if (ctx->output_format == AV_PIX_FMT_NONE)
        ctx->output_format = ctx->input_frames->sw_format;
    if (constraints->valid_sw_formats) {
        for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
            if (ctx->output_format == constraints->valid_sw_formats[i])
                break;
        }
        if (constraints->valid_sw_formats[i] == AV_PIX_FMT_NONE) {
            av_log(ctx, AV_LOG_ERROR, "Hardware does not support output "
                   "format %s.\n", av_get_pix_fmt_name(ctx->output_format));
            err = AVERROR(EINVAL);
            goto fail;
        }
    }

    if (ctx->output_width  < constraints->min_width  ||
        ctx->output_height < constraints->min_height ||
        ctx->output_width  > constraints->max_width  ||
        ctx->output_height > constraints->max_height) {
        av_log(ctx, AV_LOG_ERROR, "Hardware does not support scaling to "
               "size %dx%d (constraints: width %d-%d height %d-%d).\n",
               ctx->output_width, ctx->output_height,
               constraints->min_width,  constraints->max_width,
               constraints->min_height, constraints->max_height);
        err = AVERROR(EINVAL);
        goto fail;
    }

    ctx->output_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
    if (!ctx->output_frames_ref) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create HW frame context "
               "for output.\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }

    ctx->output_frames = (AVHWFramesContext*)ctx->output_frames_ref->data;

    ctx->output_frames->format    = AV_PIX_FMT_VAAPI;
    ctx->output_frames->sw_format = ctx->output_format;
    ctx->output_frames->width     = ctx->output_width;
    ctx->output_frames->height    = ctx->output_height;

    // The number of output frames we need is determined by what follows
    // the filter. If it's an encoder with complex frame reference
    // structures then this could be very high.
    ctx->output_frames->initial_pool_size = 10;

    err = av_hwframe_ctx_init(ctx->output_frames_ref);
    if (err < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialise VAAPI frame "
               "context for output: %d\n", err);
        goto fail;
    }

    va_frames = ctx->output_frames->hwctx;

    av_assert0(ctx->va_context == VA_INVALID_ID);
    vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
                          ctx->output_width, ctx->output_height,
                          VA_PROGRESSIVE,
                          va_frames->surface_ids, va_frames->nb_surfaces,
                          &ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create processing pipeline "
               "context: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    outlink->w = ctx->output_width;
    outlink->h = ctx->output_height;

    outlink->hw_frames_ctx = av_buffer_ref(ctx->output_frames_ref);
    if (!outlink->hw_frames_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);
    return 0;

fail:
    av_buffer_unref(&ctx->output_frames_ref);
    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);
    return err;
}

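/*
 * Map an AVColorSpace onto the matching VAProcColorStandard value.
 * Colour spaces libva has no equivalent for fall back to
 * VAProcColorStandardNone, leaving the choice to the driver.
 */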
static int vaapi_proc_colour_standard(enum AVColorSpace av_cs)
{
    switch (av_cs) {
#define CS(av, va) case AVCOL_SPC_ ## av: return VAProcColorStandard ## va;
        CS(BT709,     BT709);
        CS(BT470BG,   BT601);
        CS(SMPTE170M, SMPTE170M);
        CS(SMPTE240M, SMPTE240M);
#undef CS
    default:
        return VAProcColorStandardNone;
    }
}

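/*
 * Per-frame processing: the input VASurfaceID is carried in data[3] of the
 * VAAPI frame.  An output frame is taken from the pool, the operation is
 * described in a VAProcPipelineParameterBuffer, and the work is submitted
 * with the usual vaBeginPicture() / vaRenderPicture() / vaEndPicture()
 * sequence.
 */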
static int scale_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
{
    AVFilterContext *avctx = inlink->dst;
    AVFilterLink *outlink = avctx->outputs[0];
    ScaleVAAPIContext *ctx = avctx->priv;
    AVFrame *output_frame = NULL;
    VASurfaceID input_surface, output_surface;
    VAProcPipelineParameterBuffer params;
    VABufferID params_id;
    VARectangle input_region;
    VAStatus vas;
    int err;

    av_log(ctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(input_frame->format),
           input_frame->width, input_frame->height, input_frame->pts);

    if (ctx->va_context == VA_INVALID_ID) {
        err = AVERROR(EINVAL);
        goto fail;
    }

    input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];
    av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for scale input.\n",
           input_surface);

    output_frame = ff_get_video_buffer(outlink, ctx->output_width,
                                       ctx->output_height);
    if (!output_frame) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3];
    av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for scale output.\n",
           output_surface);

    memset(&params, 0, sizeof(params));

    // If there were top/left cropping, it could be taken into
    // account here.
    input_region = (VARectangle) {
        .x      = 0,
        .y      = 0,
        .width  = input_frame->width,
        .height = input_frame->height,
    };

    params.surface = input_surface;
    params.surface_region = &input_region;
    params.surface_color_standard =
        vaapi_proc_colour_standard(input_frame->colorspace);
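
    // A NULL output region selects the whole output surface; since that
    // differs in size from the input region, this is what performs the
    // scale.  The background colour is opaque black (AARRGGBB), visible
    // only where the output region does not cover the surface.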
    params.output_region = NULL;
    params.output_background_color = 0xff000000;
    params.output_color_standard = params.surface_color_standard;

    params.pipeline_flags = 0;
    params.filter_flags = VA_FILTER_SCALING_HQ;

    vas = vaBeginPicture(ctx->hwctx->display,
                         ctx->va_context, output_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to attach new picture: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAProcPipelineParameterBufferType,
                         sizeof(params), 1, &params, &params_id);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create parameter buffer: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_after_begin;
    }
    av_log(ctx, AV_LOG_DEBUG, "Pipeline parameter buffer is %#x.\n",
           params_id);

    vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
                          &params_id, 1);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to render parameter buffer: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_after_begin;
    }

    vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to start picture processing: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_after_render;
    }

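    // With VAAPI 1 the parameter buffer is no longer consumed by
    // vaRenderPicture(), and drivers carrying the RENDER_PARAM_BUFFERS
    // quirk never consumed it either, so in those cases the buffer has to
    // be destroyed explicitly here.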
    if (HAVE_VAAPI_1 || (ctx->hwctx->driver_quirks &
                         AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS)) {
        vas = vaDestroyBuffer(ctx->hwctx->display, params_id);
        if (vas != VA_STATUS_SUCCESS) {
            av_log(ctx, AV_LOG_ERROR, "Failed to free parameter buffer: "
                   "%d (%s).\n", vas, vaErrorStr(vas));
            // And ignore.
        }
    }

    err = av_frame_copy_props(output_frame, input_frame);
    if (err < 0)
        goto fail;
    av_frame_free(&input_frame);

    av_log(ctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(output_frame->format),
           output_frame->width, output_frame->height, output_frame->pts);

    return ff_filter_frame(outlink, output_frame);

    // We want to make sure that if vaBeginPicture has been called, we also
    // call vaRenderPicture and vaEndPicture.  These calls may well fail or
    // do something else nasty, but once we're in this failure case there
    // isn't much else we can do.
fail_after_begin:
    vaRenderPicture(ctx->hwctx->display, ctx->va_context, &params_id, 1);
fail_after_render:
    vaEndPicture(ctx->hwctx->display, ctx->va_context);
fail:
    av_frame_free(&input_frame);
    av_frame_free(&output_frame);
    return err;
}

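/*
 * Filter init runs before any link is configured: mark the VA IDs as
 * invalid so that pipeline_uninit() knows there is nothing to destroy yet,
 * and parse the output format option if one was given (AV_PIX_FMT_NONE
 * means "same as input", resolved later in config_output()).
 */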
static av_cold int scale_vaapi_init(AVFilterContext *avctx)
{
    ScaleVAAPIContext *ctx = avctx->priv;

    ctx->va_config  = VA_INVALID_ID;
    ctx->va_context = VA_INVALID_ID;
    ctx->valid_ids  = 1;

    if (ctx->output_format_string) {
        ctx->output_format = av_get_pix_fmt(ctx->output_format_string);
        if (ctx->output_format == AV_PIX_FMT_NONE) {
            av_log(ctx, AV_LOG_ERROR, "Invalid output format.\n");
            return AVERROR(EINVAL);
        }
    } else {
        // Use the input format once that is configured.
        ctx->output_format = AV_PIX_FMT_NONE;
    }

    return 0;
}

static av_cold void scale_vaapi_uninit(AVFilterContext *avctx)
{
    ScaleVAAPIContext *ctx = avctx->priv;

    if (ctx->valid_ids)
        scale_vaapi_pipeline_uninit(ctx);

    av_buffer_unref(&ctx->input_frames_ref);
    av_buffer_unref(&ctx->output_frames_ref);
    av_buffer_unref(&ctx->device_ref);
}


#define OFFSET(x) offsetof(ScaleVAAPIContext, x)
#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM)
static const AVOption scale_vaapi_options[] = {
    { "w", "Output video width",
      OFFSET(output_width),  AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
    { "h", "Output video height",
      OFFSET(output_height), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
    { "format", "Output video format (software format of hardware frames)",
      OFFSET(output_format_string), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { NULL },
};

static const AVClass scale_vaapi_class = {
    .class_name = "scale_vaapi",
    .item_name  = av_default_item_name,
    .option     = scale_vaapi_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad scale_vaapi_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = &scale_vaapi_filter_frame,
        .config_props = &scale_vaapi_config_input,
    },
    { NULL }
};

static const AVFilterPad scale_vaapi_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = &scale_vaapi_config_output,
    },
    { NULL }
};

AVFilter ff_vf_scale_vaapi = {
    .name           = "scale_vaapi",
    .description    = NULL_IF_CONFIG_SMALL("Scale to/from VAAPI surfaces."),
    .priv_size      = sizeof(ScaleVAAPIContext),
    .init           = &scale_vaapi_init,
    .uninit         = &scale_vaapi_uninit,
    .query_formats  = &scale_vaapi_query_formats,
    .inputs         = scale_vaapi_inputs,
    .outputs        = scale_vaapi_outputs,
    .priv_class     = &scale_vaapi_class,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};