/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <string.h>

#include <va/va.h>
#include <va/va_vpp.h>

#include "libavutil/avassert.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_vaapi.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
typedef struct ScaleVAAPIContext {
    const AVClass *class;

    // VAAPI device and its libva handle.
    AVVAAPIDeviceContext *hwctx;
    AVBufferRef *device_ref;

    // Video processing pipeline config/context.
    VAConfigID  va_config;
    VAContextID va_context;

    AVBufferRef       *input_frames_ref;
    AVHWFramesContext *input_frames;

    AVBufferRef       *output_frames_ref;
    AVHWFramesContext *output_frames;

    char *output_format_string;
    enum AVPixelFormat output_format;
    int output_width, output_height;
} ScaleVAAPIContext;
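
/*
 * Filter lifecycle:
 *   - query_formats advertises AV_PIX_FMT_VAAPI on both pads.
 *   - config_input  takes a reference to the incoming hardware frames
 *                   context, which carries the VAAPI device.
 *   - config_output creates the VAProfileNone/VAEntrypointVideoProc config,
 *                   validates the requested size and format against the
 *                   device constraints, allocates an output surface pool
 *                   and creates the VA context.
 *   - filter_frame  runs one processing pass per input frame.
 */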

static int scale_vaapi_query_formats(AVFilterContext *avctx)
{
    enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_VAAPI, AV_PIX_FMT_NONE,
    };

    ff_formats_ref(ff_make_format_list(pix_fmts),
                   &avctx->inputs[0]->out_formats);
    ff_formats_ref(ff_make_format_list(pix_fmts),
                   &avctx->outputs[0]->in_formats);

    return 0;
}

static int scale_vaapi_pipeline_uninit(ScaleVAAPIContext *ctx)
{
    if (ctx->va_context != VA_INVALID_ID) {
        vaDestroyContext(ctx->hwctx->display, ctx->va_context);
        ctx->va_context = VA_INVALID_ID;
    }

    if (ctx->va_config != VA_INVALID_ID) {
        vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
        ctx->va_config = VA_INVALID_ID;
    }

    av_buffer_unref(&ctx->output_frames_ref);
    av_buffer_unref(&ctx->device_ref);

    return 0;
}

static int scale_vaapi_config_input(AVFilterLink *inlink)
{
    AVFilterContext *avctx = inlink->dst;
    ScaleVAAPIContext *ctx = avctx->priv;

    scale_vaapi_pipeline_uninit(ctx);

    if (!inlink->hw_frames_ctx) {
        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the processing device.\n");
        return AVERROR(EINVAL);
    }

    ctx->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx);
    ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;

    return 0;
}
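
/*
 * Configure the output side of the filter: create the video processing
 * config, check the hardware constraints, allocate the pool of output
 * surfaces and create the VA context bound to them.
 */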

static int scale_vaapi_config_output(AVFilterLink *outlink)
{
    AVFilterContext *avctx = outlink->src;
    ScaleVAAPIContext *ctx = avctx->priv;
    AVVAAPIHWConfig *hwconfig = NULL;
    AVHWFramesConstraints *constraints = NULL;
    AVVAAPIFramesContext *va_frames;
    VAStatus vas;
    int err, i;

    scale_vaapi_pipeline_uninit(ctx);

    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
    ctx->hwctx = ((AVHWDeviceContext*)ctx->device_ref->data)->hwctx;

    av_assert0(ctx->va_config == VA_INVALID_ID);
    vas = vaCreateConfig(ctx->hwctx->display, VAProfileNone,
                         VAEntrypointVideoProc, 0, 0, &ctx->va_config);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create processing pipeline "
               "config: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
    if (!hwconfig) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    hwconfig->config_id = ctx->va_config;

    constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
                                                      hwconfig);
    if (!constraints) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    if (ctx->output_format == AV_PIX_FMT_NONE)
        ctx->output_format = ctx->input_frames->sw_format;
    if (constraints->valid_sw_formats) {
        for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
            if (ctx->output_format == constraints->valid_sw_formats[i])
                break;
        }
        if (constraints->valid_sw_formats[i] == AV_PIX_FMT_NONE) {
            av_log(ctx, AV_LOG_ERROR, "Hardware does not support output "
                   "format %s.\n", av_get_pix_fmt_name(ctx->output_format));
            err = AVERROR(EINVAL);
            goto fail;
        }
    }

    if (ctx->output_width  < constraints->min_width  ||
        ctx->output_height < constraints->min_height ||
        ctx->output_width  > constraints->max_width  ||
        ctx->output_height > constraints->max_height) {
        av_log(ctx, AV_LOG_ERROR, "Hardware does not support scaling to "
               "size %dx%d (constraints: width %d-%d height %d-%d).\n",
               ctx->output_width, ctx->output_height,
               constraints->min_width,  constraints->max_width,
               constraints->min_height, constraints->max_height);
        err = AVERROR(EINVAL);
        goto fail;
    }

    ctx->output_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
    if (!ctx->output_frames_ref) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create HW frame context "
               "for output.\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }

    ctx->output_frames = (AVHWFramesContext*)ctx->output_frames_ref->data;

    ctx->output_frames->format    = AV_PIX_FMT_VAAPI;
    ctx->output_frames->sw_format = ctx->output_format;
    ctx->output_frames->width     = ctx->output_width;
    ctx->output_frames->height    = ctx->output_height;

    // The number of output frames we need is determined by what follows
    // the filter.  If it's an encoder with complex frame reference
    // structures then this could be very high.
    ctx->output_frames->initial_pool_size = 10;

    err = av_hwframe_ctx_init(ctx->output_frames_ref);
    if (err < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialise VAAPI frame "
               "context for output: %d\n", err);
        goto fail;
    }

    va_frames = ctx->output_frames->hwctx;

    av_assert0(ctx->va_context == VA_INVALID_ID);
    vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
                          ctx->output_width, ctx->output_height,
                          VA_PROGRESSIVE,
                          va_frames->surface_ids, va_frames->nb_surfaces,
                          &ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create processing pipeline "
               "context: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    outlink->w = ctx->output_width;
    outlink->h = ctx->output_height;

    outlink->hw_frames_ctx = av_buffer_ref(ctx->output_frames_ref);
    if (!outlink->hw_frames_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);
    return 0;

fail:
    av_buffer_unref(&ctx->output_frames_ref);
    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);
    return err;
}
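
/*
 * Map an AVColorSpace value onto the corresponding VAProcColorStandardType.
 * Anything without a direct equivalent falls back to
 * VAProcColorStandardNone, leaving the interpretation to the driver.
 */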
static int vaapi_proc_colour_standard(enum AVColorSpace av_cs)
{
    switch(av_cs) {
#define CS(av, va) case AVCOL_SPC_ ## av: return VAProcColorStandard ## va;
        CS(BT709,     BT709);
        CS(BT470BG,   BT601);
        CS(SMPTE170M, SMPTE170M);
        CS(SMPTE240M, SMPTE240M);
#undef CS
    default:
        return VAProcColorStandardNone;
    }
}
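
/*
 * Process one frame.  The per-frame VA-API call sequence is:
 *   vaBeginPicture  - attach the output surface,
 *   vaCreateBuffer  - build a VAProcPipelineParameterBuffer describing the
 *                     input surface and the scaling options,
 *   vaRenderPicture - queue that parameter buffer,
 *   vaEndPicture    - submit the job to the hardware.
 * The parameter buffer is destroyed explicitly afterwards.
 */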
static int scale_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
{
    AVFilterContext *avctx = inlink->dst;
    AVFilterLink *outlink = avctx->outputs[0];
    ScaleVAAPIContext *ctx = avctx->priv;
    AVFrame *output_frame = NULL;
    VASurfaceID input_surface, output_surface;
    VAProcPipelineParameterBuffer params;
    VABufferID params_id;
    VAStatus vas;
    int err;

    av_log(ctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(input_frame->format),
           input_frame->width, input_frame->height, input_frame->pts);

    if (ctx->va_context == VA_INVALID_ID)
        return AVERROR(EINVAL);

    input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];
    av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for scale input.\n",
           input_surface);

    output_frame = av_frame_alloc();
    if (!output_frame) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate output frame.\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }

    err = av_hwframe_get_buffer(ctx->output_frames_ref, output_frame, 0);
    if (err < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get surface for "
               "output: %d.\n", err);
        goto fail;
    }

    output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3];
    av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for scale output.\n",
           output_surface);

    memset(&params, 0, sizeof(params));

    params.surface = input_surface;
    params.surface_region = 0;
    params.surface_color_standard =
        vaapi_proc_colour_standard(input_frame->colorspace);

    params.output_region = 0;
    params.output_background_color = 0xff000000;
    params.output_color_standard = params.surface_color_standard;

    params.pipeline_flags = 0;
    params.filter_flags = VA_FILTER_SCALING_HQ;

    vas = vaBeginPicture(ctx->hwctx->display,
                         ctx->va_context, output_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to attach new picture: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAProcPipelineParameterBufferType,
                         sizeof(params), 1, &params, &params_id);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create parameter buffer: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_after_begin;
    }
    av_log(ctx, AV_LOG_DEBUG, "Pipeline parameter buffer is %#x.\n",
           params_id);

    vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
                          &params_id, 1);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to render parameter buffer: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_after_begin;
    }

    vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to start picture processing: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_after_render;
    }

    // This doesn't get freed automatically for some reason.
    vas = vaDestroyBuffer(ctx->hwctx->display, params_id);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to free parameter buffer: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        // Not fatal: the output frame is already complete, so carry on.
    }

    av_frame_copy_props(output_frame, input_frame);
    av_frame_free(&input_frame);

    av_log(ctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(output_frame->format),
           output_frame->width, output_frame->height, output_frame->pts);

    return ff_filter_frame(outlink, output_frame);

    // We want to make sure that if vaBeginPicture has been called, we also
    // call vaRenderPicture and vaEndPicture.  These calls may well fail or
    // do something else nasty, but once we're in this failure case there
    // isn't much else we can do.
fail_after_begin:
    vaRenderPicture(ctx->hwctx->display, ctx->va_context, &params_id, 1);
fail_after_render:
    vaEndPicture(ctx->hwctx->display, ctx->va_context);
fail:
    av_frame_free(&input_frame);
    av_frame_free(&output_frame);
    return err;
}

static av_cold int scale_vaapi_init(AVFilterContext *avctx)
{
    ScaleVAAPIContext *ctx = avctx->priv;

    ctx->va_config  = VA_INVALID_ID;
    ctx->va_context = VA_INVALID_ID;

    if (ctx->output_format_string) {
        ctx->output_format = av_get_pix_fmt(ctx->output_format_string);
        if (ctx->output_format == AV_PIX_FMT_NONE) {
            av_log(ctx, AV_LOG_ERROR, "Invalid output format.\n");
            return AVERROR(EINVAL);
        }
    } else {
        // Use the input format once that is configured.
        ctx->output_format = AV_PIX_FMT_NONE;
    }

    return 0;
}

static av_cold void scale_vaapi_uninit(AVFilterContext *avctx)
{
    ScaleVAAPIContext *ctx = avctx->priv;

    scale_vaapi_pipeline_uninit(ctx);

    av_buffer_unref(&ctx->input_frames_ref);
    av_buffer_unref(&ctx->output_frames_ref);
    av_buffer_unref(&ctx->device_ref);
}

#define OFFSET(x) offsetof(ScaleVAAPIContext, x)
#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM)
static const AVOption scale_vaapi_options[] = {
    { "w", "Output video width",
      OFFSET(output_width),  AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
    { "h", "Output video height",
      OFFSET(output_height), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
    { "format", "Output video format (software format of hardware frames)",
      OFFSET(output_format_string), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { NULL },
};
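
/*
 * Illustrative use (hypothetical command line; assumes frames already
 * reach the filter as VAAPI hardware surfaces, e.g. from a VAAPI decoder):
 *   -vf scale_vaapi=w=1280:h=720:format=nv12
 */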

static const AVClass scale_vaapi_class = {
    .class_name = "scale_vaapi",
    .item_name  = av_default_item_name,
    .option     = scale_vaapi_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad scale_vaapi_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = &scale_vaapi_filter_frame,
        .config_props = &scale_vaapi_config_input,
    },
    { NULL }
};

static const AVFilterPad scale_vaapi_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = &scale_vaapi_config_output,
    },
    { NULL }
};

AVFilter ff_vf_scale_vaapi = {
    .name          = "scale_vaapi",
    .description   = NULL_IF_CONFIG_SMALL("Scale to/from VAAPI surfaces."),
    .priv_size     = sizeof(ScaleVAAPIContext),
    .init          = &scale_vaapi_init,
    .uninit        = &scale_vaapi_uninit,
    .query_formats = &scale_vaapi_query_formats,
    .inputs        = scale_vaapi_inputs,
    .outputs       = scale_vaapi_outputs,
    .priv_class    = &scale_vaapi_class,
};