/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include <va/va.h>
#include <va/va_vpp.h>

#include "libavutil/avassert.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_vaapi.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#define MAX_REFERENCES 8

typedef struct DeintVAAPIContext {
    const AVClass     *class;

    AVVAAPIDeviceContext *hwctx;
    AVBufferRef       *device_ref;

    int                mode;

    int                valid_ids;
    VAConfigID         va_config;
    VAContextID        va_context;

    AVBufferRef       *input_frames_ref;
    AVHWFramesContext *input_frames;

    AVBufferRef       *output_frames_ref;
    AVHWFramesContext *output_frames;
    int                output_height;
    int                output_width;

    VAProcFilterCapDeinterlacing
                       deint_caps[VAProcDeinterlacingCount];
    int             nb_deint_caps;
    VAProcPipelineCaps pipeline_caps;

    int                queue_depth;
    int                queue_count;
    AVFrame           *frame_queue[MAX_REFERENCES];

    VABufferID         filter_buffer;
} DeintVAAPIContext;

static const char *deint_vaapi_mode_name(int mode)
{
    switch (mode) {
#define D(name) case VAProcDeinterlacing ## name: return #name
        D(Bob);
        D(Weave);
        D(MotionAdaptive);
        D(MotionCompensated);
#undef D
    default:
        return "Invalid";
    }
}

static int deint_vaapi_query_formats(AVFilterContext *avctx)
{
    enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_VAAPI, AV_PIX_FMT_NONE,
    };

    ff_formats_ref(ff_make_format_list(pix_fmts),
                   &avctx->inputs[0]->out_formats);
    ff_formats_ref(ff_make_format_list(pix_fmts),
                   &avctx->outputs[0]->in_formats);

    return 0;
}

static int deint_vaapi_pipeline_uninit(AVFilterContext *avctx)
{
    DeintVAAPIContext *ctx = avctx->priv;
    int i;

    for (i = 0; i < ctx->queue_count; i++)
        av_frame_free(&ctx->frame_queue[i]);
    ctx->queue_count = 0;

    if (ctx->filter_buffer != VA_INVALID_ID) {
        vaDestroyBuffer(ctx->hwctx->display, ctx->filter_buffer);
        ctx->filter_buffer = VA_INVALID_ID;
    }

    if (ctx->va_context != VA_INVALID_ID) {
        vaDestroyContext(ctx->hwctx->display, ctx->va_context);
        ctx->va_context = VA_INVALID_ID;
    }

    if (ctx->va_config != VA_INVALID_ID) {
        vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
        ctx->va_config = VA_INVALID_ID;
    }

    av_buffer_unref(&ctx->device_ref);
    ctx->hwctx = NULL;

    return 0;
}

static int deint_vaapi_config_input(AVFilterLink *inlink)
{
    AVFilterContext   *avctx = inlink->dst;
    DeintVAAPIContext *ctx = avctx->priv;

    deint_vaapi_pipeline_uninit(avctx);

    if (!inlink->hw_frames_ctx) {
        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the processing device.\n");
        return AVERROR(EINVAL);
    }

    ctx->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx);
    ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;

    return 0;
}

static int deint_vaapi_build_filter_params(AVFilterContext *avctx)
{
    DeintVAAPIContext *ctx = avctx->priv;
    VAStatus vas;
    VAProcFilterParameterBufferDeinterlacing params;
    int i;

    ctx->nb_deint_caps = VAProcDeinterlacingCount;
    vas = vaQueryVideoProcFilterCaps(ctx->hwctx->display,
                                     ctx->va_context,
                                     VAProcFilterDeinterlacing,
                                     &ctx->deint_caps,
                                     &ctx->nb_deint_caps);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to query deinterlacing "
               "caps: %d (%s).\n", vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }

    if (ctx->mode == VAProcDeinterlacingNone) {
        for (i = 0; i < ctx->nb_deint_caps; i++) {
            if (ctx->deint_caps[i].type > ctx->mode)
                ctx->mode = ctx->deint_caps[i].type;
        }
        av_log(avctx, AV_LOG_VERBOSE, "Picking %d (%s) as default "
               "deinterlacing mode.\n", ctx->mode,
               deint_vaapi_mode_name(ctx->mode));
    } else {
        for (i = 0; i < ctx->nb_deint_caps; i++) {
            if (ctx->deint_caps[i].type == ctx->mode)
                break;
        }
        if (i >= ctx->nb_deint_caps) {
            av_log(avctx, AV_LOG_ERROR, "Deinterlacing mode %d (%s) is "
                   "not supported.\n", ctx->mode,
                   deint_vaapi_mode_name(ctx->mode));
            return AVERROR(EINVAL);
        }
    }
185 | ||
186 | params.type = VAProcFilterDeinterlacing; | |
187 | params.algorithm = ctx->mode; | |
188 | params.flags = 0; | |
189 | ||
190 | av_assert0(ctx->filter_buffer == VA_INVALID_ID); | |
191 | vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context, | |
192 | VAProcFilterParameterBufferType, | |
193 | sizeof(params), 1, ¶ms, | |
194 | &ctx->filter_buffer); | |
195 | if (vas != VA_STATUS_SUCCESS) { | |
196 | av_log(avctx, AV_LOG_ERROR, "Failed to create deinterlace " | |
197 | "parameter buffer: %d (%s).\n", vas, vaErrorStr(vas)); | |
198 | return AVERROR(EIO); | |
199 | } | |
200 | ||
201 | vas = vaQueryVideoProcPipelineCaps(ctx->hwctx->display, | |
202 | ctx->va_context, | |
203 | &ctx->filter_buffer, 1, | |
204 | &ctx->pipeline_caps); | |
205 | if (vas != VA_STATUS_SUCCESS) { | |
206 | av_log(avctx, AV_LOG_ERROR, "Failed to query pipeline " | |
207 | "caps: %d (%s).\n", vas, vaErrorStr(vas)); | |
208 | return AVERROR(EIO); | |
209 | } | |
210 | ||
    ctx->queue_depth = ctx->pipeline_caps.num_backward_references +
                       ctx->pipeline_caps.num_forward_references + 1;
    if (ctx->queue_depth > MAX_REFERENCES) {
        av_log(avctx, AV_LOG_ERROR, "Pipeline requires too many "
               "references (%u forward, %u back).\n",
               ctx->pipeline_caps.num_forward_references,
               ctx->pipeline_caps.num_backward_references);
        return AVERROR(ENOSYS);
    }

    return 0;
}

static int deint_vaapi_config_output(AVFilterLink *outlink)
{
    AVFilterContext   *avctx = outlink->src;
    DeintVAAPIContext *ctx = avctx->priv;
    AVVAAPIHWConfig *hwconfig = NULL;
    AVHWFramesConstraints *constraints = NULL;
    AVVAAPIFramesContext *va_frames;
    VAStatus vas;
    int err;

    deint_vaapi_pipeline_uninit(avctx);

    av_assert0(ctx->input_frames);
    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
    ctx->hwctx = ((AVHWDeviceContext*)ctx->device_ref->data)->hwctx;

    ctx->output_width  = ctx->input_frames->width;
    ctx->output_height = ctx->input_frames->height;

    av_assert0(ctx->va_config == VA_INVALID_ID);
    vas = vaCreateConfig(ctx->hwctx->display, VAProfileNone,
                         VAEntrypointVideoProc, 0, 0, &ctx->va_config);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create processing pipeline "
               "config: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
    if (!hwconfig) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    hwconfig->config_id = ctx->va_config;

    constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
                                                      hwconfig);
    if (!constraints) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    if (ctx->output_width  < constraints->min_width  ||
        ctx->output_height < constraints->min_height ||
        ctx->output_width  > constraints->max_width  ||
        ctx->output_height > constraints->max_height) {
        av_log(avctx, AV_LOG_ERROR, "Hardware does not support "
               "deinterlacing to size %dx%d "
               "(constraints: width %d-%d height %d-%d).\n",
               ctx->output_width, ctx->output_height,
               constraints->min_width, constraints->max_width,
               constraints->min_height, constraints->max_height);
        err = AVERROR(EINVAL);
        goto fail;
    }

    ctx->output_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
    if (!ctx->output_frames_ref) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create HW frame context "
               "for output.\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }

    ctx->output_frames = (AVHWFramesContext*)ctx->output_frames_ref->data;

    ctx->output_frames->format    = AV_PIX_FMT_VAAPI;
    ctx->output_frames->sw_format = ctx->input_frames->sw_format;
    ctx->output_frames->width     = ctx->output_width;
    ctx->output_frames->height    = ctx->output_height;

    // The number of output frames we need is determined by what follows
    // the filter.  If it's an encoder with complex frame reference
    // structures then this could be very high.
    ctx->output_frames->initial_pool_size = 10;

    err = av_hwframe_ctx_init(ctx->output_frames_ref);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to initialise VAAPI frame "
               "context for output: %d\n", err);
        goto fail;
    }

    va_frames = ctx->output_frames->hwctx;

    av_assert0(ctx->va_context == VA_INVALID_ID);
    vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
                          ctx->output_width, ctx->output_height, 0,
                          va_frames->surface_ids, va_frames->nb_surfaces,
                          &ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create processing pipeline "
               "context: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    err = deint_vaapi_build_filter_params(avctx);
    if (err < 0)
        goto fail;

    outlink->w = ctx->output_width;
    outlink->h = ctx->output_height;

    outlink->hw_frames_ctx = av_buffer_ref(ctx->output_frames_ref);
    if (!outlink->hw_frames_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);
    return 0;

fail:
    av_buffer_unref(&ctx->output_frames_ref);
    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);
    return err;
}

static int vaapi_proc_colour_standard(enum AVColorSpace av_cs)
{
    switch(av_cs) {
#define CS(av, va) case AVCOL_SPC_ ## av: return VAProcColorStandard ## va;
        CS(BT709,     BT709);
        CS(BT470BG,   BT470BG);
        CS(SMPTE170M, SMPTE170M);
        CS(SMPTE240M, SMPTE240M);
#undef CS
    default:
        return VAProcColorStandardNone;
    }
}

static int deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
{
    AVFilterContext   *avctx = inlink->dst;
    AVFilterLink      *outlink = avctx->outputs[0];
    DeintVAAPIContext *ctx = avctx->priv;
    AVFrame *output_frame = NULL;
    VASurfaceID input_surface, output_surface;
    VASurfaceID backward_references[MAX_REFERENCES];
    VASurfaceID forward_references[MAX_REFERENCES];
    VAProcPipelineParameterBuffer params;
    VAProcFilterParameterBufferDeinterlacing *filter_params;
    VARectangle input_region;
    VABufferID params_id;
    VAStatus vas;
    void *filter_params_addr = NULL;
    int err, i;

    av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(input_frame->format),
           input_frame->width, input_frame->height, input_frame->pts);

    if (ctx->queue_count < ctx->queue_depth) {
        ctx->frame_queue[ctx->queue_count++] = input_frame;
        if (ctx->queue_count < ctx->queue_depth) {
            // Need more reference surfaces before we can continue.
            return 0;
        }
    } else {
        av_frame_free(&ctx->frame_queue[0]);
        for (i = 0; i + 1 < ctx->queue_count; i++)
            ctx->frame_queue[i] = ctx->frame_queue[i + 1];
        ctx->frame_queue[i] = input_frame;
    }

    input_frame =
        ctx->frame_queue[ctx->pipeline_caps.num_backward_references];
    input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];
    for (i = 0; i < ctx->pipeline_caps.num_backward_references; i++)
        backward_references[i] = (VASurfaceID)(uintptr_t)
            ctx->frame_queue[ctx->pipeline_caps.num_backward_references -
                             i - 1]->data[3];
    for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++)
        forward_references[i] = (VASurfaceID)(uintptr_t)
            ctx->frame_queue[ctx->pipeline_caps.num_backward_references +
                             i + 1]->data[3];

    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for "
           "deinterlace input.\n", input_surface);
    av_log(avctx, AV_LOG_DEBUG, "Backward references:");
    for (i = 0; i < ctx->pipeline_caps.num_backward_references; i++)
        av_log(avctx, AV_LOG_DEBUG, " %#x", backward_references[i]);
    av_log(avctx, AV_LOG_DEBUG, "\n");
    av_log(avctx, AV_LOG_DEBUG, "Forward references:");
    for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++)
        av_log(avctx, AV_LOG_DEBUG, " %#x", forward_references[i]);
    av_log(avctx, AV_LOG_DEBUG, "\n");

    output_frame = ff_get_video_buffer(outlink, ctx->output_width,
                                       ctx->output_height);
    if (!output_frame) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3];
    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for "
           "deinterlace output.\n", output_surface);

    memset(&params, 0, sizeof(params));

    input_region = (VARectangle) {
        .x      = 0,
        .y      = 0,
        .width  = input_frame->width,
        .height = input_frame->height,
    };

    params.surface = input_surface;
    params.surface_region = &input_region;
    params.surface_color_standard =
        vaapi_proc_colour_standard(input_frame->colorspace);

    params.output_region = NULL;
    params.output_background_color = 0xff000000;
    params.output_color_standard = params.surface_color_standard;

    params.pipeline_flags = 0;
    params.filter_flags   = VA_FRAME_PICTURE;

    vas = vaMapBuffer(ctx->hwctx->display, ctx->filter_buffer,
                      &filter_params_addr);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to map filter parameter "
               "buffer: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }
    filter_params = filter_params_addr;
    filter_params->flags = 0;
    if (input_frame->interlaced_frame && !input_frame->top_field_first)
        filter_params->flags |= VA_DEINTERLACING_BOTTOM_FIELD_FIRST;
    filter_params_addr = NULL;
    vas = vaUnmapBuffer(ctx->hwctx->display, ctx->filter_buffer);
    if (vas != VA_STATUS_SUCCESS)
        av_log(avctx, AV_LOG_ERROR, "Failed to unmap filter parameter "
               "buffer: %d (%s).\n", vas, vaErrorStr(vas));

    params.filters     = &ctx->filter_buffer;
    params.num_filters = 1;

    params.forward_references = forward_references;
    params.num_forward_references =
        ctx->pipeline_caps.num_forward_references;
    params.backward_references = backward_references;
    params.num_backward_references =
        ctx->pipeline_caps.num_backward_references;

    vas = vaBeginPicture(ctx->hwctx->display,
                         ctx->va_context, output_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to attach new picture: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAProcPipelineParameterBufferType,
                         sizeof(params), 1, &params, &params_id);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create parameter buffer: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_after_begin;
    }
    av_log(avctx, AV_LOG_DEBUG, "Pipeline parameter buffer is %#x.\n",
           params_id);

    vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
                          &params_id, 1);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to render parameter buffer: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_after_begin;
    }

    vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to start picture processing: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_after_render;
    }

    if (ctx->hwctx->driver_quirks &
        AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS) {
        vas = vaDestroyBuffer(ctx->hwctx->display, params_id);
        if (vas != VA_STATUS_SUCCESS) {
            av_log(avctx, AV_LOG_ERROR, "Failed to free parameter buffer: "
                   "%d (%s).\n", vas, vaErrorStr(vas));
            // And ignore.
        }
    }

    err = av_frame_copy_props(output_frame, input_frame);
    if (err < 0)
        goto fail;

    av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
           av_get_pix_fmt_name(output_frame->format),
           output_frame->width, output_frame->height, output_frame->pts);

    return ff_filter_frame(outlink, output_frame);

fail_after_begin:
    vaRenderPicture(ctx->hwctx->display, ctx->va_context, &params_id, 1);
fail_after_render:
    vaEndPicture(ctx->hwctx->display, ctx->va_context);
fail:
    if (filter_params_addr)
        vaUnmapBuffer(ctx->hwctx->display, ctx->filter_buffer);
    av_frame_free(&output_frame);
    return err;
}

static av_cold int deint_vaapi_init(AVFilterContext *avctx)
{
    DeintVAAPIContext *ctx = avctx->priv;

    ctx->va_config     = VA_INVALID_ID;
    ctx->va_context    = VA_INVALID_ID;
    ctx->filter_buffer = VA_INVALID_ID;
    ctx->valid_ids = 1;

    return 0;
}

static av_cold void deint_vaapi_uninit(AVFilterContext *avctx)
{
    DeintVAAPIContext *ctx = avctx->priv;

    if (ctx->valid_ids)
        deint_vaapi_pipeline_uninit(avctx);

    av_buffer_unref(&ctx->input_frames_ref);
    av_buffer_unref(&ctx->output_frames_ref);
    av_buffer_unref(&ctx->device_ref);
}

#define OFFSET(x) offsetof(DeintVAAPIContext, x)
#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM)
static const AVOption deint_vaapi_options[] = {
    { "mode", "Deinterlacing mode",
      OFFSET(mode), AV_OPT_TYPE_INT, { .i64 = VAProcDeinterlacingNone },
      VAProcDeinterlacingNone, VAProcDeinterlacingCount - 1, FLAGS, "mode" },
    { "default", "Use the highest-numbered (and therefore possibly most advanced) deinterlacing algorithm",
      0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingNone }, .unit = "mode" },
    { "bob", "Use the bob deinterlacing algorithm",
      0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingBob }, .unit = "mode" },
    { "weave", "Use the weave deinterlacing algorithm",
      0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingWeave }, .unit = "mode" },
    { "motion_adaptive", "Use the motion adaptive deinterlacing algorithm",
      0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingMotionAdaptive }, .unit = "mode" },
    { "motion_compensated", "Use the motion compensated deinterlacing algorithm",
      0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingMotionCompensated }, .unit = "mode" },
    { NULL },
};

static const AVClass deint_vaapi_class = {
    .class_name = "deinterlace_vaapi",
    .item_name  = av_default_item_name,
    .option     = deint_vaapi_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad deint_vaapi_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = &deint_vaapi_filter_frame,
        .config_props = &deint_vaapi_config_input,
    },
    { NULL }
};

static const AVFilterPad deint_vaapi_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = &deint_vaapi_config_output,
    },
    { NULL }
};

AVFilter ff_vf_deinterlace_vaapi = {
    .name           = "deinterlace_vaapi",
    .description    = NULL_IF_CONFIG_SMALL("Deinterlacing of VAAPI surfaces"),
    .priv_size      = sizeof(DeintVAAPIContext),
    .init           = &deint_vaapi_init,
    .uninit         = &deint_vaapi_uninit,
    .query_formats  = &deint_vaapi_query_formats,
    .inputs         = deint_vaapi_inputs,
    .outputs        = deint_vaapi_outputs,
    .priv_class     = &deint_vaapi_class,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};