/*
 * Intel MediaSDK QSV codec-independent code
 *
 * copyright (c) 2013 Luca Barbato
 * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdint.h>
#include <string.h>
#include <sys/types.h>

#include <mfx/mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_qsv.h"
#include "libavutil/mem.h"
#include "libavutil/log.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
#include "libavutil/time.h"

#include "avcodec.h"
#include "internal.h"
#include "qsv.h"
#include "qsv_internal.h"
#include "qsvdec.h"
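/* Select the mfxSession used for decoding: either the caller-provided
 * session, a new internal session bound to the given hw frames context,
 * or a plain internal session with system-memory output. */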
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
                            AVBufferRef *hw_frames_ref)
{
    int ret;

    if (session) {
        q->session = session;
    } else if (hw_frames_ref) {
        if (q->internal_session) {
            MFXClose(q->internal_session);
            q->internal_session = NULL;
        }
        av_buffer_unref(&q->frames_ctx.hw_frames_ctx);

        q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
        if (!q->frames_ctx.hw_frames_ctx)
            return AVERROR(ENOMEM);

        ret = ff_qsv_init_session_frames(avctx, &q->internal_session,
                                         &q->frames_ctx, q->load_plugins,
                                         q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY);
        if (ret < 0) {
            av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
            return ret;
        }

        q->session = q->internal_session;
    } else {
        if (!q->internal_session) {
            ret = ff_qsv_init_internal_session(avctx, &q->internal_session,
                                               q->load_plugins);
            if (ret < 0)
                return ret;
        }

        q->session = q->internal_session;
    }

    /* make sure the decoder is uninitialized */
    MFXVideoDECODE_Close(q->session);

    return 0;
}
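/* Initialize the MFX decoder: derive the IOPattern and coded frame geometry
 * from the hwaccel / hw_frames contexts when present, fill the mfxVideoParam
 * and open the decoder on the selected session. */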
static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q)
{
    const AVPixFmtDescriptor *desc;
    mfxSession session = NULL;
    int iopattern = 0;
    mfxVideoParam param = { 0 };
    int frame_width  = avctx->coded_width;
    int frame_height = avctx->coded_height;
    int ret;

    desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
    if (!desc)
        return AVERROR_BUG;

    if (!q->async_fifo) {
        q->async_fifo = av_fifo_alloc((1 + q->async_depth) *
                                      (sizeof(mfxSyncPoint*) + sizeof(QSVFrame*)));
        if (!q->async_fifo)
            return AVERROR(ENOMEM);
    }

    if (avctx->pix_fmt == AV_PIX_FMT_QSV && avctx->hwaccel_context) {
        AVQSVContext *user_ctx = avctx->hwaccel_context;
        session           = user_ctx->session;
        iopattern         = user_ctx->iopattern;
        q->ext_buffers    = user_ctx->ext_buffers;
        q->nb_ext_buffers = user_ctx->nb_ext_buffers;
    }

    if (avctx->hw_frames_ctx) {
        AVHWFramesContext *frames_ctx     = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        AVQSVFramesContext *frames_hwctx  = frames_ctx->hwctx;

        if (!iopattern) {
            if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
                iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
            else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
                iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
        }

        frame_width  = frames_hwctx->surfaces[0].Info.Width;
        frame_height = frames_hwctx->surfaces[0].Info.Height;
    }

    if (!iopattern)
        iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    q->iopattern = iopattern;

    ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
        return ret;
    }

    ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
    if (ret < 0)
        return ret;

    param.mfx.CodecId      = ret;
    param.mfx.CodecProfile = ff_qsv_profile_to_mfx(avctx->codec_id, avctx->profile);
    param.mfx.CodecLevel   = avctx->level == FF_LEVEL_UNKNOWN ? MFX_LEVEL_UNKNOWN : avctx->level;

    param.mfx.FrameInfo.BitDepthLuma   = desc->comp[0].depth;
    param.mfx.FrameInfo.BitDepthChroma = desc->comp[0].depth;
    param.mfx.FrameInfo.Shift          = desc->comp[0].depth > 8;
    param.mfx.FrameInfo.FourCC         = q->fourcc;
    param.mfx.FrameInfo.Width          = frame_width;
    param.mfx.FrameInfo.Height         = frame_height;
    param.mfx.FrameInfo.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;

    switch (avctx->field_order) {
    case AV_FIELD_PROGRESSIVE:
        param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
        break;
    case AV_FIELD_TT:
        param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_FIELD_TFF;
        break;
    case AV_FIELD_BB:
        param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_FIELD_BFF;
        break;
    default:
        param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_UNKNOWN;
        break;
    }

    param.IOPattern   = q->iopattern;
    param.AsyncDepth  = q->async_depth;
    param.ExtParam    = q->ext_buffers;
    param.NumExtParam = q->nb_ext_buffers;

    ret = MFXVideoDECODE_Init(q->session, &param);
    if (ret < 0)
        return ff_qsv_print_error(avctx, ret,
                                  "Error initializing the MFX video decoder");

    q->frame_info = param.mfx.FrameInfo;

    return 0;
}
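/* Bind a freshly acquired AVFrame buffer to the QSVFrame and describe it to
 * the SDK through the embedded mfxFrameSurface1. */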
static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
{
    int ret;

    ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;

    if (frame->frame->format == AV_PIX_FMT_QSV) {
        frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
    } else {
        frame->surface.Info = q->frame_info;

        frame->surface.Data.PitchLow = frame->frame->linesize[0];
        frame->surface.Data.Y        = frame->frame->data[0];
        frame->surface.Data.UV       = frame->frame->data[1];
    }

    if (q->frames_ctx.mids) {
        ret = ff_qsv_find_surface_idx(&q->frames_ctx, frame);
        if (ret < 0)
            return ret;

        frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
    }

    frame->used = 1;

    return 0;
}
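/* Release the frame buffers that are neither locked by the SDK nor queued
 * for output, so their slots can be reused. */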
static void qsv_clear_unused_frames(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;
    while (cur) {
        if (cur->used && !cur->surface.Data.Locked && !cur->queued) {
            cur->used = 0;
            av_frame_unref(cur->frame);
        }
        cur = cur->next;
    }
}
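/* Hand the SDK a free input surface, reusing an unused entry from the
 * work_frames list or appending a new one. */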
static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
{
    QSVFrame *frame, **last;
    int ret;

    qsv_clear_unused_frames(q);

    frame = q->work_frames;
    last  = &q->work_frames;
    while (frame) {
        if (!frame->used) {
            ret = alloc_frame(avctx, q, frame);
            if (ret < 0)
                return ret;
            *surf = &frame->surface;
            return 0;
        }

        last  = &frame->next;
        frame = frame->next;
    }

    frame = av_mallocz(sizeof(*frame));
    if (!frame)
        return AVERROR(ENOMEM);
    frame->frame = av_frame_alloc();
    if (!frame->frame) {
        av_freep(&frame);
        return AVERROR(ENOMEM);
    }
    *last = frame;

    ret = alloc_frame(avctx, q, frame);
    if (ret < 0)
        return ret;

    *surf = &frame->surface;

    return 0;
}
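/* Map an output surface returned by the SDK back to the QSVFrame it belongs to. */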
static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
{
    QSVFrame *cur = q->work_frames;
    while (cur) {
        if (surf == &cur->surface)
            return cur;
        cur = cur->next;
    }
    return NULL;
}
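/* Feed one packet to the asynchronous decoder and, once enough work is
 * queued (or the stream is being drained), synchronize and output the
 * oldest decoded frame. */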
static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
                      AVFrame *frame, int *got_frame,
                      AVPacket *avpkt)
{
    QSVFrame *out_frame;
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    mfxSyncPoint *sync;
    mfxBitstream bs = { { { 0 } } };
    int ret;

    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    }

    sync = av_mallocz(sizeof(*sync));
    if (!sync)
        return AVERROR(ENOMEM);

    do {
        ret = get_surface(avctx, q, &insurf);
        if (ret < 0) {
            av_freep(&sync);
            return ret;
        }

        ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
                                              insurf, &outsurf, sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(500);

    } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);

    if (ret != MFX_ERR_NONE &&
        ret != MFX_ERR_MORE_DATA &&
        ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
        ret != MFX_ERR_MORE_SURFACE) {
        av_freep(&sync);
        return ff_qsv_print_error(avctx, ret,
                                  "Error during QSV decoding.");
    }

    /* make sure we do not enter an infinite loop if the SDK
     * did not consume any data and did not return anything */
    if (!*sync && !bs.DataOffset) {
        bs.DataOffset = avpkt->size;
        ++q->zero_consume_run;
        if (q->zero_consume_run > 1)
            ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data");
    } else {
        q->zero_consume_run = 0;
    }

    if (*sync) {
        QSVFrame *out_frame = find_frame(q, outsurf);

        if (!out_frame) {
            av_log(avctx, AV_LOG_ERROR,
                   "The returned surface does not correspond to any frame\n");
            av_freep(&sync);
            return AVERROR_BUG;
        }

        out_frame->queued = 1;
        av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_write(q->async_fifo, &sync,      sizeof(sync),      NULL);
    } else {
        av_freep(&sync);
    }

    if (!av_fifo_space(q->async_fifo) ||
        (!avpkt->size && av_fifo_size(q->async_fifo))) {
        AVFrame *src_frame;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);
        out_frame->queued = 0;

        if (avctx->pix_fmt != AV_PIX_FMT_QSV) {
            do {
                ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
            } while (ret == MFX_WRN_IN_EXECUTION);
        }

        av_freep(&sync);

        src_frame = out_frame->frame;

        ret = av_frame_ref(frame, src_frame);
        if (ret < 0)
            return ret;

        outsurf = &out_frame->surface;

FF_DISABLE_DEPRECATION_WARNINGS
        frame->pkt_pts = outsurf->Data.TimeStamp;
FF_ENABLE_DEPRECATION_WARNINGS
        frame->pts = outsurf->Data.TimeStamp;

        frame->repeat_pict =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
        frame->top_field_first =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
        frame->interlaced_frame =
            !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);

        /* update the surface properties */
        if (avctx->pix_fmt == AV_PIX_FMT_QSV)
            ((mfxFrameSurface1*)frame->data[3])->Info = outsurf->Info;

        *got_frame = 1;
    }

    return bs.DataOffset;
}
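/* Free everything owned by the QSV decoding context: queued sync points,
 * the work-frame list, the async fifo, the parser and the MFX session. */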
int ff_qsv_decode_close(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;

    if (q->session)
        MFXVideoDECODE_Close(q->session);

    while (q->async_fifo && av_fifo_size(q->async_fifo)) {
        QSVFrame *out_frame;
        mfxSyncPoint *sync;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);

        av_freep(&sync);
    }

    while (cur) {
        q->work_frames = cur->next;
        av_frame_free(&cur->frame);
        av_freep(&cur);
        cur = q->work_frames;
    }

    av_fifo_free(q->async_fifo);
    q->async_fifo = NULL;

    av_parser_close(q->parser);
    avcodec_free_context(&q->avctx_internal);

    if (q->internal_session)
        MFXClose(q->internal_session);

    av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
    av_buffer_unref(&q->frames_ctx.mids_buf);

    return 0;
}
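/* Decode one packet: probe the stream parameters with a parser,
 * (re)initialize the decoder when they change, then run the decode loop. */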
int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q,
                        AVFrame *frame, int *got_frame, AVPacket *pkt)
{
    uint8_t *dummy_data;
    int dummy_size;
    int ret;

    if (!q->avctx_internal) {
        q->avctx_internal = avcodec_alloc_context3(NULL);
        if (!q->avctx_internal)
            return AVERROR(ENOMEM);

        if (avctx->extradata) {
            q->avctx_internal->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!q->avctx_internal->extradata)
                return AVERROR(ENOMEM);

            memcpy(q->avctx_internal->extradata, avctx->extradata,
                   avctx->extradata_size);
            q->avctx_internal->extradata_size = avctx->extradata_size;
        }

        q->parser = av_parser_init(avctx->codec_id);
        if (!q->parser)
            return AVERROR(ENOMEM);

        q->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
        q->orig_pix_fmt   = AV_PIX_FMT_NONE;
    }

    if (!pkt->size)
        return qsv_decode(avctx, q, frame, got_frame, pkt);

    /* we assume the packets are already split properly and want
     * just the codec parameters here */
    av_parser_parse2(q->parser, q->avctx_internal,
                     &dummy_data, &dummy_size,
                     pkt->data, pkt->size, pkt->pts, pkt->dts,
                     pkt->pos);

    /* TODO: flush delayed frames on reinit */
    if (q->parser->format       != q->orig_pix_fmt    ||
        q->parser->coded_width  != avctx->coded_width ||
        q->parser->coded_height != avctx->coded_height) {
        enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
                                           AV_PIX_FMT_NONE,
                                           AV_PIX_FMT_NONE };
        enum AVPixelFormat qsv_format;

        qsv_format = ff_qsv_map_pixfmt(q->parser->format, &q->fourcc);
        if (qsv_format < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Decoding pixel format '%s' is not supported\n",
                   av_get_pix_fmt_name(q->parser->format));
            ret = AVERROR(ENOSYS);
            goto reinit_fail;
        }

        q->orig_pix_fmt     = q->parser->format;
        avctx->pix_fmt      = pix_fmts[1] = qsv_format;
        avctx->width        = q->parser->width;
        avctx->height       = q->parser->height;
        avctx->coded_width  = q->parser->coded_width;
        avctx->coded_height = q->parser->coded_height;
        avctx->field_order  = q->parser->field_order;
        avctx->level        = q->avctx_internal->level;
        avctx->profile      = q->avctx_internal->profile;

        ret = ff_get_format(avctx, pix_fmts);
        if (ret < 0)
            goto reinit_fail;

        avctx->pix_fmt = ret;

        ret = qsv_decode_init(avctx, q);
        if (ret < 0)
            goto reinit_fail;
    }

    return qsv_decode(avctx, q, frame, got_frame, pkt);

reinit_fail:
    q->orig_pix_fmt = q->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE;
    return ret;
}
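/* Reset the detected pixel format so the next packet triggers a
 * reinitialization of the decoder. */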
void ff_qsv_decode_flush(AVCodecContext *avctx, QSVContext *q)
{
    q->orig_pix_fmt = AV_PIX_FMT_NONE;
}