/*
 * Video Decode and Presentation API for UNIX (VDPAU) is used for
 * HW decode acceleration for MPEG-1/2, H.264 and VC-1.
 *
 * Copyright (c) 2008 NVIDIA
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <limits.h>
#include "avcodec.h"
#include "h264.h"

#undef NDEBUG
#include <assert.h>

#include "vdpau.h"
#include "vdpau_internal.h"

/**
 * \addtogroup VDPAU_Decoding
 *
 * @{
 */

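/**
 * Fill the VdpPictureInfoH264 reference frame array for the current picture
 * from the decoder's short-term and long-term H.264 reference lists.
 */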
void ff_vdpau_h264_set_reference_frames(MpegEncContext *s)
{
    H264Context *h = s->avctx->priv_data;
    struct vdpau_render_state *render, *render_ref;
    VdpReferenceFrameH264 *rf, *rf2;
    Picture *pic;
    int i, list, pic_frame_idx;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    rf = &render->info.h264.referenceFrames[0];
#define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames)

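    /* Walk the short-term (list == 0) and long-term (list == 1) reference
     * picture lists. */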
    for (list = 0; list < 2; ++list) {
        Picture **lp = list ? h->long_ref : h->short_ref;
        int ls = list ? h->long_ref_count : h->short_ref_count;

        for (i = 0; i < ls; ++i) {
            pic = lp[i];
            if (!pic || !pic->reference)
                continue;
            pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;

            render_ref = (struct vdpau_render_state *)pic->data[0];
            assert(render_ref);

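            /* If this surface is already in the list (e.g. the other field of
             * the same frame), merge the field reference flags into the
             * existing entry instead of adding a duplicate. */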
            rf2 = &render->info.h264.referenceFrames[0];
            while (rf2 != rf) {
                if (
                    (rf2->surface      == render_ref->surface)
                    && (rf2->is_long_term == pic->long_ref)
                    && (rf2->frame_idx    == pic_frame_idx)
                )
                    break;
                ++rf2;
            }
            if (rf2 != rf) {
                rf2->top_is_reference    |= (pic->reference & PICT_TOP_FIELD)    ? VDP_TRUE : VDP_FALSE;
                rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
                continue;
            }

            if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT])
                continue;

            rf->surface             = render_ref->surface;
            rf->is_long_term        = pic->long_ref;
            rf->top_is_reference    = (pic->reference & PICT_TOP_FIELD)    ? VDP_TRUE : VDP_FALSE;
            rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
            rf->field_order_cnt[0]  = pic->field_poc[0];
            rf->field_order_cnt[1]  = pic->field_poc[1];
            rf->frame_idx           = pic_frame_idx;

            ++rf;
        }
    }

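    /* Mark all remaining reference slots as unused. */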
    for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) {
        rf->surface             = VDP_INVALID_HANDLE;
        rf->is_long_term        = 0;
        rf->top_is_reference    = 0;
        rf->bottom_is_reference = 0;
        rf->field_order_cnt[0]  = 0;
        rf->field_order_cnt[1]  = 0;
        rf->frame_idx           = 0;
    }
}

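/**
 * Append one bitstream chunk (e.g. the data of one slice) to the array of
 * VdpBitstreamBuffer entries attached to the current picture's render state.
 */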
void ff_vdpau_add_data_chunk(MpegEncContext *s,
                             const uint8_t *buf, int buf_size)
{
    struct vdpau_render_state *render;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    render->bitstream_buffers = av_fast_realloc(
        render->bitstream_buffers,
        &render->bitstream_buffers_allocated,
        sizeof(*render->bitstream_buffers) * (render->bitstream_buffers_used + 1)
    );

    render->bitstream_buffers[render->bitstream_buffers_used].struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
    render->bitstream_buffers[render->bitstream_buffers_used].bitstream       = buf;
    render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
    render->bitstream_buffers_used++;
}

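/**
 * Fill the VdpPictureInfoH264 structure for the current picture from the
 * H.264 decoder state (picture header, SPS and PPS fields) and hand the
 * picture to the application via ff_draw_horiz_band().
 */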
void ff_vdpau_h264_picture_complete(MpegEncContext *s)
{
    H264Context *h = s->avctx->priv_data;
    struct vdpau_render_state *render;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    render->info.h264.slice_count = h->slice_num;
    if (render->info.h264.slice_count < 1)
        return;

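    /* Use 0 for the picture order count of a field that is not present
     * (field_poc is INT_MAX in that case). */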
    for (int i = 0; i < 2; ++i) {
        int foc = s->current_picture_ptr->field_poc[i];
        if (foc == INT_MAX)
            foc = 0;
        render->info.h264.field_order_cnt[i] = foc;
    }

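    /* Copy the remaining picture parameters from the active SPS and PPS. */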
    render->info.h264.is_reference                           = s->current_picture_ptr->reference ? VDP_TRUE : VDP_FALSE;
    render->info.h264.frame_num                              = h->frame_num;
    render->info.h264.field_pic_flag                         = s->picture_structure != PICT_FRAME;
    render->info.h264.bottom_field_flag                      = s->picture_structure == PICT_BOTTOM_FIELD;
    render->info.h264.num_ref_frames                         = h->sps.ref_frame_count;
    render->info.h264.mb_adaptive_frame_field_flag           = h->sps.mb_aff;
    render->info.h264.constrained_intra_pred_flag            = h->pps.constrained_intra_pred;
    render->info.h264.weighted_pred_flag                     = h->pps.weighted_pred;
    render->info.h264.weighted_bipred_idc                    = h->pps.weighted_bipred_idc;
    render->info.h264.frame_mbs_only_flag                    = h->sps.frame_mbs_only_flag;
    render->info.h264.transform_8x8_mode_flag                = h->pps.transform_8x8_mode;
    render->info.h264.chroma_qp_index_offset                 = h->pps.chroma_qp_index_offset[0];
    render->info.h264.second_chroma_qp_index_offset          = h->pps.chroma_qp_index_offset[1];
    render->info.h264.pic_init_qp_minus26                    = h->pps.init_qp - 26;
    render->info.h264.num_ref_idx_l0_active_minus1           = h->pps.ref_count[0] - 1;
    render->info.h264.num_ref_idx_l1_active_minus1           = h->pps.ref_count[1] - 1;
    render->info.h264.log2_max_frame_num_minus4              = h->sps.log2_max_frame_num - 4;
    render->info.h264.pic_order_cnt_type                     = h->sps.poc_type;
    render->info.h264.log2_max_pic_order_cnt_lsb_minus4      = h->sps.log2_max_poc_lsb - 4;
    render->info.h264.delta_pic_order_always_zero_flag       = h->sps.delta_pic_order_always_zero_flag;
    render->info.h264.direct_8x8_inference_flag              = h->sps.direct_8x8_inference_flag;
    render->info.h264.entropy_coding_mode_flag               = h->pps.cabac;
    render->info.h264.pic_order_present_flag                 = h->pps.pic_order_present;
    render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
    render->info.h264.redundant_pic_cnt_present_flag         = h->pps.redundant_pic_cnt_present;
    memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
    memcpy(render->info.h264.scaling_lists_8x8, h->pps.scaling_matrix8, sizeof(render->info.h264.scaling_lists_8x8));

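    /* The picture is complete: pass it to the application through the
     * draw_horiz_band callback and reset the bitstream buffer list. */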
    ff_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}

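/**
 * Fill the VdpPictureInfoMPEG1Or2 structure for the current picture from the
 * MPEG-1/2 decoder state, attach the bitstream chunk and hand the picture to
 * the application via ff_draw_horiz_band().
 */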
void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
                                    int buf_size, int slice_count)
{
    struct vdpau_render_state *render, *last, *next;
    int i;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    /* fill VdpPictureInfoMPEG1Or2 struct */
    render->info.mpeg.picture_structure          = s->picture_structure;
    render->info.mpeg.picture_coding_type        = s->pict_type;
    render->info.mpeg.intra_dc_precision         = s->intra_dc_precision;
    render->info.mpeg.frame_pred_frame_dct       = s->frame_pred_frame_dct;
    render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
    render->info.mpeg.intra_vlc_format           = s->intra_vlc_format;
    render->info.mpeg.alternate_scan             = s->alternate_scan;
    render->info.mpeg.q_scale_type               = s->q_scale_type;
    render->info.mpeg.top_field_first            = s->top_field_first;
    render->info.mpeg.full_pel_forward_vector    = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2
    render->info.mpeg.full_pel_backward_vector   = s->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2
    render->info.mpeg.f_code[0][0]               = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
    render->info.mpeg.f_code[0][1]               = s->mpeg_f_code[0][1];
    render->info.mpeg.f_code[1][0]               = s->mpeg_f_code[1][0];
    render->info.mpeg.f_code[1][1]               = s->mpeg_f_code[1][1];
    for (i = 0; i < 64; ++i) {
        render->info.mpeg.intra_quantizer_matrix[i]     = s->intra_matrix[i];
        render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
    }

    render->info.mpeg.forward_reference  = VDP_INVALID_HANDLE;
    render->info.mpeg.backward_reference = VDP_INVALID_HANDLE;

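    /* Select the reference surfaces according to the picture coding type:
     * B-pictures need both backward and forward, P-pictures only forward. */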
    switch (s->pict_type) {
    case FF_B_TYPE:
        next = (struct vdpau_render_state *)s->next_picture.data[0];
        assert(next);
        render->info.mpeg.backward_reference = next->surface;
        // fall through to also set the forward reference
    case FF_P_TYPE:
        last = (struct vdpau_render_state *)s->last_picture.data[0];
        if (!last) // FIXME: Does this test make sense?
            last = render; // predict second field from the first
        render->info.mpeg.forward_reference = last->surface;
    }

    ff_vdpau_add_data_chunk(s, buf, buf_size);

    render->info.mpeg.slice_count = slice_count;

    if (slice_count)
        ff_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}

/* @} */