/*
 * XVideo Motion Compensation
 * Copyright (c) 2003 Ivan Kalvachev
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <limits.h>

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"

#undef NDEBUG
#include <assert.h>

#include "xvmc.h"
#include "xvmc_internal.h"

/**
 * Initialize the block field of the MpegEncContext pointed to by s,
 * after checking that the attached XvMC render state is valid.
 */
void ff_xvmc_init_block(MpegEncContext *s)
{
    struct xvmc_render_state *render = (struct xvmc_render_state*)s->current_picture.data[2];
    assert(render);
    if (!render || render->magic != AV_XVMC_RENDER_MAGIC) {
        assert(0);
        return; // make sure that this is a render packet
    }
    s->block = (DCTELEM *)(render->data_blocks + render->next_free_data_block_num * 64);
}
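
/* Illustrative note: data_blocks is a flat array of 64-coefficient DCT
 * blocks, so the assignment above points s->block at the first free block,
 * i.e. at coefficient offset next_free_data_block_num * 64; e.g. with
 * next_free_data_block_num == 3 that is the fourth block, offset 192. */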

/**
 * Fill the pblocks array with pointers into s->block: each coded block
 * (per the coded block pattern) gets the next free 64-coefficient slot,
 * while uncoded blocks get NULL.
 */
void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
{
    int i, j = 0;
    const int mb_block_count = 4 + (1 << s->chroma_format);

    cbp <<= 12 - mb_block_count;
    for (i = 0; i < mb_block_count; i++) {
        if (cbp & (1 << 11))
            s->pblocks[i] = (short *)(&s->block[j++]);
        else
            s->pblocks[i] = NULL;
        cbp += cbp;
    }
}
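
/* Worked example for 4:2:0 (chroma_format == 1, mb_block_count == 6):
 * the 6-bit cbp is shifted left by 12 - 6 = 6 places, so block i is
 * tested against bit 11 on iteration i. With cbp = 0b101010 (blocks
 * 0, 2 and 4 coded), the result is pblocks[0] = &block[0],
 * pblocks[2] = &block[1], pblocks[4] = &block[2], and NULL for the
 * uncoded blocks 1, 3 and 5. */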

/**
 * This function should be called for every new field and/or frame.
 * It should be safe to call the function a few times for the same field.
 */
int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
{
    struct xvmc_render_state *last, *next, *render = (struct xvmc_render_state*)s->current_picture.data[2];

    assert(avctx);
    assert(render);
    if (!render || render->magic != AV_XVMC_RENDER_MAGIC)
        return -1; // make sure that this is a render packet

    render->picture_structure = s->picture_structure;
    render->flags             = s->first_field ? 0 : XVMC_SECOND_FIELD;

    if (render->filled_mv_blocks_num) {
        av_log(avctx, AV_LOG_ERROR,
               "Rendering surface contains %i unprocessed blocks\n",
               render->filled_mv_blocks_num);
        return -1;
    }

    render->p_future_surface = NULL;
    render->p_past_surface   = NULL;

    switch (s->pict_type) {
        case FF_I_TYPE:
            return 0; // no prediction from other frames
        case FF_B_TYPE:
            next = (struct xvmc_render_state*)s->next_picture.data[2];
            assert(next);
            if (!next)
                return -1;
            if (next->magic != AV_XVMC_RENDER_MAGIC)
                return -1;
            render->p_future_surface = next->p_surface;
            // no return here, going to set forward prediction
        case FF_P_TYPE:
            last = (struct xvmc_render_state*)s->last_picture.data[2];
            if (!last)
                last = render; // predict second field from the first
            if (last->magic != AV_XVMC_RENDER_MAGIC)
                return -1;
            render->p_past_surface = last->p_surface;
            return 0;
    }

    return -1;
}
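
/* Reference wiring established above (illustrative): for a B-frame both
 *
 *     render->p_future_surface = surface of s->next_picture
 *     render->p_past_surface   = surface of s->last_picture
 *
 * are set, a P-frame sets only p_past_surface, and an I-frame sets
 * neither, since it is predicted from no other frame. */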

/**
 * Complete the current field or frame by flushing any motion-compensation
 * blocks that are still queued on the rendering surface.
 */
void ff_xvmc_field_end(MpegEncContext *s)
{
    struct xvmc_render_state *render = (struct xvmc_render_state*)s->current_picture.data[2];
    assert(render);

    if (render->filled_mv_blocks_num > 0)
        ff_draw_horiz_band(s, 0, 0);
}
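
/* Typical call sequence per picture, assumed from the functions above
 * (an illustrative sketch, not a normative API description):
 *
 *     ff_xvmc_field_start(s, avctx);
 *     for (each macroblock) {
 *         ff_xvmc_init_block(s);        // point s->block at a free slot
 *         ff_xvmc_pack_pblocks(s, cbp); // map coded blocks
 *         ff_xvmc_decode_mb(s);         // queue the macroblock
 *     }
 *     ff_xvmc_field_end(s);             // flush whatever is left
 */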

void ff_xvmc_decode_mb(MpegEncContext *s)
{
    XvMCMacroBlock *mv_block;
    struct xvmc_render_state *render;
    int i, cbp, blocks_per_mb;

    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

    if (s->encoding) {
        av_log(s->avctx, AV_LOG_ERROR, "XVMC does not support encoding!\n");
        return;
    }

    // from MPV_decode_mb(), update DC predictors for P macroblocks
    if (!s->mb_intra) {
        s->last_dc[0] =
        s->last_dc[1] =
        s->last_dc[2] = 128 << s->intra_dc_precision;
    }

    // MC doesn't skip blocks
    s->mb_skipped = 0;

    // Exporting the quantizer may be unnecessary when postprocessing cannot
    // be performed, but it does not hurt.
    s->current_picture.qscale_table[mb_xy] = s->qscale;

    // start of XVMC-specific code
    render = (struct xvmc_render_state*)s->current_picture.data[2];
    assert(render);
    assert(render->magic == AV_XVMC_RENDER_MAGIC);
    assert(render->mv_blocks);

    // take the next free macroblock
    mv_block = &render->mv_blocks[render->start_mv_blocks_num +
                                  render->filled_mv_blocks_num];

    mv_block->x        = s->mb_x;
    mv_block->y        = s->mb_y;
    mv_block->dct_type = s->interlaced_dct; // XVMC_DCT_TYPE_FRAME/FIELD
    if (s->mb_intra) {
        mv_block->macroblock_type = XVMC_MB_TYPE_INTRA; // no MC, all done
    } else {
        mv_block->macroblock_type = XVMC_MB_TYPE_PATTERN;

        if (s->mv_dir & MV_DIR_FORWARD) {
            mv_block->macroblock_type |= XVMC_MB_TYPE_MOTION_FORWARD;
            // PMV[n][dir][xy] = mv[dir][n][xy]
            mv_block->PMV[0][0][0] = s->mv[0][0][0];
            mv_block->PMV[0][0][1] = s->mv[0][0][1];
            mv_block->PMV[1][0][0] = s->mv[0][1][0];
            mv_block->PMV[1][0][1] = s->mv[0][1][1];
        }
        if (s->mv_dir & MV_DIR_BACKWARD) {
            mv_block->macroblock_type |= XVMC_MB_TYPE_MOTION_BACKWARD;
            mv_block->PMV[0][1][0] = s->mv[1][0][0];
            mv_block->PMV[0][1][1] = s->mv[1][0][1];
            mv_block->PMV[1][1][0] = s->mv[1][1][0];
            mv_block->PMV[1][1][1] = s->mv[1][1][1];
        }

        switch (s->mv_type) {
            case MV_TYPE_16X16:
                mv_block->motion_type = XVMC_PREDICTION_FRAME;
                break;
            case MV_TYPE_16X8:
                mv_block->motion_type = XVMC_PREDICTION_16x8;
                break;
            case MV_TYPE_FIELD:
                mv_block->motion_type = XVMC_PREDICTION_FIELD;
                if (s->picture_structure == PICT_FRAME) {
                    mv_block->PMV[0][0][1] <<= 1;
                    mv_block->PMV[1][0][1] <<= 1;
                    mv_block->PMV[0][1][1] <<= 1;
                    mv_block->PMV[1][1][1] <<= 1;
                }
                break;
            case MV_TYPE_DMV:
                mv_block->motion_type = XVMC_PREDICTION_DUAL_PRIME;
                if (s->picture_structure == PICT_FRAME) {

                    mv_block->PMV[0][0][0] = s->mv[0][0][0];      // top from top
                    mv_block->PMV[0][0][1] = s->mv[0][0][1] << 1;

                    mv_block->PMV[0][1][0] = s->mv[0][0][0];      // bottom from bottom
                    mv_block->PMV[0][1][1] = s->mv[0][0][1] << 1;

                    mv_block->PMV[1][0][0] = s->mv[0][2][0];      // dmv00, top from bottom
                    mv_block->PMV[1][0][1] = s->mv[0][2][1] << 1; // dmv01

                    mv_block->PMV[1][1][0] = s->mv[0][3][0];      // dmv10, bottom from top
                    mv_block->PMV[1][1][1] = s->mv[0][3][1] << 1; // dmv11

                } else {
                    mv_block->PMV[0][1][0] = s->mv[0][2][0]; // dmv00
                    mv_block->PMV[0][1][1] = s->mv[0][2][1]; // dmv01
                }
                break;
            default:
                assert(0);
        }
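
        /* Note on the vertical shifts above (a reading of this code, not
         * of XvMC documentation): in frame pictures field motion vectors
         * are stored in field-line units, so their vertical components are
         * doubled to get the frame-coordinate values the hardware expects;
         * e.g. a decoded vertical component of 3 becomes 6 frame lines. */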

        mv_block->motion_vertical_field_select = 0;

        // set correct field references
        if (s->mv_type == MV_TYPE_FIELD || s->mv_type == MV_TYPE_16X8) {
            mv_block->motion_vertical_field_select |= s->field_select[0][0];
            mv_block->motion_vertical_field_select |= s->field_select[1][0] << 1;
            mv_block->motion_vertical_field_select |= s->field_select[0][1] << 2;
            mv_block->motion_vertical_field_select |= s->field_select[1][1] << 3;
        }
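
        /* Resulting bit layout, as implied by the shifts above: bit 0 is
         * the first forward vector, bit 1 the first backward vector, bit 2
         * the second forward vector and bit 3 the second backward vector;
         * each bit selects the bottom (1) or top (0) reference field. */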
    } // !intra
    // time to handle data blocks
    mv_block->index = render->next_free_data_block_num;

    blocks_per_mb = 6;
    if (s->chroma_format >= 2) {
        blocks_per_mb = 4 + (1 << s->chroma_format);
    }

    // calculate cbp
    cbp = 0;
    for (i = 0; i < blocks_per_mb; i++) {
        cbp += cbp;
        if (s->block_last_index[i] >= 0)
            cbp++;
    }
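    /* Worked example of the loop above (4:2:0, blocks_per_mb == 6): if
     * only blocks 0, 2 and 5 carry coefficients, the bits accumulate
     * MSB-first as 1,0,1,0,0,1, i.e. cbp == 0b101001 == 0x29; block 0
     * lands in bit blocks_per_mb - 1 and the last block in bit 0. */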

    if (s->flags & CODEC_FLAG_GRAY) {
        if (s->mb_intra) { // intra frames are always full chroma blocks
            for (i = 4; i < blocks_per_mb; i++) {
                memset(s->pblocks[i], 0, sizeof(short)*64); // so we need to clear them
                if (!render->unsigned_intra)
                    s->pblocks[i][0] = 1 << 10;
            }
        } else {
            cbp &= 0xf << (blocks_per_mb - 4);
            blocks_per_mb = 4; // luminance blocks only
        }
    }
    mv_block->coded_block_pattern = cbp;
    if (cbp == 0)
        mv_block->macroblock_type &= ~XVMC_MB_TYPE_PATTERN;

    for (i = 0; i < blocks_per_mb; i++) {
        if (s->block_last_index[i] >= 0) {
            // I do not have unsigned_intra MOCO to test, hope it is OK.
            if (s->mb_intra && (render->idct || !render->unsigned_intra))
                s->pblocks[i][0] -= 1 << 10;
            if (!render->idct) {
                s->dsp.idct(s->pblocks[i]);
                /* It is unclear whether MC hardware requires pixel diff
                 * values to be in the range [-255; 255]. TODO: add clipping
                 * if such hardware is ever found; until then it would only
                 * be an unnecessary slowdown. */
            }
            // copy blocks only if the codec doesn't support pblocks reordering
            if (s->avctx->xvmc_acceleration == 1) {
                memcpy(&render->data_blocks[render->next_free_data_block_num*64],
                       s->pblocks[i], sizeof(short)*64);
            }
            render->next_free_data_block_num++;
        }
    }
    render->filled_mv_blocks_num++;

    assert(render->filled_mv_blocks_num     <= render->total_number_of_mv_blocks);
    assert(render->next_free_data_block_num <= render->total_number_of_data_blocks);
    /* The above conditions should not be able to fail as long as this
     * function is used and the following 'if ()' automatically calls a
     * callback to free blocks. */

    if (render->filled_mv_blocks_num >= render->total_number_of_mv_blocks)
        ff_draw_horiz_band(s, 0, 0);
}