Replace two asserts by checks and error messages.
[libav.git] / libavcodec / mpegvideo_xvmc.c
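The commit message does not identify which two asserts were converted; judging from the resulting file below, they are presumably sanity checks on the render structure, such as the mv/data block bookkeeping tests near the end of ff_xvmc_decode_mb(). The general shape of such a change is sketched here (an illustrative before/after, not the actual diff):

    /* before: abort the whole process on bad input from the caller */
    assert(render->filled_mv_blocks_num <= render->allocated_mv_blocks);

    /* after: report the problem through the normal logging path instead */
    if (render->filled_mv_blocks_num > render->allocated_mv_blocks)
        av_log(s->avctx, AV_LOG_ERROR,
               "Number of filled mv blocks exceeds number of allocated mv blocks.\n");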
/*
 * XVideo Motion Compensation
 * Copyright (c) 2003 Ivan Kalvachev
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <limits.h>

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"

#undef NDEBUG
#include <assert.h>

#include "xvmc.h"
#include "xvmc_internal.h"

/**
 * Initialize the block field of the MpegEncContext pointer passed as a
 * parameter after making sure that the data is not corrupted.
 * In order to implement something like direct rendering, coefficients are
 * decoded straight into the data_blocks array provided by XvMC instead of
 * being decoded into s->block and copied afterwards.
 */
void ff_xvmc_init_block(MpegEncContext *s)
{
    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.data[2];
    assert(render && render->xvmc_id == AV_XVMC_ID);

    s->block = (DCTELEM (*)[64])(render->data_blocks + render->next_free_data_block_num * 64);
}
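
/*
 * For context: a rough sketch of what the calling application is expected to
 * have set up before decoding starts. This is an illustration based on the
 * xvmc_pix_fmt fields used in this file, not a complete XvMC client; the
 * variable names and the libXvMC allocation calls are placeholders.
 *
 *     struct xvmc_pix_fmt *render = av_mallocz(sizeof(*render));
 *     render->xvmc_id               = AV_XVMC_ID;       // sanity token checked above
 *     render->data_blocks           = dct_block_array;  // e.g. from XvMCCreateBlocks()
 *     render->allocated_data_blocks = num_data_blocks;
 *     render->mv_blocks             = mv_block_array;   // e.g. from XvMCCreateMacroBlocks()
 *     render->allocated_mv_blocks   = num_mv_blocks;
 *     render->idct                  = hw_does_idct;     // IDCT-level acceleration?
 *     render->unsigned_intra        = hw_unsigned_intra;
 *     render->p_surface             = target_surface;   // XvMCSurface to render into
 *
 * The pointer is then typically handed back to the decoder in data[2] of the
 * frame returned by the application's get_buffer() callback, which is where
 * s->current_picture.data[2] above comes from.
 */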

/**
 * Fill individual block pointers, so there are no gaps in the data_block array
 * in case not all blocks in the MB are coded.
 */
void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
{
    int i, j = 0;
    const int mb_block_count = 4 + (1 << s->chroma_format);

    cbp <<= 12 - mb_block_count;
    for (i = 0; i < mb_block_count; i++) {
        if (cbp & (1 << 11))
            s->pblocks[i] = (short *)(&s->block[j++]);
        else
            s->pblocks[i] = NULL;
        cbp += cbp;
    }
}
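
/*
 * Worked example, assuming the MPEG-2 chroma_format code 1 (4:2:0), so
 * mb_block_count == 6: with cbp == 0b101001 (blocks 0, 2 and 5 coded) the
 * loop above produces
 *     pblocks[0] -> s->block[0]
 *     pblocks[1] == NULL
 *     pblocks[2] -> s->block[1]
 *     pblocks[3] == NULL
 *     pblocks[4] == NULL
 *     pblocks[5] -> s->block[2]
 * i.e. the coded blocks are packed back to back in the data_block area and
 * the uncoded ones simply get no slot.
 */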

/**
 * Find and store the surfaces that are used as reference frames.
 * This function should be called for every new field and/or frame.
 * It should be safe to call the function a few times for the same field.
 */
int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
{
    struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.data[2];
    const int mb_block_count = 4 + (1 << s->chroma_format);

    assert(avctx);
    if (!render || render->xvmc_id != AV_XVMC_ID ||
        !render->data_blocks || !render->mv_blocks) {
        av_log(avctx, AV_LOG_ERROR,
               "Render token doesn't look as expected.\n");
        return -1; // make sure that this is a render packet
    }

    if (render->filled_mv_blocks_num) {
        av_log(avctx, AV_LOG_ERROR,
               "Rendering surface contains %i unprocessed blocks.\n",
               render->filled_mv_blocks_num);
        return -1;
    }
    if (render->allocated_mv_blocks < 1 ||
        render->allocated_data_blocks < mb_block_count) {
        av_log(avctx, AV_LOG_ERROR,
               "Rendering surface doesn't provide enough block structures to work with.\n");
        return -1;
    }

    render->picture_structure = s->picture_structure;
    render->flags             = s->first_field ? 0 : XVMC_SECOND_FIELD;
    render->p_future_surface  = NULL;
    render->p_past_surface    = NULL;

    switch(s->pict_type) {
    case FF_I_TYPE:
        return 0; // no prediction from other frames
    case FF_B_TYPE:
        next = (struct xvmc_pix_fmt*)s->next_picture.data[2];
        if (!next)
            return -1;
        if (next->xvmc_id != AV_XVMC_ID)
            return -1;
        render->p_future_surface = next->p_surface;
        // no return here, going to set forward prediction
    case FF_P_TYPE:
        last = (struct xvmc_pix_fmt*)s->last_picture.data[2];
        if (!last)
            last = render; // predict second field from the first
        if (last->xvmc_id != AV_XVMC_ID)
            return -1;
        render->p_past_surface = last->p_surface;
        return 0;
    }

    return -1;
}
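
/*
 * Illustration of the resulting reference wiring (no additional logic, just
 * the cases above spelled out):
 *   I picture:  p_past_surface = NULL,            p_future_surface = NULL
 *   P picture:  p_past_surface = last->p_surface, p_future_surface = NULL
 *   B picture:  p_past_surface = last->p_surface, p_future_surface = next->p_surface
 *   second field of a P picture with no last picture yet:
 *               p_past_surface = render->p_surface, i.e. the second field is
 *               predicted from the first field of the same surface.
 */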

/**
 * Complete frame/field rendering by passing any remaining blocks.
 * Normally ff_draw_horiz_band() is called for each slice, however,
 * some leftover blocks, for example from error_resilience(), may remain.
 * It should be safe to call the function a few times for the same field.
 */
void ff_xvmc_field_end(MpegEncContext *s)
{
    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.data[2];
    assert(render);

    if (render->filled_mv_blocks_num > 0)
        ff_draw_horiz_band(s, 0, 0);
}

/**
 * Synthesize the data needed by XvMC to render one macroblock of data.
 * Fill all relevant fields, if necessary do IDCT.
 */
void ff_xvmc_decode_mb(MpegEncContext *s)
{
    XvMCMacroBlock *mv_block;
    struct xvmc_pix_fmt *render;
    int i, cbp, blocks_per_mb;

    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;


    if (s->encoding) {
        av_log(s->avctx, AV_LOG_ERROR, "XVMC doesn't support encoding!!!\n");
        return;
    }

    // from MPV_decode_mb(), update DC predictors for P macroblocks
    if (!s->mb_intra) {
        s->last_dc[0] =
        s->last_dc[1] =
        s->last_dc[2] = 128 << s->intra_dc_precision;
    }

    // MC doesn't skip blocks
    s->mb_skipped = 0;


    // Do I need to export quant when I could not perform postprocessing?
    // Anyway, it doesn't hurt.
    s->current_picture.qscale_table[mb_xy] = s->qscale;

    // start of XVMC-specific code
    render = (struct xvmc_pix_fmt*)s->current_picture.data[2];
    assert(render);
    assert(render->xvmc_id == AV_XVMC_ID);
    assert(render->mv_blocks);

    // take the next free macroblock
    mv_block = &render->mv_blocks[render->start_mv_blocks_num +
                                  render->filled_mv_blocks_num];

    mv_block->x        = s->mb_x;
    mv_block->y        = s->mb_y;
    mv_block->dct_type = s->interlaced_dct; // XVMC_DCT_TYPE_FRAME/FIELD;
    if (s->mb_intra) {
        mv_block->macroblock_type = XVMC_MB_TYPE_INTRA; // no MC, all done
    } else {
        mv_block->macroblock_type = XVMC_MB_TYPE_PATTERN;

        if (s->mv_dir & MV_DIR_FORWARD) {
            mv_block->macroblock_type |= XVMC_MB_TYPE_MOTION_FORWARD;
            // PMV[n][dir][xy] = mv[dir][n][xy]
            mv_block->PMV[0][0][0] = s->mv[0][0][0];
            mv_block->PMV[0][0][1] = s->mv[0][0][1];
            mv_block->PMV[1][0][0] = s->mv[0][1][0];
            mv_block->PMV[1][0][1] = s->mv[0][1][1];
        }
        if (s->mv_dir & MV_DIR_BACKWARD) {
            mv_block->macroblock_type |= XVMC_MB_TYPE_MOTION_BACKWARD;
            mv_block->PMV[0][1][0] = s->mv[1][0][0];
            mv_block->PMV[0][1][1] = s->mv[1][0][1];
            mv_block->PMV[1][1][0] = s->mv[1][1][0];
            mv_block->PMV[1][1][1] = s->mv[1][1][1];
        }

        switch(s->mv_type) {
        case MV_TYPE_16X16:
            mv_block->motion_type = XVMC_PREDICTION_FRAME;
            break;
        case MV_TYPE_16X8:
            mv_block->motion_type = XVMC_PREDICTION_16x8;
            break;
        case MV_TYPE_FIELD:
            mv_block->motion_type = XVMC_PREDICTION_FIELD;
            if (s->picture_structure == PICT_FRAME) {
                mv_block->PMV[0][0][1] <<= 1;
                mv_block->PMV[1][0][1] <<= 1;
                mv_block->PMV[0][1][1] <<= 1;
                mv_block->PMV[1][1][1] <<= 1;
            }
            break;
        case MV_TYPE_DMV:
            mv_block->motion_type = XVMC_PREDICTION_DUAL_PRIME;
            if (s->picture_structure == PICT_FRAME) {

                mv_block->PMV[0][0][0] = s->mv[0][0][0];      // top from top
                mv_block->PMV[0][0][1] = s->mv[0][0][1] << 1;

                mv_block->PMV[0][1][0] = s->mv[0][0][0];      // bottom from bottom
                mv_block->PMV[0][1][1] = s->mv[0][0][1] << 1;

                mv_block->PMV[1][0][0] = s->mv[0][2][0];      // dmv00, top from bottom
                mv_block->PMV[1][0][1] = s->mv[0][2][1] << 1; // dmv01

                mv_block->PMV[1][1][0] = s->mv[0][3][0];      // dmv10, bottom from top
                mv_block->PMV[1][1][1] = s->mv[0][3][1] << 1; // dmv11

            } else {
                mv_block->PMV[0][1][0] = s->mv[0][2][0]; // dmv00
                mv_block->PMV[0][1][1] = s->mv[0][2][1]; // dmv01
            }
            break;
        default:
            assert(0);
        }

        mv_block->motion_vertical_field_select = 0;

        // set correct field references
        if (s->mv_type == MV_TYPE_FIELD || s->mv_type == MV_TYPE_16X8) {
            mv_block->motion_vertical_field_select |= s->field_select[0][0];
            mv_block->motion_vertical_field_select |= s->field_select[1][0] << 1;
            mv_block->motion_vertical_field_select |= s->field_select[0][1] << 2;
            mv_block->motion_vertical_field_select |= s->field_select[1][1] << 3;
        }
    } // !intra
    // time to handle data blocks
    mv_block->index = render->next_free_data_block_num;

    blocks_per_mb = 6;
    if (s->chroma_format >= 2) {
        blocks_per_mb = 4 + (1 << s->chroma_format);
    }

    // calculate cbp
    cbp = 0;
    for (i = 0; i < blocks_per_mb; i++) {
        cbp += cbp;
        if (s->block_last_index[i] >= 0)
            cbp++;
    }
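
    /*
     * Example: with blocks_per_mb == 6 and block_last_index[] == {0, -1, 7, -1, -1, 3}
     * (blocks 0, 2 and 5 carry coefficients), the loop above yields
     * cbp == 0b101001, the same MSB-first layout that ff_xvmc_pack_pblocks()
     * consumes; the value is stored in mv_block->coded_block_pattern below.
     */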

    if (s->flags & CODEC_FLAG_GRAY) {
        if (s->mb_intra) {                                   // intra frames are always full chroma blocks
            for (i = 4; i < blocks_per_mb; i++) {
                memset(s->pblocks[i], 0, sizeof(short)*64);  // so we need to clear them
                if (!render->unsigned_intra)
                    s->pblocks[i][0] = 1 << 10;
            }
        } else {
            cbp &= 0xf << (blocks_per_mb - 4);
            blocks_per_mb = 4;                               // luminance blocks only
        }
    }
    mv_block->coded_block_pattern = cbp;
    if (cbp == 0)
        mv_block->macroblock_type &= ~XVMC_MB_TYPE_PATTERN;

    for (i = 0; i < blocks_per_mb; i++) {
        if (s->block_last_index[i] >= 0) {
            // I do not have unsigned_intra MOCO to test, hope it is OK.
            if (s->mb_intra && (render->idct || (!render->idct && !render->unsigned_intra)))
                s->pblocks[i][0] -= 1 << 10;
            if (!render->idct) {
                s->dsp.idct(s->pblocks[i]);
                /* It is unclear if MC hardware requires pixel diff values to be
                 * in the range [-255;255]. TODO: Clipping if such hardware is
                 * ever found. As of now it would only be an unnecessary
                 * slowdown. */
            }
            // copy blocks only if the codec doesn't support pblocks reordering
            if (s->avctx->xvmc_acceleration == 1) {
                memcpy(&render->data_blocks[render->next_free_data_block_num*64],
                       s->pblocks[i], sizeof(short)*64);
            }
            render->next_free_data_block_num++;
        }
    }
    render->filled_mv_blocks_num++;


    if (render->filled_mv_blocks_num > render->allocated_mv_blocks)
        av_log(s->avctx, AV_LOG_ERROR,
               "Number of filled mv blocks exceeds number of allocated mv blocks.\n");

    if (render->next_free_data_block_num > render->allocated_data_blocks)
        av_log(s->avctx, AV_LOG_ERROR,
               "Offset to next data block exceeds number of allocated data blocks.\n");


    if (render->filled_mv_blocks_num == render->allocated_mv_blocks)
        ff_draw_horiz_band(s, 0, 0);
}
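
/*
 * Putting the helpers together, the per-picture flow on the decoder side is
 * roughly the following (a sketch of how these functions are meant to be
 * driven from the mpegvideo/mpeg12 code, not an additional API):
 *
 *     ff_xvmc_field_start(s, avctx);   // once per field/frame: check the render
 *                                      // token and wire up reference surfaces
 *     for (each macroblock) {
 *         ff_xvmc_init_block(s);       // point s->block at the next free data blocks
 *         // ... bitstream decoding fills the blocks (ff_xvmc_pack_pblocks()
 *         //     keeps them gap-free when only some blocks are coded) ...
 *         ff_xvmc_decode_mb(s);        // emit one XvMCMacroBlock plus its data;
 *                                      // calls ff_draw_horiz_band() when the
 *                                      // mv block buffer is full
 *     }
 *     ff_xvmc_field_end(s);            // hand any leftover blocks to the hardware
 */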