lavc: Add an internal wrapper around get_format()
[libav.git] / libavcodec / vc1dec.c
1 /*
2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2011 Mashiat Sarker Shakkhar
4 * Copyright (c) 2006-2007 Konstantin Shishkov
5 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6 *
7 * This file is part of Libav.
8 *
9 * Libav is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
13 *
14 * Libav is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with Libav; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
24 /**
25 * @file
26 * VC-1 and WMV3 decoder
27 */
28
29 #include "internal.h"
30 #include "avcodec.h"
31 #include "error_resilience.h"
32 #include "mpegutils.h"
33 #include "mpegvideo.h"
34 #include "h263.h"
35 #include "h264chroma.h"
36 #include "vc1.h"
37 #include "vc1data.h"
38 #include "vc1acdata.h"
39 #include "msmpeg4data.h"
40 #include "unary.h"
41 #include "mathops.h"
42
43 #undef NDEBUG
44 #include <assert.h>
45
#define MB_INTRA_VLC_BITS 9
#define DC_VLC_BITS 9


// offset tables for interlaced picture MVDATA decoding
// offset_table1[k] == 1 << (k - 1) for k >= 1; offset_table2[k] == (1 << k) - 1
static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };

/***********************************************************************/
/**
 * @name VC-1 Bitplane decoding
 * @see 8.7, p56
 * @{
 */

/**
 * Imode types
 * Bitplane coding modes; numeric order matters, it is the decode index.
 * See the spec section referenced above for each mode's semantics.
 * @{
 */
enum Imode {
    IMODE_RAW,
    IMODE_NORM2,
    IMODE_DIFF2,
    IMODE_NORM6,
    IMODE_DIFF6,
    IMODE_ROWSKIP,
    IMODE_COLSKIP
};
/** @} */ //imode defines
75
/**
 * Set up the destination pointers (s->dest[]) for the current macroblock.
 * In field mode, when decoding the field selected by second_field/tff,
 * shift all three plane pointers down one line so writes land in the
 * other field's lines of the interleaved frame buffer.
 */
static void init_block_index(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    ff_init_block_index(s);
    /* NOTE(review): !(second_field ^ tff) selects which spatial field gets
     * the one-line offset; presumably tff == top-field-first as signalled
     * in the picture header — confirm against the header parser. */
    if (v->field_mode && !(v->second_field ^ v->tff)) {
        s->dest[0] += s->current_picture_ptr->f->linesize[0];
        s->dest[1] += s->current_picture_ptr->f->linesize[1];
        s->dest[2] += s->current_picture_ptr->f->linesize[2];
    }
}
86
87 /** @} */ //Bitplane group
88
/**
 * Write out the clamped residual blocks of the macroblock that trails the
 * decoding position by one MB row and one MB column, then rotate the
 * delayed-block ring indices.  The delay exists because overlap filtering
 * of an edge needs the neighboring MB to be decoded first (see below).
 */
static void vc1_put_signed_blocks_clamped(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int topleft_mb_pos, top_mb_pos;
    int stride_y, fieldtx = 0;
    int v_dist;

    /* The put pixels loop is always one MB row behind the decoding loop,
     * because we can only put pixels when overlap filtering is done, and
     * for filtering of the bottom edge of a MB, we need the next MB row
     * present as well.
     * Within the row, the put pixels loop is also one MB col behind the
     * decoding loop. The reason for this is again, because for filtering
     * of the right MB edge, we need the next MB present. */
    if (!s->first_slice_line) {
        if (s->mb_x) {
            /* flush the top-left delayed MB */
            topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
            if (v->fcm == ILACE_FRAME)
                fieldtx = v->fieldtx_plane[topleft_mb_pos];
            /* field-transformed MBs store the two fields interleaved: luma
             * is written at doubled stride, and the lower 8x8 blocks start
             * one line down (v_dist == 15) instead of eight (v_dist == 8) */
            stride_y = s->linesize << fieldtx;
            v_dist = (16 - fieldtx) >> (fieldtx == 0);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
                                             s->dest[0] - 16 * s->linesize - 16,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
                                             s->dest[0] - 16 * s->linesize - 8,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
                                             s->dest[0] - v_dist * s->linesize - 16,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
                                             s->dest[0] - v_dist * s->linesize - 8,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
                                             s->dest[1] - 8 * s->uvlinesize - 8,
                                             s->uvlinesize);
            s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
                                             s->dest[2] - 8 * s->uvlinesize - 8,
                                             s->uvlinesize);
        }
        if (s->mb_x == s->mb_width - 1) {
            /* end of row: also flush the MB directly above (no right
             * neighbor will ever exist for it) */
            top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
            if (v->fcm == ILACE_FRAME)
                fieldtx = v->fieldtx_plane[top_mb_pos];
            stride_y = s->linesize << fieldtx;
            v_dist = fieldtx ? 15 : 8;
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
                                             s->dest[0] - 16 * s->linesize,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
                                             s->dest[0] - 16 * s->linesize + 8,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
                                             s->dest[0] - v_dist * s->linesize,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
                                             s->dest[0] - v_dist * s->linesize + 8,
                                             stride_y);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
                                             s->dest[1] - 8 * s->uvlinesize,
                                             s->uvlinesize);
            s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
                                             s->dest[2] - 8 * s->uvlinesize,
                                             s->uvlinesize);
        }
    }

/* advance a ring-buffer index over the n_allocated_blks delayed block sets */
#define inc_blk_idx(idx) do { \
        idx++; \
        if (idx >= v->n_allocated_blks) \
            idx = 0; \
    } while (0)

    inc_blk_idx(v->topleft_blk_idx);
    inc_blk_idx(v->top_blk_idx);
    inc_blk_idx(v->left_blk_idx);
    inc_blk_idx(v->cur_blk_idx);
}
167
/**
 * In-loop deblocking for intra blocks (non-delayed variant): filter the
 * edges of the current MB against its top/left neighbors.  Vertical edges
 * of a row are filtered only after the row above exists; the bottom and
 * right picture borders are handled on the last row/column.
 *
 * @param pq quantizer, passed through to the vc1dsp loop filters
 */
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
{
    MpegEncContext *s = &v->s;
    int j;
    if (!s->first_slice_line) {
        /* top edge of current MB vs. the MB above */
        v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
        if (s->mb_x)
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
        for (j = 0; j < 2; j++) {
            /* chroma planes (dest[1] = U, dest[2] = V) */
            v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
            if (s->mb_x)
                v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
        }
    }
    /* internal horizontal luma edge (between the upper and lower 8x8 rows) */
    v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);

    if (s->mb_y == s->end_mb_y - 1) {
        /* last MB row: filter the left/internal vertical edges now, since
         * no later row will revisit them */
        if (s->mb_x) {
            v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
            v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
            v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
        }
        v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
    }
}
194
/**
 * In-loop deblocking for intra blocks, delayed variant: filters the MB that
 * is two rows/columns behind the decoding position (see comment below),
 * with special cases for the last column (mb_x == mb_width - 1) and the
 * last row (mb_y == end_mb_y) so border MBs are fully filtered.
 *
 * @param pq quantizer, passed through to the vc1dsp loop filters
 */
static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
{
    MpegEncContext *s = &v->s;
    int j;

    /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
     * means it runs two rows/cols behind the decoding loop. */
    if (!s->first_slice_line) {
        if (s->mb_x) {
            if (s->mb_y >= s->start_mb_y + 2) {
                v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);

                if (s->mb_x >= 2)
                    v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
                for (j = 0; j < 2; j++) {
                    v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
                    if (s->mb_x >= 2) {
                        v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
                    }
                }
            }
            /* internal horizontal luma edge of the trailing MB */
            v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
        }

        if (s->mb_x == s->mb_width - 1) {
            /* last column: the MB above the current one has no pending right
             * neighbor, filter it now */
            if (s->mb_y >= s->start_mb_y + 2) {
                v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);

                if (s->mb_x)
                    v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
                for (j = 0; j < 2; j++) {
                    v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
                    if (s->mb_x >= 2) {
                        v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
                    }
                }
            }
            v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
        }

        if (s->mb_y == s->end_mb_y) {
            /* past the last decoded row: only vertical (h_) edges remain */
            if (s->mb_x) {
                if (s->mb_x >= 2)
                    v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
                if (s->mb_x >= 2) {
                    for (j = 0; j < 2; j++) {
                        v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
                    }
                }
            }

            if (s->mb_x == s->mb_width - 1) {
                if (s->mb_x)
                    v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
                v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
                if (s->mb_x) {
                    for (j = 0; j < 2; j++) {
                        v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
                    }
                }
            }
        }
    }
}
262
/**
 * Conditional overlap smoothing for intra blocks.  Applies the VC-1
 * overlap-transform filter to the edges of delayed blocks; whether an edge
 * is filtered depends on condover and (per MB) on the over_flags bitplane.
 * H filtering of an MB runs one column before its V filtering — see the
 * scheduling comment below.
 */
static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    int mb_pos;

    if (v->condover == CONDOVER_NONE)
        return;

    mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    /* Within a MB, the horizontal overlap always runs before the vertical.
     * To accomplish that, we run the H on left and internal borders of the
     * currently decoded MB. Then, we wait for the next overlap iteration
     * to do H overlap on the right edge of this MB, before moving over and
     * running the V overlap. Therefore, the V overlap makes us trail by one
     * MB col and the H overlap filter makes us trail by one MB row. This
     * is reflected in the time at which we run the put_pixels loop. */
    if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
        if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                        v->over_flags_plane[mb_pos - 1])) {
            /* H overlap between the left neighbor and the current MB */
            v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
                                      v->block[v->cur_blk_idx][0]);
            v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
                                      v->block[v->cur_blk_idx][2]);
            if (!(s->flags & CODEC_FLAG_GRAY)) {
                v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
                                          v->block[v->cur_blk_idx][4]);
                v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
                                          v->block[v->cur_blk_idx][5]);
            }
        }
        /* internal H edges of the current MB */
        v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
                                  v->block[v->cur_blk_idx][1]);
        v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
                                  v->block[v->cur_blk_idx][3]);

        if (s->mb_x == s->mb_width - 1) {
            /* last column: no right neighbor will trigger the V pass, run it now */
            if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                                         v->over_flags_plane[mb_pos - s->mb_stride])) {
                v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
                                          v->block[v->cur_blk_idx][0]);
                v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
                                          v->block[v->cur_blk_idx][1]);
                if (!(s->flags & CODEC_FLAG_GRAY)) {
                    v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
                                              v->block[v->cur_blk_idx][4]);
                    v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
                                              v->block[v->cur_blk_idx][5]);
                }
            }
            v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
                                      v->block[v->cur_blk_idx][2]);
            v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
                                      v->block[v->cur_blk_idx][3]);
        }
    }
    /* NOTE(review): unlike the three analogous checks above, this condition
     * omits the "v->pq >= 9" clause — verify against SMPTE 421M whether the
     * V pass on the left neighbor should also be unconditional at pq >= 9. */
    if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
        if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
                                     v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
            v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
                                      v->block[v->left_blk_idx][0]);
            v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
                                      v->block[v->left_blk_idx][1]);
            if (!(s->flags & CODEC_FLAG_GRAY)) {
                v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
                                          v->block[v->left_blk_idx][4]);
                v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
                                          v->block[v->left_blk_idx][5]);
            }
        }
        v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
                                  v->block[v->left_blk_idx][2]);
        v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
                                  v->block[v->left_blk_idx][3]);
    }
}
339
/** Do motion compensation over 1 macroblock
 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
 *
 * Handles luma (quarter-pel "mspel" or half-pel) and chroma (qpel bilinear)
 * in one pass, including edge emulation, range reduction and intensity
 * compensation of the source pixels.
 *
 * @param dir 0 = forward prediction (last/current picture),
 *            1 = backward prediction (next picture)
 */
static void vc1_mc_1mv(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    int i;
    uint8_t (*luty)[256], (*lutuv)[256];
    int use_ic;

    /* no reference frame available yet — nothing to predict from */
    if ((!v->field_mode ||
         (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f->data[0])
        return;

    mx = s->mv[dir][0][0];
    my = s->mv[dir][0][1];

    // store motion vectors for further use in B frames
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        for (i = 0; i < 4; i++) {
            s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
            s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
        }
    }

    /* derive the chroma MV from the luma MV (quarter-pel -> chroma units) */
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;

    /* cross-field reference: bias the vertical component */
    if (v->field_mode &&
        v->cur_field_type != v->ref_field_type[dir]) {
        my = my - 2 + 4 * v->cur_field_type;
        uvmy = uvmy - 2 + 4 * v->cur_field_type;
    }

    // fastuvmc shall be ignored for interlaced frame picture
    if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
        /* round chroma MV toward zero to the nearest even value */
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    }
    /* select reference planes and the matching IC lookup tables */
    if (!dir) {
        if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
            srcY = s->current_picture.f->data[0];
            srcU = s->current_picture.f->data[1];
            srcV = s->current_picture.f->data[2];
            luty = v->curr_luty;
            lutuv = v->curr_lutuv;
            use_ic = v->curr_use_ic;
        } else {
            srcY = s->last_picture.f->data[0];
            srcU = s->last_picture.f->data[1];
            srcV = s->last_picture.f->data[2];
            luty = v->last_luty;
            lutuv = v->last_lutuv;
            use_ic = v->last_use_ic;
        }
    } else {
        srcY = s->next_picture.f->data[0];
        srcU = s->next_picture.f->data[1];
        srcV = s->next_picture.f->data[2];
        luty = v->next_luty;
        lutuv = v->next_lutuv;
        use_ic = v->next_use_ic;
    }

    if (!srcY || !srcU) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip( src_x, -16, s->mb_width * 16);
        src_y = av_clip( src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
    } else {
        src_x = av_clip( src_x, -17, s->avctx->coded_width);
        src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    }

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    /* opposite-parity reference field starts one line down */
    if (v->field_mode && v->ref_field_type[dir]) {
        srcY += s->current_picture_ptr->f->linesize[0];
        srcU += s->current_picture_ptr->f->linesize[1];
        srcV += s->current_picture_ptr->f->linesize[2];
    }

    /* for grayscale we should not try to read from unknown area */
    if (s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    /* slow path: the read window leaves the picture, or the source pixels
     * must be rewritten (range reduction / intensity compensation) */
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 22 || v_edge_pos < 22
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
        || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
        uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;

        srcY -= s->mspel * (1 + s->linesize);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 17 + s->mspel * 2, 17 + s->mspel * 2,
                                 src_x - s->mspel, src_y - s->mspel,
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        s->vdsp.emulated_edge_mc(uvbuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                /* f selects the per-field lookup table (by row parity in
                 * frame mode, by reference field otherwise) */
                int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
                src += s->linesize;
            }
            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i] = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        srcY += s->mspel * (1 + s->linesize);
    }

    if (s->mspel) {
        /* quarter-pel interpolation, one call per 8x8 luma quadrant */
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
        v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if (!v->rnd)
            s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
        else
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
    }

    if (s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
548
/**
 * Median of four integers: the average of the two middle values,
 * with the division truncating toward zero.
 */
static inline int median4(int a, int b, int c, int d)
{
    /* sort each input pair, then the two middle values are the larger
     * of the two minima and the smaller of the two maxima */
    int lo1 = a < b ? a : b;
    int hi1 = a < b ? b : a;
    int lo2 = c < d ? c : d;
    int hi2 = c < d ? d : c;
    int mid_low  = lo1 > lo2 ? lo1 : lo2;
    int mid_high = hi1 < hi2 ? hi1 : hi2;

    return (mid_low + mid_high) / 2;
}
559
560 /** Do motion compensation for 4-MV macroblock - luminance block
561 */
562 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
563 {
564 MpegEncContext *s = &v->s;
565 uint8_t *srcY;
566 int dxy, mx, my, src_x, src_y;
567 int off;
568 int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
569 int v_edge_pos = s->v_edge_pos >> v->field_mode;
570 uint8_t (*luty)[256];
571 int use_ic;
572
573 if ((!v->field_mode ||
574 (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
575 !v->s.last_picture.f->data[0])
576 return;
577
578 mx = s->mv[dir][n][0];
579 my = s->mv[dir][n][1];
580
581 if (!dir) {
582 if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
583 srcY = s->current_picture.f->data[0];
584 luty = v->curr_luty;
585 use_ic = v->curr_use_ic;
586 } else {
587 srcY = s->last_picture.f->data[0];
588 luty = v->last_luty;
589 use_ic = v->last_use_ic;
590 }
591 } else {
592 srcY = s->next_picture.f->data[0];
593 luty = v->next_luty;
594 use_ic = v->next_use_ic;
595 }
596
597 if (!srcY) {
598 av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
599 return;
600 }
601
602 if (v->field_mode) {
603 if (v->cur_field_type != v->ref_field_type[dir])
604 my = my - 2 + 4 * v->cur_field_type;
605 }
606
607 if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
608 int same_count = 0, opp_count = 0, k;
609 int chosen_mv[2][4][2], f;
610 int tx, ty;
611 for (k = 0; k < 4; k++) {
612 f = v->mv_f[0][s->block_index[k] + v->blocks_off];
613 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
614 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
615 opp_count += f;
616 same_count += 1 - f;
617 }
618 f = opp_count > same_count;
619 switch (f ? opp_count : same_count) {
620 case 4:
621 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
622 chosen_mv[f][2][0], chosen_mv[f][3][0]);
623 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
624 chosen_mv[f][2][1], chosen_mv[f][3][1]);
625 break;
626 case 3:
627 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
628 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
629 break;
630 case 2:
631 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
632 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
633 break;
634 }
635 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
636 s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
637 for (k = 0; k < 4; k++)
638 v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
639 }
640
641 if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
642 int qx, qy;
643 int width = s->avctx->coded_width;
644 int height = s->avctx->coded_height >> 1;
645 if (s->pict_type == AV_PICTURE_TYPE_P) {
646 s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
647 s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
648 }
649 qx = (s->mb_x * 16) + (mx >> 2);
650 qy = (s->mb_y * 8) + (my >> 3);
651
652 if (qx < -17)
653 mx -= 4 * (qx + 17);
654 else if (qx > width)
655 mx -= 4 * (qx - width);
656 if (qy < -18)
657 my -= 8 * (qy + 18);
658 else if (qy > height + 1)
659 my -= 8 * (qy - height - 1);
660 }
661
662 if ((v->fcm == ILACE_FRAME) && fieldmv)
663 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
664 else
665 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
666
667 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
668 if (!fieldmv)
669 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
670 else
671 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
672
673 if (v->profile != PROFILE_ADVANCED) {
674 src_x = av_clip(src_x, -16, s->mb_width * 16);
675 src_y = av_clip(src_y, -16, s->mb_height * 16);
676 } else {
677 src_x = av_clip(src_x, -17, s->avctx->coded_width);
678 if (v->fcm == ILACE_FRAME) {
679 if (src_y & 1)
680 src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
681 else
682 src_y = av_clip(src_y, -18, s->avctx->coded_height);
683 } else {
684 src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
685 }
686 }
687
688 srcY += src_y * s->linesize + src_x;
689 if (v->field_mode && v->ref_field_type[dir])
690 srcY += s->current_picture_ptr->f->linesize[0];
691
692 if (fieldmv && !(src_y & 1))
693 v_edge_pos--;
694 if (fieldmv && (src_y & 1) && src_y < 4)
695 src_y--;
696 if (v->rangeredfrm || use_ic
697 || s->h_edge_pos < 13 || v_edge_pos < 23
698 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
699 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
700 srcY -= s->mspel * (1 + (s->linesize << fieldmv));
701 /* check emulate edge stride and offset */
702 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
703 s->linesize, s->linesize,
704 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
705 src_x - s->mspel, src_y - (s->mspel << fieldmv),
706 s->h_edge_pos, v_edge_pos);
707 srcY = s->edge_emu_buffer;
708 /* if we deal with range reduction we need to scale source blocks */
709 if (v->rangeredfrm) {
710 int i, j;
711 uint8_t *src;
712
713 src = srcY;
714 for (j = 0; j < 9 + s->mspel * 2; j++) {
715 for (i = 0; i < 9 + s->mspel * 2; i++)
716 src[i] = ((src[i] - 128) >> 1) + 128;
717 src += s->linesize << fieldmv;
718 }
719 }
720 /* if we deal with intensity compensation we need to scale source blocks */
721 if (use_ic) {
722 int i, j;
723 uint8_t *src;
724
725 src = srcY;
726 for (j = 0; j < 9 + s->mspel * 2; j++) {
727 int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
728 for (i = 0; i < 9 + s->mspel * 2; i++)
729 src[i] = luty[f][src[i]];
730 src += s->linesize << fieldmv;
731 }
732 }
733 srcY += s->mspel * (1 + (s->linesize << fieldmv));
734 }
735
736 if (s->mspel) {
737 dxy = ((my & 3) << 2) | (mx & 3);
738 if (avg)
739 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
740 else
741 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
742 } else { // hpel mc - always used for luma
743 dxy = (my & 2) | ((mx & 2) >> 1);
744 if (!v->rnd)
745 s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
746 else
747 s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
748 }
749 }
750
/**
 * Derive a chroma MV candidate (*tx, *ty) from up to four luma MVs,
 * using only the blocks whose a[] entry equals @p flag (callers pass
 * intra flags with flag == 0, or field selectors with flag == dominant).
 *
 * @return number of luma MVs actually used (4, 3, 2) or 0 if none matched
 */
static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
{
    int idx, i;
    /* count[idx] = popcount of the 4-bit mismatch mask built below */
    static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};

    /* bit k of idx is set iff luma block k does NOT match the wanted flag */
    idx = ((a[3] != flag) << 3)
        | ((a[2] != flag) << 2)
        | ((a[1] != flag) << 1)
        | (a[0] != flag);
    if (!idx) {
        /* all four usable: component-wise median */
        *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
        *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
        return 4;
    } else if (count[idx] == 1) {
        /* exactly one mismatch: median of the remaining three */
        switch (idx) {
        case 0x1:
            *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
            *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
            return 3;
        case 0x2:
            *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
            *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
            return 3;
        case 0x4:
            *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
            *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
            return 3;
        case 0x8:
            *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
            *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
            return 3;
        }
    } else if (count[idx] == 2) {
        /* two mismatches: average the two blocks with a[i] == 0.
         * NOTE(review): this tests !a[i] rather than a[i] == flag; with the
         * current callers the two coincide whenever this branch is reachable
         * (flag == 1 implies at most one mismatch) — verify if callers change. */
        int t1 = 0, t2 = 0;
        for (i = 0; i < 3; i++)
            if (!a[i]) {
                t1 = i;
                break;
            }
        for (i = t1 + 1; i < 4; i++)
            if (!a[i]) {
                t2 = i;
                break;
            }
        *tx = (mvx[t1] + mvx[t2]) / 2;
        *ty = (mvy[t1] + mvy[t2]) / 2;
        return 2;
    } else {
        return 0;
    }
    return -1; /* unreachable: every count[idx] == 1 case returns above */
}
803
/** Do motion compensation for 4-MV macroblock - both chroma blocks
 *
 * Derives a single chroma MV from the four luma MVs (via get_chroma_mv),
 * then predicts the U and V blocks with qpel bilinear interpolation,
 * including edge emulation, range reduction and intensity compensation.
 *
 * @param dir 0 = forward reference, 1 = backward reference
 */
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcU, *srcV;
    int uvmx, uvmy, uvsrc_x, uvsrc_y;
    int k, tx = 0, ty = 0;
    int mvx[4], mvy[4], intra[4], mv_f[4];
    int valid_count;
    int chroma_ref_type = v->cur_field_type;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    uint8_t (*lutuv)[256];
    int use_ic;

    if (!v->field_mode && !v->s.last_picture.f->data[0])
        return;
    if (s->flags & CODEC_FLAG_GRAY)
        return;

    /* gather the four luma MVs plus per-block intra/field-selector flags */
    for (k = 0; k < 4; k++) {
        mvx[k] = s->mv[dir][k][0];
        mvy[k] = s->mv[dir][k][1];
        intra[k] = v->mb_type[0][s->block_index[k]];
        if (v->field_mode)
            mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
    }

    /* calculate chroma MV vector from four luma MVs */
    if (!v->field_mode || (v->field_mode && !v->numref)) {
        /* single-reference case: exclude intra blocks (flag == 0) */
        valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
        chroma_ref_type = v->reffield;
        if (!valid_count) {
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            return; //no need to do MC for intra blocks
        }
    } else {
        /* two-reference field picture: use the MVs of the dominant field */
        int dominant = 0;
        if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
            dominant = 1;
        valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
        if (dominant)
            chroma_ref_type = !v->cur_field_type;
    }
    if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
        return;
    s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
    s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
    /* luma quarter-pel -> chroma units */
    uvmx = (tx + ((tx & 3) == 3)) >> 1;
    uvmy = (ty + ((ty & 3) == 3)) >> 1;

    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;

    if (v->fastuvmc) {
        /* round chroma MV toward zero to the nearest even value */
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    }
    // Field conversion bias
    if (v->cur_field_type != chroma_ref_type)
        uvmy += 2 - 4 * chroma_ref_type;

    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
    } else {
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    }

    /* select reference planes and the matching IC lookup table */
    if (!dir) {
        if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
            srcU = s->current_picture.f->data[1];
            srcV = s->current_picture.f->data[2];
            lutuv = v->curr_lutuv;
            use_ic = v->curr_use_ic;
        } else {
            srcU = s->last_picture.f->data[1];
            srcV = s->last_picture.f->data[2];
            lutuv = v->last_lutuv;
            use_ic = v->last_use_ic;
        }
    } else {
        srcU = s->next_picture.f->data[1];
        srcV = s->next_picture.f->data[2];
        lutuv = v->next_lutuv;
        use_ic = v->next_use_ic;
    }

    if (!srcU) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    if (v->field_mode) {
        /* opposite-parity reference field starts one line down */
        if (chroma_ref_type) {
            srcU += s->current_picture_ptr->f->linesize[1];
            srcV += s->current_picture_ptr->f->linesize[2];
        }
    }

    /* slow path: window leaves the picture or pixels must be rewritten */
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 18 || v_edge_pos < 18
        || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
        || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = s->edge_emu_buffer;
        srcV = s->edge_emu_buffer + 16;

        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            int i, j;
            uint8_t *src, *src2;

            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                /* f selects the per-field lookup table (row parity in frame mode) */
                int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i] = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
    }

    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
975
/** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
 *
 * The chroma plane of the MB is processed as four 4x4 sub-blocks.  Each
 * sub-block derives its MV from the co-located luma block MV (direction
 * @p dir for the top pair, @p dir2 for the bottom pair), scaled down to
 * chroma resolution.  With field MVs (fieldmv) the vertical component is
 * remapped through s_rndtblfield and source lines are fetched with a
 * doubled stride.
 *
 * @param v    VC-1 decoder context
 * @param dir  prediction direction for sub-blocks 0 and 1 (0 = last/forward,
 *             nonzero = next/backward reference)
 * @param dir2 prediction direction for sub-blocks 2 and 3
 * @param avg  nonzero to average into the destination instead of storing
 */
static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcU, *srcV;
    int uvsrc_x, uvsrc_y;
    int uvmx_field[4], uvmy_field[4];
    int i, off, tx, ty;
    int fieldmv = v->blk_mv_type[s->block_index[0]];
    static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
    int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
    int v_edge_pos = s->v_edge_pos >> 1;
    int use_ic;
    uint8_t (*lutuv)[256];

    /* luma-only decoding: chroma planes are not produced */
    if (s->flags & CODEC_FLAG_GRAY)
        return;

    /* scale the four luma MVs down to chroma resolution */
    for (i = 0; i < 4; i++) {
        int d = i < 2 ? dir: dir2;
        tx = s->mv[d][i][0];
        uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
        ty = s->mv[d][i][1];
        if (fieldmv)
            /* field MV: integer part scaled, fractional part remapped */
            uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
        else
            uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
    }

    for (i = 0; i < 4; i++) {
        /* destination offset and source position of this 4x4 sub-block */
        off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
        uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
        uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
        // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
        /* pick reference frame and intensity-compensation LUT per direction */
        if (i < 2 ? dir : dir2) {
            srcU = s->next_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
            srcV = s->next_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
            lutuv = v->next_lutuv;
            use_ic = v->next_use_ic;
        } else {
            srcU = s->last_picture.f->data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
            srcV = s->last_picture.f->data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
            lutuv = v->last_lutuv;
            use_ic = v->last_use_ic;
        }
        /* keep only the fractional part (doubled) for the chroma filter */
        uvmx_field[i] = (uvmx_field[i] & 3) << 1;
        uvmy_field[i] = (uvmy_field[i] & 3) << 1;

        if (fieldmv && !(uvsrc_y & 1))
            v_edge_pos--;
        if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
            uvsrc_y--;
        /* fall back to edge emulation when IC is active or the 5x5 source
         * patch may cross the picture edges */
        if (use_ic
            || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
            || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
            || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
                                     s->uvlinesize, s->uvlinesize,
                                     5, (5 << fieldmv), uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, v_edge_pos);
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
                                     s->uvlinesize, s->uvlinesize,
                                     5, (5 << fieldmv), uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, v_edge_pos);
            srcU = s->edge_emu_buffer;
            srcV = s->edge_emu_buffer + 16;

            /* if we deal with intensity compensation we need to scale source blocks */
            if (use_ic) {
                int i, j; /* shadows the outer loop variable i */
                uint8_t *src, *src2;

                src = srcU;
                src2 = srcV;
                for (j = 0; j < 5; j++) {
                    /* LUT is selected by the field parity of each source line */
                    int f = (uvsrc_y + (j << fieldmv)) & 1;
                    for (i = 0; i < 5; i++) {
                        src[i] = lutuv[f][src[i]];
                        src2[i] = lutuv[f][src2[i]];
                    }
                    src += s->uvlinesize << fieldmv;
                    src2 += s->uvlinesize << fieldmv;
                }
            }
        }
        /* 4x4 chroma MC, rounding variant chosen by RND */
        if (avg) {
            if (!v->rnd) {
                h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
                h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            } else {
                v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
                v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            }
        } else {
            if (!v->rnd) {
                h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
                h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            } else {
                v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
                v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            }
        }
    }
}
1084
1085 /***********************************************************************/
1086 /**
1087 * @name VC-1 Block-level functions
1088 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1089 * @{
1090 */
1091
1092 /**
1093 * @def GET_MQUANT
1094 * @brief Get macroblock-level quantizer scale
1095 */
1096 #define GET_MQUANT() \
1097 if (v->dquantfrm) { \
1098 int edges = 0; \
1099 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1100 if (v->dqbilevel) { \
1101 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1102 } else { \
1103 mqdiff = get_bits(gb, 3); \
1104 if (mqdiff != 7) \
1105 mquant = v->pq + mqdiff; \
1106 else \
1107 mquant = get_bits(gb, 5); \
1108 } \
1109 } \
1110 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1111 edges = 1 << v->dqsbedge; \
1112 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1113 edges = (3 << v->dqsbedge) % 15; \
1114 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1115 edges = 15; \
1116 if ((edges&1) && !s->mb_x) \
1117 mquant = v->altpq; \
1118 if ((edges&2) && s->first_slice_line) \
1119 mquant = v->altpq; \
1120 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1121 mquant = v->altpq; \
1122 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1123 mquant = v->altpq; \
1124 if (!mquant || mquant > 31) { \
1125 av_log(v->s.avctx, AV_LOG_ERROR, \
1126 "Overriding invalid mquant %d\n", mquant); \
1127 mquant = 1; \
1128 } \
1129 }
1130
1131 /**
1132 * @def GET_MVDATA(_dmv_x, _dmv_y)
1133 * @brief Get MV differentials
1134 * @see MVDATA decoding from 8.3.5.2, p(1)20
1135 * @param _dmv_x Horizontal differential for decoded MV
1136 * @param _dmv_y Vertical differential for decoded MV
1137 */
1138 #define GET_MVDATA(_dmv_x, _dmv_y) \
1139 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1140 VC1_MV_DIFF_VLC_BITS, 2); \
1141 if (index > 36) { \
1142 mb_has_coeffs = 1; \
1143 index -= 37; \
1144 } else \
1145 mb_has_coeffs = 0; \
1146 s->mb_intra = 0; \
1147 if (!index) { \
1148 _dmv_x = _dmv_y = 0; \
1149 } else if (index == 35) { \
1150 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1151 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1152 } else if (index == 36) { \
1153 _dmv_x = 0; \
1154 _dmv_y = 0; \
1155 s->mb_intra = 1; \
1156 } else { \
1157 index1 = index % 6; \
1158 if (!s->quarter_sample && index1 == 5) val = 1; \
1159 else val = 0; \
1160 if (size_table[index1] - val > 0) \
1161 val = get_bits(gb, size_table[index1] - val); \
1162 else val = 0; \
1163 sign = 0 - (val&1); \
1164 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1165 \
1166 index1 = index / 6; \
1167 if (!s->quarter_sample && index1 == 5) val = 1; \
1168 else val = 0; \
1169 if (size_table[index1] - val > 0) \
1170 val = get_bits(gb, size_table[index1] - val); \
1171 else val = 0; \
1172 sign = 0 - (val & 1); \
1173 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
1174 }
1175
/**
 * Decode an MV differential for interlaced field pictures.
 *
 * Reads one MVDATA code from the bitstream.  The VLC table and escape
 * value depend on whether one or two reference fields are used
 * (v->numref); DMVRANGE (v->dmvrange) widens the representable range of
 * either component.  On the escape code the raw k_x/k_y-bit differentials
 * are read directly, with the prediction-field flag folded into the LSB
 * of the vertical component for two-reference pictures.  Otherwise the
 * joint index is decomposed base-9 into per-component magnitude classes.
 *
 * @param v         VC-1 decoder context
 * @param[out] dmv_x     horizontal MV differential
 * @param[out] dmv_y     vertical MV differential
 * @param[out] pred_flag prediction-field flag; may be NULL when unused
 */
static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
                                                   int *dmv_y, int *pred_flag)
{
    int index, index1;
    int extend_x = 0, extend_y = 0;
    GetBitContext *gb = &v->s.gb;
    int bits, esc;
    int val, sign;
    const int* offs_tab;

    /* two-reference field pictures use a different VLC and escape index */
    if (v->numref) {
        bits = VC1_2REF_MVDATA_VLC_BITS;
        esc  = 125;
    } else {
        bits = VC1_1REF_MVDATA_VLC_BITS;
        esc  = 71;
    }
    /* DMVRANGE selects which components use the extended offset table */
    switch (v->dmvrange) {
    case 1:
        extend_x = 1;
        break;
    case 2:
        extend_y = 1;
        break;
    case 3:
        extend_x = extend_y = 1;
        break;
    }
    index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
    if (index == esc) {
        /* escape: raw differentials follow */
        *dmv_x = get_bits(gb, v->k_x);
        *dmv_y = get_bits(gb, v->k_y);
        if (v->numref) {
            /* LSB of dmv_y carries the prediction-field flag */
            if (pred_flag) {
                *pred_flag = *dmv_y & 1;
                *dmv_y     = (*dmv_y + *pred_flag) >> 1;
            } else {
                *dmv_y     = (*dmv_y + (*dmv_y & 1)) >> 1;
            }
        }
    }
    else {
        /* joint index: horizontal class is (index+1) % 9, vertical (index+1) / 9 */
        if (extend_x)
            offs_tab = offset_table2;
        else
            offs_tab = offset_table1;
        index1 = (index + 1) % 9;
        if (index1 != 0) {
            val    = get_bits(gb, index1 + extend_x);
            sign   = 0 -(val & 1);
            *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
        } else
            *dmv_x = 0;
        if (extend_y)
            offs_tab = offset_table2;
        else
            offs_tab = offset_table1;
        index1 = (index + 1) / 9;
        if (index1 > v->numref) {
            val    = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
            sign   = 0 - (val & 1);
            *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
        } else
            *dmv_y = 0;
        if (v->numref && pred_flag)
            *pred_flag = index1 & 1;
    }
}
1244
1245 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1246 {
1247 int scaledvalue, refdist;
1248 int scalesame1, scalesame2;
1249 int scalezone1_x, zone1offset_x;
1250 int table_index = dir ^ v->second_field;
1251
1252 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1253 refdist = v->refdist;
1254 else
1255 refdist = dir ? v->brfd : v->frfd;
1256 if (refdist > 3)
1257 refdist = 3;
1258 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1259 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1260 scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1261 zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1262
1263 if (FFABS(n) > 255)
1264 scaledvalue = n;
1265 else {
1266 if (FFABS(n) < scalezone1_x)
1267 scaledvalue = (n * scalesame1) >> 8;
1268 else {
1269 if (n < 0)
1270 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1271 else
1272 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1273 }
1274 }
1275 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1276 }
1277
1278 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1279 {
1280 int scaledvalue, refdist;
1281 int scalesame1, scalesame2;
1282 int scalezone1_y, zone1offset_y;
1283 int table_index = dir ^ v->second_field;
1284
1285 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1286 refdist = v->refdist;
1287 else
1288 refdist = dir ? v->brfd : v->frfd;
1289 if (refdist > 3)
1290 refdist = 3;
1291 scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1292 scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1293 scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1294 zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1295
1296 if (FFABS(n) > 63)
1297 scaledvalue = n;
1298 else {
1299 if (FFABS(n) < scalezone1_y)
1300 scaledvalue = (n * scalesame1) >> 8;
1301 else {
1302 if (n < 0)
1303 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1304 else
1305 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1306 }
1307 }
1308
1309 if (v->cur_field_type && !v->ref_field_type[dir])
1310 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1311 else
1312 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1313 }
1314
1315 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1316 {
1317 int scalezone1_x, zone1offset_x;
1318 int scaleopp1, scaleopp2, brfd;
1319 int scaledvalue;
1320
1321 brfd = FFMIN(v->brfd, 3);
1322 scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1323 zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1324 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1325 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1326
1327 if (FFABS(n) > 255)
1328 scaledvalue = n;
1329 else {
1330 if (FFABS(n) < scalezone1_x)
1331 scaledvalue = (n * scaleopp1) >> 8;
1332 else {
1333 if (n < 0)
1334 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1335 else
1336 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1337 }
1338 }
1339 return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1340 }
1341
1342 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1343 {
1344 int scalezone1_y, zone1offset_y;
1345 int scaleopp1, scaleopp2, brfd;
1346 int scaledvalue;
1347
1348 brfd = FFMIN(v->brfd, 3);
1349 scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1350 zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1351 scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1352 scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1353
1354 if (FFABS(n) > 63)
1355 scaledvalue = n;
1356 else {
1357 if (FFABS(n) < scalezone1_y)
1358 scaledvalue = (n * scaleopp1) >> 8;
1359 else {
1360 if (n < 0)
1361 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1362 else
1363 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1364 }
1365 }
1366 if (v->cur_field_type && !v->ref_field_type[dir]) {
1367 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1368 } else {
1369 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1370 }
1371 }
1372
1373 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1374 int dim, int dir)
1375 {
1376 int brfd, scalesame;
1377 int hpel = 1 - v->s.quarter_sample;
1378
1379 n >>= hpel;
1380 if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1381 if (dim)
1382 n = scaleforsame_y(v, i, n, dir) << hpel;
1383 else
1384 n = scaleforsame_x(v, n, dir) << hpel;
1385 return n;
1386 }
1387 brfd = FFMIN(v->brfd, 3);
1388 scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1389
1390 n = (n * scalesame >> 8) << hpel;
1391 return n;
1392 }
1393
1394 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1395 int dim, int dir)
1396 {
1397 int refdist, scaleopp;
1398 int hpel = 1 - v->s.quarter_sample;
1399
1400 n >>= hpel;
1401 if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1402 if (dim)
1403 n = scaleforopp_y(v, n, dir) << hpel;
1404 else
1405 n = scaleforopp_x(v, n) << hpel;
1406 return n;
1407 }
1408 if (v->s.pict_type != AV_PICTURE_TYPE_B)
1409 refdist = FFMIN(v->refdist, 3);
1410 else
1411 refdist = dir ? v->brfd : v->frfd;
1412 scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1413
1414 n = (n * scaleopp >> 8) << hpel;
1415 return n;
1416 }
1417
/** Predict and set motion vector
 *
 * Builds the MV prediction for luma block @p n from the neighbouring
 * blocks A (above), B (above-diagonal) and C (left): validates each
 * candidate, scales cross-field candidates in field mode, takes the
 * median, applies the pullback rule (8.3.5.3.4) and the hybrid
 * prediction rule (8.3.5.3.5, which may read a HYBRIDPRED bit from the
 * bitstream), then adds the decoded differential and stores the result
 * modulo the MV range into s->mv and current_picture.motion_val.
 *
 * @param v        VC-1 decoder context
 * @param n        luma block index within the MB (0..3)
 * @param dmv_x    decoded horizontal MV differential
 * @param dmv_y    decoded vertical MV differential
 * @param mv1      nonzero in 1-MV mode: the result is duplicated into all
 *                 four block positions
 * @param r_x      horizontal MV range
 * @param r_y      vertical MV range
 * @param is_intra per-block intra flags used to invalidate predictors
 * @param pred_flag prediction-field flag (two-reference field pictures)
 * @param dir      prediction direction: 0 = forward, 1 = backward
 */
static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
                               int mv1, int r_x, int r_y, uint8_t* is_intra,
                               int pred_flag, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;
    int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
    int opposite, a_f, b_f, c_f;
    int16_t field_predA[2];
    int16_t field_predB[2];
    int16_t field_predC[2];
    int a_valid, b_valid, c_valid;
    int hybridmv_thresh, y_bias = 0;

    if (v->mv_mode == MV_PMODE_MIXED_MV ||
        ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
        mixedmv_pic = 1;
    else
        mixedmv_pic = 0;
    /* scale MV difference to be quad-pel */
    dmv_x <<= 1 - s->quarter_sample;
    dmv_y <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy   = s->block_index[n];

    /* intra MB: zero out both directions' motion data and return */
    if (s->mb_intra) {
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
        s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
        s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
        if (mv1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0]        = 0;
            s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1]        = 0;
            s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0]     = 0;
            s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1]     = 0;
            s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0]        = 0;
            s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1]        = 0;
            /* NOTE(review): unlike every sibling line this index lacks
             * "+ v->blocks_off"; looks like a typo — confirm against the
             * [0]-direction block above before changing */
            s->current_picture.motion_val[1][xy + wrap][0]                     = 0;
            s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1]     = 0;
            s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
        }
        return;
    }

    /* candidate predictors: C = left, A = above, B = above-diagonal */
    C = s->current_picture.motion_val[dir][xy -    1 + v->blocks_off];
    A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
    if (mv1) {
        if (v->field_mode && mixedmv_pic)
            off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        else
            off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
    } else {
        //in 4-MV mode different blocks have different B predictor position
        switch (n) {
        case 0:
            off = (s->mb_x > 0) ? -1 : 1;
            break;
        case 1:
            off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
            break;
        case 2:
            off = 1;
            break;
        case 3:
            off = -1;
        }
    }
    B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];

    /* a predictor is only usable if it lies inside the picture/slice and,
     * in field mode, is not intra coded */
    a_valid = !s->first_slice_line || (n == 2 || n == 3);
    b_valid = a_valid && (s->mb_width > 1);
    c_valid = s->mb_x || (n == 1 || n == 3);
    if (v->field_mode) {
        a_valid = a_valid && !is_intra[xy - wrap];
        b_valid = b_valid && !is_intra[xy - wrap + off];
        c_valid = c_valid && !is_intra[xy - 1];
    }

    /* gather candidates and count how many come from each field */
    if (a_valid) {
        a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
        num_oppfield  += a_f;
        num_samefield += 1 - a_f;
        field_predA[0] = A[0];
        field_predA[1] = A[1];
    } else {
        field_predA[0] = field_predA[1] = 0;
        a_f = 0;
    }
    if (b_valid) {
        b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
        num_oppfield  += b_f;
        num_samefield += 1 - b_f;
        field_predB[0] = B[0];
        field_predB[1] = B[1];
    } else {
        field_predB[0] = field_predB[1] = 0;
        b_f = 0;
    }
    if (c_valid) {
        c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
        num_oppfield  += c_f;
        num_samefield += 1 - c_f;
        field_predC[0] = C[0];
        field_predC[1] = C[1];
    } else {
        field_predC[0] = field_predC[1] = 0;
        c_f = 0;
    }

    /* decide whether the prediction targets the opposite field and scale
     * candidates from the other field accordingly */
    if (v->field_mode) {
        if (!v->numref)
            // REFFIELD determines if the last field or the second-last field is
            // to be used as reference
            opposite = 1 - v->reffield;
        else {
            if (num_samefield <= num_oppfield)
                opposite = 1 - pred_flag;
            else
                opposite = pred_flag;
        }
    } else
        opposite = 0;
    if (opposite) {
        if (a_valid && !a_f) {
            field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
            field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
        }
        if (b_valid && !b_f) {
            field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
            field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
        }
        if (c_valid && !c_f) {
            field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
            field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
        }
        v->mv_f[dir][xy + v->blocks_off] = 1;
        v->ref_field_type[dir] = !v->cur_field_type;
    } else {
        if (a_valid && a_f) {
            field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
            field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
        }
        if (b_valid && b_f) {
            field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
            field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
        }
        if (c_valid && c_f) {
            field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
            field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
        }
        v->mv_f[dir][xy + v->blocks_off] = 0;
        v->ref_field_type[dir] = v->cur_field_type;
    }

    /* single-candidate fallback in priority order A, C, B */
    if (a_valid) {
        px = field_predA[0];
        py = field_predA[1];
    } else if (c_valid) {
        px = field_predC[0];
        py = field_predC[1];
    } else if (b_valid) {
        px = field_predB[0];
        py = field_predB[1];
    } else {
        px = 0;
        py = 0;
    }

    /* two or more candidates: component-wise median */
    if (num_samefield + num_oppfield > 1) {
        px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
        py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
    }

    /* Pullback MV as specified in 8.3.5.3.4 */
    if (!v->field_mode) {
        int qx, qy, X, Y;
        qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
        qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
        X  = (s->mb_width  << 6) - 4;
        Y  = (s->mb_height << 6) - 4;
        if (mv1) {
            if (qx + px < -60) px = -60 - qx;
            if (qy + py < -60) py = -60 - qy;
        } else {
            if (qx + px < -28) px = -28 - qx;
            if (qy + py < -28) py = -28 - qy;
        }
        if (qx + px > X) px = X - qx;
        if (qy + py > Y) py = Y - qy;
    }

    if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
        hybridmv_thresh = 32;
        if (a_valid && c_valid) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
            if (sum > hybridmv_thresh) {
                if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
                    px = field_predA[0];
                    py = field_predA[1];
                } else {
                    px = field_predC[0];
                    py = field_predC[1];
                }
            } else {
                if (is_intra[xy - 1])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
                if (sum > hybridmv_thresh) {
                    if (get_bits1(&s->gb)) {
                        px = field_predA[0];
                        py = field_predA[1];
                    } else {
                        px = field_predC[0];
                        py = field_predC[1];
                    }
                }
            }
        }
    }

    if (v->field_mode && v->numref)
        r_y >>= 1;
    if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
        y_bias = 1;
    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
    if (mv1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0]        = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1]        = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
        s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0]     = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1]     = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
        s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
        s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
        v->mv_f[dir][xy +    1 + v->blocks_off] = v->mv_f[dir][xy +            v->blocks_off];
        v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
    }
}
1671
/** Predict and set motion vector for interlaced frame picture MBs
 *
 * Builds the MV prediction for block @p n of an interlaced frame picture
 * from neighbours A (left), B (above) and C (above-right, or above-left at
 * the picture's right edge).  Frame-MV and field-MV neighbours are mixed by
 * averaging the two field MVs when the types differ.  For field-MV blocks
 * the candidates are split into same-field and opposite-field sets (by the
 * parity bit of the vertical component) and the dominant set is used.
 *
 * @param v        VC-1 decoder context
 * @param n        block index within the MB (0..3)
 * @param dmv_x    decoded horizontal MV differential
 * @param dmv_y    decoded vertical MV differential
 * @param mvn      MV count mode: 1 = 1-MV (duplicate to all 4 blocks),
 *                 2 = 2-field-MV (duplicate to the horizontal neighbour)
 * @param r_x      horizontal MV range
 * @param r_y      vertical MV range
 * @param is_intra per-block intra flags (unused here; v->is_intra is used)
 * @param dir      prediction direction: 0 = forward, 1 = backward
 */
static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                                     int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2];
    int px, py;
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposite
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    /* intra MB: zero out both directions' motion data and return */
    if (s->mb_intra) {
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
        s->current_picture.motion_val[1][xy][0] = 0;
        s->current_picture.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1][0]        = 0;
            s->current_picture.motion_val[0][xy + 1][1]        = 0;
            s->current_picture.motion_val[0][xy + wrap][0]     = 0;
            s->current_picture.motion_val[0][xy + wrap][1]     = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1][0]        = 0;
            s->current_picture.motion_val[1][xy + 1][1]        = 0;
            s->current_picture.motion_val[1][xy + wrap][0]     = 0;
            s->current_picture.motion_val[1][xy + wrap][1]     = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
        }
        return;
    }

    /* row offset towards the other field line of the left neighbour */
    off = ((n == 0) || (n == 1)) ? 1 : -1;
    /* predict A */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->current_picture.motion_val[dir][xy - 1][0];
            A[1] = s->current_picture.motion_val[dir][xy - 1][1];
            a_valid = 1;
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
            a_valid = 1;
        }
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
            a_valid = 0;
            A[0] = A[1] = 0;
        }
    } else
        A[0] = A[1] = 0;
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                b_valid = 1;
                n_adj   = n | 2;
                pos_b   = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    n_adj = (n & 2) | (n & 1);
                }
                B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
                /* above neighbour has field MV, current is frame MV: average */
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                }
            }
            if (s->mb_width > 1) {
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    c_valid = 1;
                    n_adj   = 2;
                    pos_c   = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        n_adj = n & 2;
                    }
                    C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                    }
                    /* at the right picture edge C comes from above-left instead */
                    if (s->mb_x == s->mb_width - 1) {
                        if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                            c_valid = 1;
                            n_adj   = 3;
                            pos_c   = s->block_index[3] - 2 * wrap - 2;
                            if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                n_adj = n | 1;
                            }
                            C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
                            if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                                C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                                C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                            }
                        } else
                            c_valid = 0;
                    }
                }
            }
        }
    } else {
        /* bottom blocks of a frame-MV MB take B/C from the top row of the same MB */
        pos_b   = s->block_index[1];
        b_valid = 1;
        B[0]    = s->current_picture.motion_val[dir][pos_b][0];
        B[1]    = s->current_picture.motion_val[dir][pos_b][1];
        pos_c   = s->block_index[0];
        c_valid = 1;
        C[0]    = s->current_picture.motion_val[dir][pos_c][0];
        C[1]    = s->current_picture.motion_val[dir][pos_c][1];
    }

    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
        A[0] = A[1] = 0;
    }
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    }
    if (!v->blk_mv_type[xy]) {
        /* frame-MV block: plain median (or B alone for 1-MB-wide pictures) */
        if (s->mb_width == 1) {
            px = B[0];
            py = B[1];
        } else {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if (a_valid) { px = A[0]; py = A[1]; }
                if (b_valid) { px = B[0]; py = B[1]; }
                if (c_valid) { px = C[0]; py = C[1]; }
            } else
                px = py = 0;
        }
    } else {
        /* field-MV block: classify candidates by field parity (bit 2 of the
         * vertical component) and prefer the dominant set */
        if (a_valid)
            field_a = (A[1] & 4) ? 1 : 0;
        else
            field_a = 0;
        if (b_valid)
            field_b = (B[1] & 4) ? 1 : 0;
        else
            field_b = 0;
        if (c_valid)
            field_c = (C[1] & 4) ? 1 : 0;
        else
            field_c = 0;

        num_oppfield  = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from same field set depending on priority
                   the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
            } else {
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
            }
        } else if (total_valid == 2) {
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (!field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                } else px = py = 0;
            } else {
                if (field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else if (c_valid) {
                    px = C[0];
                    py = C[1];
                } else
                    px = py = 0;
            }
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
        } else
            px = py = 0;
    }

    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1       ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1       ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap    ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap    ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
        s->mv[dir][n + 1][0] = s->mv[dir][n][0];
        s->mv[dir][n + 1][1] = s->mv[dir][n][1];
    }
}
1898
/** Motion compensation for direct or interpolated blocks in B-frames
 *
 * Uses the backward MV in s->mv[1][0] against the next (backward) reference
 * picture and calls the avg_* DSP routines, i.e. the result is averaged into
 * the forward prediction already written to s->dest[].
 */
static void vc1_interp_mc(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int off, off_uv;
    /* in field mode the vertical edge limit refers to a single field */
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    /* intensity-compensation state of the backward (next) reference */
    int use_ic = v->next_use_ic;

    /* no backward reference frame available -- nothing to interpolate */
    if (!v->field_mode && !v->s.next_picture.f->data[0])
        return;

    mx = s->mv[1][0][0];
    my = s->mv[1][0][1];
    /* chroma MV = luma MV halved, with 3/4-pel positions rounded up */
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    if (v->field_mode) {
        /* adjust vertical MVs when referencing the opposite field parity */
        if (v->cur_field_type != v->ref_field_type[1])
            my = my - 2 + 4 * v->cur_field_type;
        uvmy = uvmy - 2 + 4 * v->cur_field_type;
    }
    if (v->fastuvmc) {
        /* FASTUVMC: round odd chroma MV components away from zero */
        uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
    }
    srcY = s->next_picture.f->data[0];
    srcU = s->next_picture.f->data[1];
    srcV = s->next_picture.f->data[2];

    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    /* clip source position to the picture (plus a small margin) */
    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip( src_x, -16, s->mb_width * 16);
        src_y = av_clip( src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
    } else {
        src_x = av_clip( src_x, -17, s->avctx->coded_width);
        src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    }

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    /* step down one line to address the second field of the reference */
    if (v->field_mode && v->ref_field_type[1]) {
        srcY += s->current_picture_ptr->f->linesize[0];
        srcU += s->current_picture_ptr->f->linesize[1];
        srcV += s->current_picture_ptr->f->linesize[2];
    }

    /* for grayscale we should not try to read from unknown area */
    if (s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    /* go through the scratch buffer whenever the source block can fall
     * (partly) outside the picture, or must be modified before use
     * (range reduction / intensity compensation) */
    if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
        || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
        || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
        uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;

        /* include the extra top/left line needed by mspel interpolation */
        srcY -= s->mspel * (1 + s->linesize);
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 17 + s->mspel * 2, 17 + s->mspel * 2,
                                 src_x - s->mspel, src_y - s->mspel,
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        s->vdsp.emulated_edge_mc(uvbuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }

        /* apply the intensity-compensation LUTs of the backward reference */
        if (use_ic) {
            uint8_t (*luty )[256] = v->next_luty;
            uint8_t (*lutuv)[256] = v->next_lutuv;
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                /* select the LUT matching the field parity of each line */
                int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
                src += s->linesize;
            }
            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i] = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* undo the mspel offset so srcY points at the block itself again */
        srcY += s->mspel * (1 + s->linesize);
    }

    off = 0;
    off_uv = 0;

    if (s->mspel) {
        /* quarter-pel MC: handle the 16x16 luma block as four 8x8 quadrants */
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc
        dxy = (my & 2) | ((mx & 2) >> 1);

        if (!v->rnd)
            s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
        else
            s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
    }

    if (s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
2068
2069 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2070 {
2071 int n = bfrac;
2072
2073 #if B_FRACTION_DEN==256
2074 if (inv)
2075 n -= 256;
2076 if (!qs)
2077 return 2 * ((value * n + 255) >> 9);
2078 return (value * n + 128) >> 8;
2079 #else
2080 if (inv)
2081 n -= B_FRACTION_DEN;
2082 if (!qs)
2083 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2084 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2085 #endif
2086 }
2087
2088 /** Reconstruct motion vector for B-frame and do motion compensation
2089 */
2090 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2091 int direct, int mode)
2092 {
2093 if (direct) {
2094 vc1_mc_1mv(v, 0);
2095 vc1_interp_mc(v);
2096 return;
2097 }
2098 if (mode == BMV_TYPE_INTERPOLATED) {
2099 vc1_mc_1mv(v, 0);
2100 vc1_interp_mc(v);
2101 return;
2102 }
2103
2104 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2105 }
2106
/** Predict the forward/backward motion vectors of a B-frame macroblock
 *  and store them into s->mv[] and current_picture.motion_val[].
 * @param v      VC-1 decoder context
 * @param dmv_x  decoded MV differentials, x: [0] forward, [1] backward
 * @param dmv_y  decoded MV differentials, y: [0] forward, [1] backward
 * @param direct nonzero for direct-coded MBs; MVs then come solely from
 *               scaling the co-located MV of the next reference picture
 * @param mvtype BMV_TYPE_FORWARD, BMV_TYPE_BACKWARD or BMV_TYPE_INTERPOLATED
 */
static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                                 int direct, int mvtype)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;   /* neighbour MV candidates (above / above-side / left) */
    int px, py;           /* predicted MV before adding the differential */
    int sum;
    int r_x, r_y;         /* MV range used by the signed-modulus wrap on store */
    const uint8_t *is_intra = v->mb_type[0];

    r_x = v->range_x;
    r_y = v->range_y;
    /* scale MV difference to be quad-pel */
    dmv_x[0] <<= 1 - s->quarter_sample;
    dmv_y[0] <<= 1 - s->quarter_sample;
    dmv_x[1] <<= 1 - s->quarter_sample;
    dmv_y[1] <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy = s->block_index[0];

    /* intra MB: zero both MV pairs and return */
    if (s->mb_intra) {
        s->current_picture.motion_val[0][xy + v->blocks_off][0] =
        s->current_picture.motion_val[0][xy + v->blocks_off][1] =
        s->current_picture.motion_val[1][xy + v->blocks_off][0] =
        s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
        return;
    }
    if (!v->field_mode) {
        /* derive candidate MVs by scaling the co-located MV of the next
         * reference by the B fraction (fwd) / its complement (bwd) */
        s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
        s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
        s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
        s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);

        /* Pullback predicted motion vectors as specified in 8.4.5.4 */
        s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
        s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
        s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
        s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    }
    if (direct) {
        /* direct mode uses the scaled co-located MVs as-is */
        s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
        s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
        s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
        s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
        return;
    }

    if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* gather the forward-MV neighbour candidates */
        C = s->current_picture.motion_val[0][xy - 2];
        A = s->current_picture.motion_val[0][xy - wrap * 2];
        /* B is above-left in the last MB column, above-right otherwise */
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[0][xy - wrap * 2 + off];

        if (!s->mb_x) C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                /* half-pel units: MB position/limits are in units of 32 */
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                /* quarter-pel units: MB position/limits are in units of 64 */
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE: deliberately disabled via the leading 0 -- enabling it would
         * consume extra bits from the stream (get_bits1 below) */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* same procedure for the backward MV, using motion_val[1] */
        C = s->current_picture.motion_val[1][xy - 2];
        A = s->current_picture.motion_val[1][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[1][xy - wrap * 2 + off];

        if (!s->mb_x)
            C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE: deliberately disabled via the leading 0, see above */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */

        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    /* publish the final MVs for neighbour prediction of later MBs */
    s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
    s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
    s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
    s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}
2319
/** Predict B-frame motion vectors for interlaced-field pictures.
 * @param v         VC-1 decoder context
 * @param n         block index within the MB (used for 4-MV prediction)
 * @param dmv_x     decoded MV differentials, x: [0] forward, [1] backward
 * @param dmv_y     decoded MV differentials, y: [0] forward, [1] backward
 * @param mv1       nonzero for 1-MV macroblocks
 * @param pred_flag predictor selection flags: [0] forward, [1] backward
 */
static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
{
    int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
    MpegEncContext *s = &v->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    if (v->bmvtype == BMV_TYPE_DIRECT) {
        int total_opp, k, f;
        /* direct mode: both MVs are derived by scaling the co-located MV of
         * the next reference picture, unless the co-located MB is intra */
        if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
            s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 1, s->quarter_sample);
            s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 1, s->quarter_sample);

            /* majority vote over the four co-located blocks decides the
             * reference field parity */
            total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
            f = (total_opp > 2) ? 1 : 0;
        } else {
            /* co-located MB is intra: zero MVs, same-parity reference */
            s->mv[0][0][0] = s->mv[0][0][1] = 0;
            s->mv[1][0][0] = s->mv[1][0][1] = 0;
            f = 0;
        }
        v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
        /* replicate the MVs and field flag to all four blocks of the MB */
        for (k = 0; k < 4; k++) {
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
            v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
        }
        return;
    }
    if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
        /* interpolated: predict both directions for the whole MB (1-MV) */
        vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        return;
    }
    if (dir) { // backward
        vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        if (n == 3 || mv1) {
            /* NOTE(review): presumably keeps the opposite (forward)
             * direction's predictor state in sync -- confirm */
            vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
        }
    } else { // forward
        vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        if (n == 3 || mv1) {
            vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
        }
    }
}
2376
2377 /** Get predicted DC value for I-frames only
2378 * prediction dir: left=0, top=1
2379 * @param s MpegEncContext
2380 * @param overlap flag indicating that overlap filtering is used
2381 * @param pq integer part of picture quantizer
2382 * @param[in] n block index in the current MB
2383 * @param dc_val_ptr Pointer to DC predictor
2384 * @param dir_ptr Prediction direction for use in AC prediction
2385 */
2386 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2387 int16_t **dc_val_ptr, int *dir_ptr)
2388 {
2389 int a, b, c, wrap, pred, scale;
2390 int16_t *dc_val;
2391 static const uint16_t dcpred[32] = {
2392 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2393 114, 102, 93, 85, 79, 73, 68, 64,
2394 60, 57, 54, 51, 49, 47, 45, 43,
2395 41, 39, 38, 37, 35, 34, 33
2396 };
2397
2398 /* find prediction - wmv3_dc_scale always used here in fact */
2399 if (n < 4) scale = s->y_dc_scale;
2400 else scale = s->c_dc_scale;
2401
2402 wrap = s->block_wrap[n];
2403 dc_val = s->dc_val[0] + s->block_index[n];
2404
2405 /* B A
2406 * C X
2407 */
2408 c = dc_val[ - 1];
2409 b = dc_val[ - 1 - wrap];
2410 a = dc_val[ - wrap];
2411
2412 if (pq < 9 || !overlap) {
2413 /* Set outer values */
2414 if (s->first_slice_line && (n != 2 && n != 3))
2415 b = a = dcpred[scale];
2416 if (s->mb_x == 0 && (n != 1 && n != 3))
2417 b = c = dcpred[scale];
2418 } else {
2419 /* Set outer values */
2420 if (s->first_slice_line && (n != 2 && n != 3))
2421 b = a = 0;
2422 if (s->mb_x == 0 && (n != 1 && n != 3))
2423 b = c = 0;
2424 }
2425
2426 if (abs(a - b) <= abs(b - c)) {
2427 pred = c;
2428 *dir_ptr = 1; // left
2429 } else {
2430 pred = a;
2431 *dir_ptr = 0; // top
2432 }
2433
2434 /* update predictor */
2435 *dc_val_ptr = &dc_val[0];
2436 return pred;
2437 }
2438
2439
/** Get predicted DC value
 * prediction dir: left=0, top=1
 * @param s MpegEncContext
 * @param overlap flag indicating that overlap filtering is used
 * @param pq integer part of picture quantizer
 * @param[in] n block index in the current MB
 * @param a_avail flag indicating top block availability
 * @param c_avail flag indicating left block availability
 * @param dc_val_ptr Pointer to DC predictor
 * @param dir_ptr Prediction direction for use in AC prediction
 * @return predicted DC value, or 0 when the scale table entry is degenerate
 */
static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
                              int a_avail, int c_avail,
                              int16_t **dc_val_ptr, int *dir_ptr)
{
    int a, b, c, wrap, pred;
    int16_t *dc_val;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int q1, q2 = 0;
    int dqscale_index;

    wrap = s->block_wrap[n];
    dc_val = s->dc_val[0] + s->block_index[n];

    /* B A
     * C X
     */
    c = dc_val[ - 1];
    b = dc_val[ - 1 - wrap];
    a = dc_val[ - wrap];
    /* scale predictors if needed */
    q1 = s->current_picture.qscale_table[mb_pos];
    dqscale_index = s->y_dc_scale_table[q1] - 1;
    if (dqscale_index < 0)
        return 0;
    /* when a neighbour was coded with a different quantizer, its DC is
     * rescaled to the current quantizer via ff_vc1_dqscale; the
     * "+0x20000 >> 18" is round-to-nearest, so ff_vc1_dqscale presumably
     * holds 2^18-scaled reciprocals of the DC scale -- NOTE(review):
     * reciprocal interpretation inferred, confirm against vc1data.c */
    if (c_avail && (n != 1 && n != 3)) {
        /* left neighbour lives in the previous MB of the row */
        q2 = s->current_picture.qscale_table[mb_pos - 1];
        if (q2 && q2 != q1)
            c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    }
    if (a_avail && (n != 2 && n != 3)) {
        /* top neighbour lives in the MB of the previous row */
        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
        if (q2 && q2 != q1)
            a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    }
    if (a_avail && c_avail && (n != 3)) {
        /* locate the MB containing the top-left neighbour for this block n */
        int off = mb_pos;
        if (n != 1)
            off--;
        if (n != 2)
            off -= s->mb_stride;
        q2 = s->current_picture.qscale_table[off];
        if (q2 && q2 != q1)
            b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
    }

    /* choose the available direction with the smaller DC gradient */
    if (a_avail && c_avail) {
        if (abs(a - b) <= abs(b - c)) {
            pred = c;
            *dir_ptr = 1; // left
        } else {
            pred = a;
            *dir_ptr = 0; // top
        }
    } else if (a_avail) {
        pred = a;
        *dir_ptr = 0; // top
    } else if (c_avail) {
        pred = c;
        *dir_ptr = 1; // left
    } else {
        /* no neighbours available at all */
        pred = 0;
        *dir_ptr = 1; // left
    }

    /* update predictor */
    *dc_val_ptr = &dc_val[0];
    return pred;
}
2519
2520 /** @} */ // Block group
2521
2522 /**
2523 * @name VC1 Macroblock-level functions in Simple/Main Profiles
 * @see 7.1.4, p91 and 8.1.1.7, p104
2525 * @{
2526 */
2527
2528 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2529 uint8_t **coded_block_ptr)
2530 {
2531 int xy, wrap, pred, a, b, c;
2532
2533 xy = s->block_index[n];
2534 wrap = s->b8_stride;
2535
2536 /* B C
2537 * A X
2538 */
2539 a = s->coded_block[xy - 1 ];
2540 b = s->coded_block[xy - 1 - wrap];
2541 c = s->coded_block[xy - wrap];
2542
2543 if (b == c) {
2544 pred = a;
2545 } else {
2546 pred = c;
2547 }
2548
2549 /* store value */
2550 *coded_block_ptr = &s->coded_block[xy];
2551
2552 return pred;
2553 }
2554
/**
 * Decode one AC coefficient
 * @param v The VC1 context
 * @param last Last coefficient
 * @param skip How much zero coefficients to skip
 * @param value Decoded AC coefficient value
 * @param codingset set of VLC to decode data
 * @see 8.1.3.4
 */
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
                                int *value, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    int index, escape, run = 0, level = 0, lst = 0;

    index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
    if (index != ff_vc1_ac_sizes[codingset] - 1) {
        /* regular code: (run, level, last) straight from the tables;
         * overreading the bitstream also forces "last" to end the block */
        run = vc1_index_decode_table[codingset][index][0];
        level = vc1_index_decode_table[codingset][index][1];
        lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
        if (get_bits1(gb))
            level = -level;
    } else {
        /* the last table entry is the escape code */
        escape = decode210(gb);
        if (escape != 2) {
            /* escape modes 0/1: decode another regular code, then add a
             * table-driven delta to the level (mode 0) or run (mode 1) */
            index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
            run = vc1_index_decode_table[codingset][index][0];
            level = vc1_index_decode_table[codingset][index][1];
            lst = index >= vc1_last_decode_table[codingset];
            if (escape == 0) {
                if (lst)
                    level += vc1_last_delta_level_table[codingset][run];
                else
                    level += vc1_delta_level_table[codingset][run];
            } else {
                if (lst)
                    run += vc1_last_delta_run_table[codingset][level] + 1;
                else
                    run += vc1_delta_run_table[codingset][level] + 1;
            }
            if (get_bits1(gb))
                level = -level;
        } else {
            /* escape mode 2: run and level are coded with explicit widths;
             * the widths are read once per picture and cached in the context */
            int sign;
            lst = get_bits1(gb);
            if (v->s.esc3_level_length == 0) {
                if (v->pq < 8 || v->dquantfrm) { // table 59
                    v->s.esc3_level_length = get_bits(gb, 3);
                    if (!v->s.esc3_level_length)
                        v->s.esc3_level_length = get_bits(gb, 2) + 8;
                } else { // table 60
                    v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
                }
                v->s.esc3_run_length = 3 + get_bits(gb, 2);
            }
            run = get_bits(gb, v->s.esc3_run_length);
            sign = get_bits1(gb);
            level = get_bits(gb, v->s.esc3_level_length);
            if (sign)
                level = -level;
        }
    }

    *last = lst;
    *skip = run;
    *value = level;
}
2622
2623 /** Decode intra block in intra frames - should be faster than decode_intra_block
2624 * @param v VC1Context
2625 * @param block block to decode
2626 * @param[in] n subblock index
2627 * @param coded are AC coeffs present or not
2628 * @param codingset set of VLC to decode data
2629 */
2630 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2631 int coded, int codingset)
2632 {
2633 GetBitContext *gb = &v->s.gb;
2634 MpegEncContext *s = &v->s;
2635 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2636 int i;
2637 int16_t *dc_val;
2638 int16_t *ac_val, *ac_val2;
2639 int dcdiff;
2640
2641 /* Get DC differential */
2642 if (n < 4) {
2643 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);