1 /*
2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006-2007 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 /**
24 * @file
25 * VC-1 and WMV3 decoder
26 *
27 */
28 #include "internal.h"
29 #include "dsputil.h"
30 #include "avcodec.h"
31 #include "mpegvideo.h"
32 #include "h263.h"
33 #include "vc1.h"
34 #include "vc1data.h"
35 #include "vc1acdata.h"
36 #include "msmpeg4data.h"
37 #include "unary.h"
38 #include "simple_idct.h"
39 #include "mathops.h"
40 #include "vdpau_internal.h"
41
42 #undef NDEBUG
43 #include <assert.h>
44
45 #define MB_INTRA_VLC_BITS 9
46 #define DC_VLC_BITS 9
47 #define AC_VLC_BITS 9
48 static const uint16_t table_mb_intra[64][2];
49
50
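/* Offsets carving the shared static vlc_table[] below into per-VLC slices:
 * each VLC initialized in vc1_init_common() is given the range
 * [vlc_offs[i], vlc_offs[i+1]); the last entry matches the table size (15078). */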
51 static const uint16_t vlc_offs[] = {
52 0, 520, 552, 616, 1128, 1160, 1224, 1740, 1772, 1836, 1900, 2436,
53 2986, 3050, 3610, 4154, 4218, 4746, 5326, 5390, 5902, 6554, 7658, 8620,
54 9262, 10202, 10756, 11310, 12228, 15078
55 };
56
57 /**
58 * Init VC-1 specific tables and VC1Context members
59 * @param v The VC1Context to initialize
60  * @return 0 on success
61 */
62 static int vc1_init_common(VC1Context *v)
63 {
64 static int done = 0;
65 int i = 0;
66 static VLC_TYPE vlc_table[15078][2];
67
68 v->hrd_rate = v->hrd_buffer = NULL;
69
70 /* VLC tables */
71 if(!done)
72 {
73 INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
74 ff_vc1_bfraction_bits, 1, 1,
75 ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS);
76 INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
77 ff_vc1_norm2_bits, 1, 1,
78 ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS);
79 INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
80 ff_vc1_norm6_bits, 1, 1,
81 ff_vc1_norm6_codes, 2, 2, 556);
82 INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
83 ff_vc1_imode_bits, 1, 1,
84 ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS);
85 for (i=0; i<3; i++)
86 {
87 ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i*3+0]];
88 ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i*3+1] - vlc_offs[i*3+0];
89 init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
90 ff_vc1_ttmb_bits[i], 1, 1,
91 ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
92 ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i*3+1]];
93 ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i*3+2] - vlc_offs[i*3+1];
94 init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
95 ff_vc1_ttblk_bits[i], 1, 1,
96 ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
97 ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i*3+2]];
98 ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i*3+3] - vlc_offs[i*3+2];
99 init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
100 ff_vc1_subblkpat_bits[i], 1, 1,
101 ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
102 }
103 for(i=0; i<4; i++)
104 {
105 ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i*3+9]];
106 ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i*3+10] - vlc_offs[i*3+9];
107 init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
108 ff_vc1_4mv_block_pattern_bits[i], 1, 1,
109 ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
110 ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i*3+10]];
111 ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i*3+11] - vlc_offs[i*3+10];
112 init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
113 ff_vc1_cbpcy_p_bits[i], 1, 1,
114 ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
115 ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i*3+11]];
116 ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i*3+12] - vlc_offs[i*3+11];
117 init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
118 ff_vc1_mv_diff_bits[i], 1, 1,
119 ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
120 }
121 for(i=0; i<8; i++){
122 ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i+21]];
123 ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i+22] - vlc_offs[i+21];
124 init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
125 &vc1_ac_tables[i][0][1], 8, 4,
126 &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC);
127 }
128 done = 1;
129 }
130
131 /* Other defaults */
132 v->pq = -1;
133 v->mvrange = 0; /* 7.1.1.18, p80 */
134
135 return 0;
136 }
137
138 /***********************************************************************/
139 /**
140 * @defgroup vc1bitplane VC-1 Bitplane decoding
141 * @see 8.7, p56
142 * @{
143 */
144
145 /**
146 * Imode types
147 * @{
148 */
149 enum Imode {
150 IMODE_RAW,
151 IMODE_NORM2,
152 IMODE_DIFF2,
153 IMODE_NORM6,
154 IMODE_DIFF6,
155 IMODE_ROWSKIP,
156 IMODE_COLSKIP
157 };
158 /** @} */ //imode defines
159
160
161 /** @} */ //Bitplane group
162
163 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
164 {
165 MpegEncContext *s = &v->s;
166 int j;
167 if (!s->first_slice_line) {
168 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
169 if (s->mb_x)
170 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16*s->linesize, s->linesize, pq);
171 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16*s->linesize+8, s->linesize, pq);
172 for(j = 0; j < 2; j++){
173 v->vc1dsp.vc1_v_loop_filter8(s->dest[j+1], s->uvlinesize, pq);
174 if (s->mb_x)
175 v->vc1dsp.vc1_h_loop_filter8(s->dest[j+1]-8*s->uvlinesize, s->uvlinesize, pq);
176 }
177 }
178 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8*s->linesize, s->linesize, pq);
179
180 if (s->mb_y == s->mb_height-1) {
181 if (s->mb_x) {
182 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
183 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
184 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
185 }
186 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
187 }
188 }
189
190 /** Put block onto picture
191 */
192 static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
193 {
194 uint8_t *Y;
195 int ys, us, vs;
196 DSPContext *dsp = &v->s.dsp;
197
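    /* Range-reduced frames are reconstructed at half range; expand the samples
     * back to full range around 128 ((x - 64) * 2 is the same as (x - 128) * 2 + 128). */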
198 if(v->rangeredfrm) {
199 int i, j, k;
200 for(k = 0; k < 6; k++)
201 for(j = 0; j < 8; j++)
202 for(i = 0; i < 8; i++)
203 block[k][i + j*8] = (block[k][i + j*8] - 64) << 1;
204
205 }
206 ys = v->s.current_picture.linesize[0];
207 us = v->s.current_picture.linesize[1];
208 vs = v->s.current_picture.linesize[2];
209 Y = v->s.dest[0];
210
211 dsp->put_pixels_clamped(block[0], Y, ys);
212 dsp->put_pixels_clamped(block[1], Y + 8, ys);
213 Y += ys * 8;
214 dsp->put_pixels_clamped(block[2], Y, ys);
215 dsp->put_pixels_clamped(block[3], Y + 8, ys);
216
217 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
218 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
219 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
220 }
221 }
222
223 /** Do motion compensation over 1 macroblock
224  * Mostly adapted from hpel_motion() and qpel_motion() in mpegvideo.c
225 */
226 static void vc1_mc_1mv(VC1Context *v, int dir)
227 {
228 MpegEncContext *s = &v->s;
229 DSPContext *dsp = &v->s.dsp;
230 uint8_t *srcY, *srcU, *srcV;
231 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
232
233 if(!v->s.last_picture.data[0])return;
234
235 mx = s->mv[dir][0][0];
236 my = s->mv[dir][0][1];
237
238 // store motion vectors for further use in B frames
239 if(s->pict_type == FF_P_TYPE) {
240 s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
241 s->current_picture.motion_val[1][s->block_index[0]][1] = my;
242 }
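    /* The chroma MV is half the luma MV; the ((mx & 3) == 3) term rounds a
     * 3/4-pel luma fraction up before halving. With FASTUVMC the result is
     * additionally rounded toward zero to a half-pel position below. */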
243 uvmx = (mx + ((mx & 3) == 3)) >> 1;
244 uvmy = (my + ((my & 3) == 3)) >> 1;
245 if(v->fastuvmc) {
246 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
247 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
248 }
249 if(!dir) {
250 srcY = s->last_picture.data[0];
251 srcU = s->last_picture.data[1];
252 srcV = s->last_picture.data[2];
253 } else {
254 srcY = s->next_picture.data[0];
255 srcU = s->next_picture.data[1];
256 srcV = s->next_picture.data[2];
257 }
258
259 src_x = s->mb_x * 16 + (mx >> 2);
260 src_y = s->mb_y * 16 + (my >> 2);
261 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
262 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
263
264 if(v->profile != PROFILE_ADVANCED){
265 src_x = av_clip( src_x, -16, s->mb_width * 16);
266 src_y = av_clip( src_y, -16, s->mb_height * 16);
267 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
268 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
269 }else{
270 src_x = av_clip( src_x, -17, s->avctx->coded_width);
271 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
272 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
273 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
274 }
275
276 srcY += src_y * s->linesize + src_x;
277 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
278 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
279
280     /* for grayscale we should not try to read from an unknown area */
281 if(s->flags & CODEC_FLAG_GRAY) {
282 srcU = s->edge_emu_buffer + 18 * s->linesize;
283 srcV = s->edge_emu_buffer + 18 * s->linesize;
284 }
285
286 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
287 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
288 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
289 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
290
291 srcY -= s->mspel * (1 + s->linesize);
292 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
293 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
294 srcY = s->edge_emu_buffer;
295 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
296 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
297 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
298 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
299 srcU = uvbuf;
300 srcV = uvbuf + 16;
301 /* if we deal with range reduction we need to scale source blocks */
302 if(v->rangeredfrm) {
303 int i, j;
304 uint8_t *src, *src2;
305
306 src = srcY;
307 for(j = 0; j < 17 + s->mspel*2; j++) {
308 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
309 src += s->linesize;
310 }
311 src = srcU; src2 = srcV;
312 for(j = 0; j < 9; j++) {
313 for(i = 0; i < 9; i++) {
314 src[i] = ((src[i] - 128) >> 1) + 128;
315 src2[i] = ((src2[i] - 128) >> 1) + 128;
316 }
317 src += s->uvlinesize;
318 src2 += s->uvlinesize;
319 }
320 }
321 /* if we deal with intensity compensation we need to scale source blocks */
322 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
323 int i, j;
324 uint8_t *src, *src2;
325
326 src = srcY;
327 for(j = 0; j < 17 + s->mspel*2; j++) {
328 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
329 src += s->linesize;
330 }
331 src = srcU; src2 = srcV;
332 for(j = 0; j < 9; j++) {
333 for(i = 0; i < 9; i++) {
334 src[i] = v->lutuv[src[i]];
335 src2[i] = v->lutuv[src2[i]];
336 }
337 src += s->uvlinesize;
338 src2 += s->uvlinesize;
339 }
340 }
341 srcY += s->mspel * (1 + s->linesize);
342 }
343
344 if(s->mspel) {
345 dxy = ((my & 3) << 2) | (mx & 3);
346 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
347 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
348 srcY += s->linesize * 8;
349 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
350 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
351 } else { // hpel mc - always used for luma
352 dxy = (my & 2) | ((mx & 2) >> 1);
353
354 if(!v->rnd)
355 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
356 else
357 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
358 }
359
360 if(s->flags & CODEC_FLAG_GRAY) return;
361 /* Chroma MC always uses qpel bilinear */
362 uvmx = (uvmx&3)<<1;
363 uvmy = (uvmy&3)<<1;
364 if(!v->rnd){
365 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
366 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
367 }else{
368 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
369 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
370 }
371 }
372
373 /** Do motion compensation for 4-MV macroblock - luminance block
374 */
375 static void vc1_mc_4mv_luma(VC1Context *v, int n)
376 {
377 MpegEncContext *s = &v->s;
378 DSPContext *dsp = &v->s.dsp;
379 uint8_t *srcY;
380 int dxy, mx, my, src_x, src_y;
381 int off;
382
383 if(!v->s.last_picture.data[0])return;
384 mx = s->mv[0][n][0];
385 my = s->mv[0][n][1];
386 srcY = s->last_picture.data[0];
387
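    /* Offset of the n-th 8x8 luma block inside the 16x16 destination macroblock:
     * (n & 1) selects the right half, (n & 2) the bottom half. */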
388 off = s->linesize * 4 * (n&2) + (n&1) * 8;
389
390 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
391 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
392
393 if(v->profile != PROFILE_ADVANCED){
394 src_x = av_clip( src_x, -16, s->mb_width * 16);
395 src_y = av_clip( src_y, -16, s->mb_height * 16);
396 }else{
397 src_x = av_clip( src_x, -17, s->avctx->coded_width);
398 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
399 }
400
401 srcY += src_y * s->linesize + src_x;
402
403 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
404 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
405 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
406 srcY -= s->mspel * (1 + s->linesize);
407 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
408 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
409 srcY = s->edge_emu_buffer;
410 /* if we deal with range reduction we need to scale source blocks */
411 if(v->rangeredfrm) {
412 int i, j;
413 uint8_t *src;
414
415 src = srcY;
416 for(j = 0; j < 9 + s->mspel*2; j++) {
417 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
418 src += s->linesize;
419 }
420 }
421 /* if we deal with intensity compensation we need to scale source blocks */
422 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
423 int i, j;
424 uint8_t *src;
425
426 src = srcY;
427 for(j = 0; j < 9 + s->mspel*2; j++) {
428 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
429 src += s->linesize;
430 }
431 }
432 srcY += s->mspel * (1 + s->linesize);
433 }
434
435 if(s->mspel) {
436 dxy = ((my & 3) << 2) | (mx & 3);
437 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
438 } else { // hpel mc - always used for luma
439 dxy = (my & 2) | ((mx & 2) >> 1);
440 if(!v->rnd)
441 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
442 else
443 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
444 }
445 }
446
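/** Return the median of four values, computed as the average of the two
 * middle ones, e.g. median4(1, 5, 2, 7) = (FFMIN(5,7) + FFMAX(1,2)) / 2 = 3.
 */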
447 static inline int median4(int a, int b, int c, int d)
448 {
449 if(a < b) {
450 if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
451 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
452 } else {
453 if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
454 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
455 }
456 }
457
458
459 /** Do motion compensation for 4-MV macroblock - both chroma blocks
460 */
461 static void vc1_mc_4mv_chroma(VC1Context *v)
462 {
463 MpegEncContext *s = &v->s;
464 DSPContext *dsp = &v->s.dsp;
465 uint8_t *srcU, *srcV;
466 int uvmx, uvmy, uvsrc_x, uvsrc_y;
467 int i, idx, tx = 0, ty = 0;
468 int mvx[4], mvy[4], intra[4];
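    /* count[idx] is the number of bits set in idx, i.e. how many of the four
     * luma blocks are intra-coded. */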
469 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
470
471 if(!v->s.last_picture.data[0])return;
472 if(s->flags & CODEC_FLAG_GRAY) return;
473
474 for(i = 0; i < 4; i++) {
475 mvx[i] = s->mv[0][i][0];
476 mvy[i] = s->mv[0][i][1];
477 intra[i] = v->mb_type[0][s->block_index[i]];
478 }
479
480     /* calculate the chroma MV from the four luma MVs */
481 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
482 if(!idx) { // all blocks are inter
483 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
484 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
485 } else if(count[idx] == 1) { // 3 inter blocks
486 switch(idx) {
487 case 0x1:
488 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
489 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
490 break;
491 case 0x2:
492 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
493 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
494 break;
495 case 0x4:
496 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
497 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
498 break;
499 case 0x8:
500 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
501 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
502 break;
503 }
504 } else if(count[idx] == 2) {
505 int t1 = 0, t2 = 0;
506 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
507 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
508 tx = (mvx[t1] + mvx[t2]) / 2;
509 ty = (mvy[t1] + mvy[t2]) / 2;
510 } else {
511 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
512 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
513         return; //no need to do MC for intra blocks
514 }
515
516 s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
517 s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
518 uvmx = (tx + ((tx&3) == 3)) >> 1;
519 uvmy = (ty + ((ty&3) == 3)) >> 1;
520 if(v->fastuvmc) {
521 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
522 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
523 }
524
525 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
526 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
527
528 if(v->profile != PROFILE_ADVANCED){
529 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
530 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
531 }else{
532 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
533 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
534 }
535
536 srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
537 srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
538 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
539 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
540 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
541 s->dsp.emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
542 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
543 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
544 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
545 srcU = s->edge_emu_buffer;
546 srcV = s->edge_emu_buffer + 16;
547
548 /* if we deal with range reduction we need to scale source blocks */
549 if(v->rangeredfrm) {
550 int i, j;
551 uint8_t *src, *src2;
552
553 src = srcU; src2 = srcV;
554 for(j = 0; j < 9; j++) {
555 for(i = 0; i < 9; i++) {
556 src[i] = ((src[i] - 128) >> 1) + 128;
557 src2[i] = ((src2[i] - 128) >> 1) + 128;
558 }
559 src += s->uvlinesize;
560 src2 += s->uvlinesize;
561 }
562 }
563 /* if we deal with intensity compensation we need to scale source blocks */
564 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
565 int i, j;
566 uint8_t *src, *src2;
567
568 src = srcU; src2 = srcV;
569 for(j = 0; j < 9; j++) {
570 for(i = 0; i < 9; i++) {
571 src[i] = v->lutuv[src[i]];
572 src2[i] = v->lutuv[src2[i]];
573 }
574 src += s->uvlinesize;
575 src2 += s->uvlinesize;
576 }
577 }
578 }
579
580 /* Chroma MC always uses qpel bilinear */
581 uvmx = (uvmx&3)<<1;
582 uvmy = (uvmy&3)<<1;
583 if(!v->rnd){
584 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
585 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
586 }else{
587 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
588 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
589 }
590 }
591
592 /***********************************************************************/
593 /**
594 * @defgroup vc1block VC-1 Block-level functions
595 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
596 * @{
597 */
598
599 /**
600 * @def GET_MQUANT
601 * @brief Get macroblock-level quantizer scale
602 */
603 #define GET_MQUANT() \
604 if (v->dquantfrm) \
605 { \
606 int edges = 0; \
607 if (v->dqprofile == DQPROFILE_ALL_MBS) \
608 { \
609 if (v->dqbilevel) \
610 { \
611 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
612 } \
613 else \
614 { \
615 mqdiff = get_bits(gb, 3); \
616 if (mqdiff != 7) mquant = v->pq + mqdiff; \
617 else mquant = get_bits(gb, 5); \
618 } \
619 } \
620 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
621 edges = 1 << v->dqsbedge; \
622 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
623 edges = (3 << v->dqsbedge) % 15; \
624 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
625 edges = 15; \
626 if((edges&1) && !s->mb_x) \
627 mquant = v->altpq; \
628 if((edges&2) && s->first_slice_line) \
629 mquant = v->altpq; \
630 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
631 mquant = v->altpq; \
632 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
633 mquant = v->altpq; \
634 }
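/* In GET_MQUANT above, 'edges' is a bitmask of picture edges whose macroblocks
 * use v->altpq instead of v->pq: bit 0 = left column, bit 1 = top row,
 * bit 2 = right column, bit 3 = bottom row. */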
635
636 /**
637 * @def GET_MVDATA(_dmv_x, _dmv_y)
638 * @brief Get MV differentials
639 * @see MVDATA decoding from 8.3.5.2, p(1)20
640 * @param _dmv_x Horizontal differential for decoded MV
641 * @param _dmv_y Vertical differential for decoded MV
642 */
643 #define GET_MVDATA(_dmv_x, _dmv_y) \
644 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table,\
645 VC1_MV_DIFF_VLC_BITS, 2); \
646 if (index > 36) \
647 { \
648 mb_has_coeffs = 1; \
649 index -= 37; \
650 } \
651 else mb_has_coeffs = 0; \
652 s->mb_intra = 0; \
653 if (!index) { _dmv_x = _dmv_y = 0; } \
654 else if (index == 35) \
655 { \
656 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
657 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
658 } \
659 else if (index == 36) \
660 { \
661 _dmv_x = 0; \
662 _dmv_y = 0; \
663 s->mb_intra = 1; \
664 } \
665 else \
666 { \
667 index1 = index%6; \
668 if (!s->quarter_sample && index1 == 5) val = 1; \
669 else val = 0; \
670 if(size_table[index1] - val > 0) \
671 val = get_bits(gb, size_table[index1] - val); \
672 else val = 0; \
673 sign = 0 - (val&1); \
674 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
675 \
676 index1 = index/6; \
677 if (!s->quarter_sample && index1 == 5) val = 1; \
678 else val = 0; \
679 if(size_table[index1] - val > 0) \
680 val = get_bits(gb, size_table[index1] - val); \
681 else val = 0; \
682 sign = 0 - (val&1); \
683 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
684 }
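/* In GET_MVDATA above, 'sign' is 0 or -1 depending on the LSB of 'val', so
 * (sign ^ x) - sign yields +x or -x: the LSB of the coded value carries the
 * sign of the MV differential. */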
685
686 /** Predict and set motion vector
687 */
688 static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
689 {
690 int xy, wrap, off = 0;
691 int16_t *A, *B, *C;
692 int px, py;
693 int sum;
694
695 /* scale MV difference to be quad-pel */
696 dmv_x <<= 1 - s->quarter_sample;
697 dmv_y <<= 1 - s->quarter_sample;
698
699 wrap = s->b8_stride;
700 xy = s->block_index[n];
701
702 if(s->mb_intra){
703 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
704 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
705 s->current_picture.motion_val[1][xy][0] = 0;
706 s->current_picture.motion_val[1][xy][1] = 0;
707 if(mv1) { /* duplicate motion data for 1-MV block */
708 s->current_picture.motion_val[0][xy + 1][0] = 0;
709 s->current_picture.motion_val[0][xy + 1][1] = 0;
710 s->current_picture.motion_val[0][xy + wrap][0] = 0;
711 s->current_picture.motion_val[0][xy + wrap][1] = 0;
712 s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
713 s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
714 s->current_picture.motion_val[1][xy + 1][0] = 0;
715 s->current_picture.motion_val[1][xy + 1][1] = 0;
716 s->current_picture.motion_val[1][xy + wrap][0] = 0;
717 s->current_picture.motion_val[1][xy + wrap][1] = 0;
718 s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
719 s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
720 }
721 return;
722 }
723
724 C = s->current_picture.motion_val[0][xy - 1];
725 A = s->current_picture.motion_val[0][xy - wrap];
726 if(mv1)
727 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
728 else {
729 //in 4-MV mode different blocks have different B predictor position
730 switch(n){
731 case 0:
732 off = (s->mb_x > 0) ? -1 : 1;
733 break;
734 case 1:
735 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
736 break;
737 case 2:
738 off = 1;
739 break;
740 case 3:
741 off = -1;
742 }
743 }
744 B = s->current_picture.motion_val[0][xy - wrap + off];
745
746 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
747 if(s->mb_width == 1) {
748 px = A[0];
749 py = A[1];
750 } else {
751 px = mid_pred(A[0], B[0], C[0]);
752 py = mid_pred(A[1], B[1], C[1]);
753 }
754 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
755 px = C[0];
756 py = C[1];
757 } else {
758 px = py = 0;
759 }
760 /* Pullback MV as specified in 8.3.5.3.4 */
761 {
762 int qx, qy, X, Y;
763 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
764 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
765 X = (s->mb_width << 6) - 4;
766 Y = (s->mb_height << 6) - 4;
767 if(mv1) {
768 if(qx + px < -60) px = -60 - qx;
769 if(qy + py < -60) py = -60 - qy;
770 } else {
771 if(qx + px < -28) px = -28 - qx;
772 if(qy + py < -28) py = -28 - qy;
773 }
774 if(qx + px > X) px = X - qx;
775 if(qy + py > Y) py = Y - qy;
776 }
777 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
778 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
779 if(is_intra[xy - wrap])
780 sum = FFABS(px) + FFABS(py);
781 else
782 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
783 if(sum > 32) {
784 if(get_bits1(&s->gb)) {
785 px = A[0];
786 py = A[1];
787 } else {
788 px = C[0];
789 py = C[1];
790 }
791 } else {
792 if(is_intra[xy - 1])
793 sum = FFABS(px) + FFABS(py);
794 else
795 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
796 if(sum > 32) {
797 if(get_bits1(&s->gb)) {
798 px = A[0];
799 py = A[1];
800 } else {
801 px = C[0];
802 py = C[1];
803 }
804 }
805 }
806 }
807 /* store MV using signed modulus of MV range defined in 4.11 */
808 s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
809 s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
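    /* e.g. with r_x = 64 and px + dmv_x = 70, this wraps to
     * ((70 + 64) & 127) - 64 = -58, keeping the stored MV within [-r_x, r_x - 1]. */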
810 if(mv1) { /* duplicate motion data for 1-MV block */
811 s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
812 s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
813 s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
814 s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
815 s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
816 s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
817 }
818 }
819
820 /** Motion compensation for direct or interpolated blocks in B-frames
821 */
822 static void vc1_interp_mc(VC1Context *v)
823 {
824 MpegEncContext *s = &v->s;
825 DSPContext *dsp = &v->s.dsp;
826 uint8_t *srcY, *srcU, *srcV;
827 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
828
829 if(!v->s.next_picture.data[0])return;
830
831 mx = s->mv[1][0][0];
832 my = s->mv[1][0][1];
833 uvmx = (mx + ((mx & 3) == 3)) >> 1;
834 uvmy = (my + ((my & 3) == 3)) >> 1;
835 if(v->fastuvmc) {
836 uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
837 uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
838 }
839 srcY = s->next_picture.data[0];
840 srcU = s->next_picture.data[1];
841 srcV = s->next_picture.data[2];
842
843 src_x = s->mb_x * 16 + (mx >> 2);
844 src_y = s->mb_y * 16 + (my >> 2);
845 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
846 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
847
848 if(v->profile != PROFILE_ADVANCED){
849 src_x = av_clip( src_x, -16, s->mb_width * 16);
850 src_y = av_clip( src_y, -16, s->mb_height * 16);
851 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
852 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
853 }else{
854 src_x = av_clip( src_x, -17, s->avctx->coded_width);
855 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
856 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
857 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
858 }
859
860 srcY += src_y * s->linesize + src_x;
861 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
862 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
863
864     /* for grayscale we should not try to read from an unknown area */
865 if(s->flags & CODEC_FLAG_GRAY) {
866 srcU = s->edge_emu_buffer + 18 * s->linesize;
867 srcV = s->edge_emu_buffer + 18 * s->linesize;
868 }
869
870 if(v->rangeredfrm
871 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
872 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
873 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
874
875 srcY -= s->mspel * (1 + s->linesize);
876 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
877 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
878 srcY = s->edge_emu_buffer;
879 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
880 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
881 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
882 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
883 srcU = uvbuf;
884 srcV = uvbuf + 16;
885 /* if we deal with range reduction we need to scale source blocks */
886 if(v->rangeredfrm) {
887 int i, j;
888 uint8_t *src, *src2;
889
890 src = srcY;
891 for(j = 0; j < 17 + s->mspel*2; j++) {
892 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
893 src += s->linesize;
894 }
895 src = srcU; src2 = srcV;
896 for(j = 0; j < 9; j++) {
897 for(i = 0; i < 9; i++) {
898 src[i] = ((src[i] - 128) >> 1) + 128;
899 src2[i] = ((src2[i] - 128) >> 1) + 128;
900 }
901 src += s->uvlinesize;
902 src2 += s->uvlinesize;
903 }
904 }
905 srcY += s->mspel * (1 + s->linesize);
906 }
907
908 if(s->mspel) {
909 dxy = ((my & 3) << 2) | (mx & 3);
910 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
911 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
912 srcY += s->linesize * 8;
913 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
914 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
915 } else { // hpel mc
916 dxy = (my & 2) | ((mx & 2) >> 1);
917
918 if(!v->rnd)
919 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
920 else
921 dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
922 }
923
924 if(s->flags & CODEC_FLAG_GRAY) return;
925     /* Chroma MC always uses qpel bilinear */
926 uvmx = (uvmx&3)<<1;
927 uvmy = (uvmy&3)<<1;
928 if(!v->rnd){
929 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
930 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
931 }else{
932 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
933 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
934 }
935 }
936
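/** Scale an MV component by BFRACTION, used below in vc1_pred_b_mv() for
 * direct-mode prediction. In the B_FRACTION_DEN == 256 case, with
 * bfrac = 128 (i.e. 1/2) and qs = 1:
 * scale_mv(10, 128, 0, 1) = (10*128 + 128) >> 8 = 5 and
 * scale_mv(10, 128, 1, 1) = (10*(128 - 256) + 128) >> 8 = -5.
 */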
937 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
938 {
939 int n = bfrac;
940
941 #if B_FRACTION_DEN==256
942 if(inv)
943 n -= 256;
944 if(!qs)
945 return 2 * ((value * n + 255) >> 9);
946 return (value * n + 128) >> 8;
947 #else
948 if(inv)
949 n -= B_FRACTION_DEN;
950 if(!qs)
951 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
952 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
953 #endif
954 }
955
956 /** Reconstruct motion vector for B-frame and do motion compensation
957 */
958 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
959 {
960 if(v->use_ic) {
961 v->mv_mode2 = v->mv_mode;
962 v->mv_mode = MV_PMODE_INTENSITY_COMP;
963 }
964 if(direct) {
965 vc1_mc_1mv(v, 0);
966 vc1_interp_mc(v);
967 if(v->use_ic) v->mv_mode = v->mv_mode2;
968 return;
969 }
970 if(mode == BMV_TYPE_INTERPOLATED) {
971 vc1_mc_1mv(v, 0);
972 vc1_interp_mc(v);
973 if(v->use_ic) v->mv_mode = v->mv_mode2;
974 return;
975 }
976
977 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
978 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
979 if(v->use_ic) v->mv_mode = v->mv_mode2;
980 }
981
982 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
983 {
984 MpegEncContext *s = &v->s;
985 int xy, wrap, off = 0;
986 int16_t *A, *B, *C;
987 int px, py;
988 int sum;
989 int r_x, r_y;
990 const uint8_t *is_intra = v->mb_type[0];
991
992 r_x = v->range_x;
993 r_y = v->range_y;
994 /* scale MV difference to be quad-pel */
995 dmv_x[0] <<= 1 - s->quarter_sample;
996 dmv_y[0] <<= 1 - s->quarter_sample;
997 dmv_x[1] <<= 1 - s->quarter_sample;
998 dmv_y[1] <<= 1 - s->quarter_sample;
999
1000 wrap = s->b8_stride;
1001 xy = s->block_index[0];
1002
1003 if(s->mb_intra) {
1004 s->current_picture.motion_val[0][xy][0] =
1005 s->current_picture.motion_val[0][xy][1] =
1006 s->current_picture.motion_val[1][xy][0] =
1007 s->current_picture.motion_val[1][xy][1] = 0;
1008 return;
1009 }
1010 s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
1011 s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
1012 s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
1013 s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
1014
1015 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
1016 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
1017 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
1018 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
1019 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
1020 if(direct) {
1021 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
1022 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
1023 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
1024 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
1025 return;
1026 }
1027
1028 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
1029 C = s->current_picture.motion_val[0][xy - 2];
1030 A = s->current_picture.motion_val[0][xy - wrap*2];
1031 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1032 B = s->current_picture.motion_val[0][xy - wrap*2 + off];
1033
1034 if(!s->mb_x) C[0] = C[1] = 0;
1035 if(!s->first_slice_line) { // predictor A is not out of bounds
1036 if(s->mb_width == 1) {
1037 px = A[0];
1038 py = A[1];
1039 } else {
1040 px = mid_pred(A[0], B[0], C[0]);
1041 py = mid_pred(A[1], B[1], C[1]);
1042 }
1043 } else if(s->mb_x) { // predictor C is not out of bounds
1044 px = C[0];
1045 py = C[1];
1046 } else {
1047 px = py = 0;
1048 }
1049 /* Pullback MV as specified in 8.3.5.3.4 */
1050 {
1051 int qx, qy, X, Y;
1052 if(v->profile < PROFILE_ADVANCED) {
1053 qx = (s->mb_x << 5);
1054 qy = (s->mb_y << 5);
1055 X = (s->mb_width << 5) - 4;
1056 Y = (s->mb_height << 5) - 4;
1057 if(qx + px < -28) px = -28 - qx;
1058 if(qy + py < -28) py = -28 - qy;
1059 if(qx + px > X) px = X - qx;
1060 if(qy + py > Y) py = Y - qy;
1061 } else {
1062 qx = (s->mb_x << 6);
1063 qy = (s->mb_y << 6);
1064 X = (s->mb_width << 6) - 4;
1065 Y = (s->mb_height << 6) - 4;
1066 if(qx + px < -60) px = -60 - qx;
1067 if(qy + py < -60) py = -60 - qy;
1068 if(qx + px > X) px = X - qx;
1069 if(qy + py > Y) py = Y - qy;
1070 }
1071 }
1072 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
1073 if(0 && !s->first_slice_line && s->mb_x) {
1074 if(is_intra[xy - wrap])
1075 sum = FFABS(px) + FFABS(py);
1076 else
1077 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
1078 if(sum > 32) {
1079 if(get_bits1(&s->gb)) {
1080 px = A[0];
1081 py = A[1];
1082 } else {
1083 px = C[0];
1084 py = C[1];
1085 }
1086 } else {
1087 if(is_intra[xy - 2])
1088 sum = FFABS(px) + FFABS(py);
1089 else
1090 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
1091 if(sum > 32) {
1092 if(get_bits1(&s->gb)) {
1093 px = A[0];
1094 py = A[1];
1095 } else {
1096 px = C[0];
1097 py = C[1];
1098 }
1099 }
1100 }
1101 }
1102 /* store MV using signed modulus of MV range defined in 4.11 */
1103 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
1104 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
1105 }
1106 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
1107 C = s->current_picture.motion_val[1][xy - 2];
1108 A = s->current_picture.motion_val[1][xy - wrap*2];
1109 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1110 B = s->current_picture.motion_val[1][xy - wrap*2 + off];
1111
1112 if(!s->mb_x) C[0] = C[1] = 0;
1113 if(!s->first_slice_line) { // predictor A is not out of bounds
1114 if(s->mb_width == 1) {
1115 px = A[0];
1116 py = A[1];
1117 } else {
1118 px = mid_pred(A[0], B[0], C[0]);
1119 py = mid_pred(A[1], B[1], C[1]);
1120 }
1121 } else if(s->mb_x) { // predictor C is not out of bounds
1122 px = C[0];
1123 py = C[1];
1124 } else {
1125 px = py = 0;
1126 }
1127 /* Pullback MV as specified in 8.3.5.3.4 */
1128 {
1129 int qx, qy, X, Y;
1130 if(v->profile < PROFILE_ADVANCED) {
1131 qx = (s->mb_x << 5);
1132 qy = (s->mb_y << 5);
1133 X = (s->mb_width << 5) - 4;
1134 Y = (s->mb_height << 5) - 4;
1135 if(qx + px < -28) px = -28 - qx;
1136 if(qy + py < -28) py = -28 - qy;
1137 if(qx + px > X) px = X - qx;
1138 if(qy + py > Y) py = Y - qy;
1139 } else {
1140 qx = (s->mb_x << 6);
1141 qy = (s->mb_y << 6);
1142 X = (s->mb_width << 6) - 4;
1143 Y = (s->mb_height << 6) - 4;
1144 if(qx + px < -60) px = -60 - qx;
1145 if(qy + py < -60) py = -60 - qy;
1146 if(qx + px > X) px = X - qx;
1147 if(qy + py > Y) py = Y - qy;
1148 }
1149 }
1150 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
1151 if(0 && !s->first_slice_line && s->mb_x) {
1152 if(is_intra[xy - wrap])
1153 sum = FFABS(px) + FFABS(py);
1154 else
1155 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
1156 if(sum > 32) {
1157 if(get_bits1(&s->gb)) {
1158 px = A[0];
1159 py = A[1];
1160 } else {
1161 px = C[0];
1162 py = C[1];
1163 }
1164 } else {
1165 if(is_intra[xy - 2])
1166 sum = FFABS(px) + FFABS(py);
1167 else
1168 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
1169 if(sum > 32) {
1170 if(get_bits1(&s->gb)) {
1171 px = A[0];
1172 py = A[1];
1173 } else {
1174 px = C[0];
1175 py = C[1];
1176 }
1177 }
1178 }
1179 }
1180 /* store MV using signed modulus of MV range defined in 4.11 */
1181
1182 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
1183 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
1184 }
1185 s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
1186 s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
1187 s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
1188 s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
1189 }
1190
1191 /** Get predicted DC value for I-frames only
1192 * prediction dir: left=0, top=1
1193 * @param s MpegEncContext
1194 * @param overlap flag indicating that overlap filtering is used
1195 * @param pq integer part of picture quantizer
1196 * @param[in] n block index in the current MB
1197 * @param dc_val_ptr Pointer to DC predictor
1198 * @param dir_ptr Prediction direction for use in AC prediction
1199 */
1200 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
1201 int16_t **dc_val_ptr, int *dir_ptr)
1202 {
1203 int a, b, c, wrap, pred, scale;
1204 int16_t *dc_val;
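    /* dcpred[i] is 1024/i rounded to the nearest integer: the default DC
     * predictor value used below when a neighbouring block lies outside the
     * picture (and pq < 9 or overlap filtering is off). */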
1205 static const uint16_t dcpred[32] = {
1206 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
1207 114, 102, 93, 85, 79, 73, 68, 64,
1208 60, 57, 54, 51, 49, 47, 45, 43,
1209 41, 39, 38, 37, 35, 34, 33
1210 };
1211
1212 /* find prediction - wmv3_dc_scale always used here in fact */
1213 if (n < 4) scale = s->y_dc_scale;
1214 else scale = s->c_dc_scale;
1215
1216 wrap = s->block_wrap[n];
1217 dc_val= s->dc_val[0] + s->block_index[n];
1218
1219 /* B A
1220 * C X
1221 */
1222 c = dc_val[ - 1];
1223 b = dc_val[ - 1 - wrap];
1224 a = dc_val[ - wrap];
1225
1226 if (pq < 9 || !overlap)
1227 {
1228 /* Set outer values */
1229 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
1230 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
1231 }
1232 else
1233 {
1234 /* Set outer values */
1235 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
1236 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
1237 }
1238
1239 if (abs(a - b) <= abs(b - c)) {
1240 pred = c;
1241 *dir_ptr = 1;//left
1242 } else {
1243 pred = a;
1244 *dir_ptr = 0;//top
1245 }
1246
1247 /* update predictor */
1248 *dc_val_ptr = &dc_val[0];
1249 return pred;
1250 }
1251
1252
1253 /** Get predicted DC value
1254 * prediction dir: left=0, top=1
1255 * @param s MpegEncContext
1256 * @param overlap flag indicating that overlap filtering is used
1257 * @param pq integer part of picture quantizer
1258 * @param[in] n block index in the current MB
1259 * @param a_avail flag indicating top block availability
1260 * @param c_avail flag indicating left block availability
1261 * @param dc_val_ptr Pointer to DC predictor
1262 * @param dir_ptr Prediction direction for use in AC prediction
1263 */
1264 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
1265 int a_avail, int c_avail,
1266 int16_t **dc_val_ptr, int *dir_ptr)
1267 {
1268 int a, b, c, wrap, pred;
1269 int16_t *dc_val;
1270 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1271 int q1, q2 = 0;
1272
1273 wrap = s->block_wrap[n];
1274 dc_val= s->dc_val[0] + s->block_index[n];
1275
1276 /* B A
1277 * C X
1278 */
1279 c = dc_val[ - 1];
1280 b = dc_val[ - 1 - wrap];
1281 a = dc_val[ - wrap];
1282 /* scale predictors if needed */
1283 q1 = s->current_picture.qscale_table[mb_pos];
1284 if(c_avail && (n!= 1 && n!=3)) {
1285 q2 = s->current_picture.qscale_table[mb_pos - 1];
1286 if(q2 && q2 != q1)
1287 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
1288 }
1289 if(a_avail && (n!= 2 && n!=3)) {
1290 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
1291 if(q2 && q2 != q1)
1292 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
1293 }
1294 if(a_avail && c_avail && (n!=3)) {
1295 int off = mb_pos;
1296 if(n != 1) off--;
1297 if(n != 2) off -= s->mb_stride;
1298 q2 = s->current_picture.qscale_table[off];
1299 if(q2 && q2 != q1)
1300 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
1301 }
1302
1303 if(a_avail && c_avail) {
1304 if(abs(a - b) <= abs(b - c)) {
1305 pred = c;
1306 *dir_ptr = 1;//left
1307 } else {
1308 pred = a;
1309 *dir_ptr = 0;//top
1310 }
1311 } else if(a_avail) {
1312 pred = a;
1313 *dir_ptr = 0;//top
1314 } else if(c_avail) {
1315 pred = c;
1316 *dir_ptr = 1;//left
1317 } else {
1318 pred = 0;
1319 *dir_ptr = 1;//left
1320 }
1321
1322 /* update predictor */
1323 *dc_val_ptr = &dc_val[0];
1324 return pred;
1325 }
1326
1327 /** @} */ // Block group
1328
1329 /**
1330 * @defgroup vc1_std_mb VC1 Macroblock-level functions in Simple/Main Profiles
1331 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1332 * @{
1333 */
1334
1335 static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
1336 {
1337 int xy, wrap, pred, a, b, c;
1338
1339 xy = s->block_index[n];
1340 wrap = s->b8_stride;
1341
1342 /* B C
1343 * A X
1344 */
1345 a = s->coded_block[xy - 1 ];
1346 b = s->coded_block[xy - 1 - wrap];
1347 c = s->coded_block[xy - wrap];
1348
1349 if (b == c) {
1350 pred = a;
1351 } else {
1352 pred = c;
1353 }
1354
1355 /* store value */
1356 *coded_block_ptr = &s->coded_block[xy];
1357
1358 return pred;
1359 }
1360
1361 /**
1362 * Decode one AC coefficient
1363 * @param v The VC1 context
1364 * @param last Last coefficient
1365  * @param skip How many zero coefficients to skip
1366 * @param value Decoded AC coefficient value
1367 * @param codingset set of VLC to decode data
1368 * @see 8.1.3.4
1369 */
1370 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
1371 {
1372 GetBitContext *gb = &v->s.gb;
1373 int index, escape, run = 0, level = 0, lst = 0;
1374
1375 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
1376 if (index != vc1_ac_sizes[codingset] - 1) {
1377 run = vc1_index_decode_table[codingset][index][0];
1378 level = vc1_index_decode_table[codingset][index][1];
1379 lst = index >= vc1_last_decode_table[codingset];
1380 if(get_bits1(gb))
1381 level = -level;
1382 } else {
1383 escape = decode210(gb);
1384 if (escape != 2) {
1385 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
1386 run = vc1_index_decode_table[codingset][index][0];
1387 level = vc1_index_decode_table[codingset][index][1];
1388 lst = index >= vc1_last_decode_table[codingset];
1389 if(escape == 0) {
1390 if(lst)
1391 level += vc1_last_delta_level_table[codingset][run];
1392 else
1393 level += vc1_delta_level_table[codingset][run];
1394 } else {
1395 if(lst)
1396 run += vc1_last_delta_run_table[codingset][level] + 1;
1397 else
1398 run += vc1_delta_run_table[codingset][level] + 1;
1399 }
1400 if(get_bits1(gb))
1401 level = -level;
1402 } else {
1403 int sign;
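            /* Escape mode 3: run, sign and level are coded explicitly. Their
             * bit widths are read here only while esc3_level_length is still 0
             * (tables 59/60, see the comments below) and reused for
             * subsequent escapes. */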
1404 lst = get_bits1(gb);
1405 if(v->s.esc3_level_length == 0) {
1406 if(v->pq < 8 || v->dquantfrm) { // table 59
1407 v->s.esc3_level_length = get_bits(gb, 3);
1408 if(!v->s.esc3_level_length)
1409 v->s.esc3_level_length = get_bits(gb, 2) + 8;
1410 } else { //table 60
1411 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
1412 }
1413 v->s.esc3_run_length = 3 + get_bits(gb, 2);
1414 }
1415 run = get_bits(gb, v->s.esc3_run_length);
1416 sign = get_bits1(gb);
1417 level = get_bits(gb, v->s.esc3_level_length);
1418 if(sign)
1419 level = -level;
1420 }
1421 }
1422
1423 *last = lst;
1424 *skip = run;
1425 *value = level;
1426 }
1427
1428 /** Decode intra block in intra frames - should be faster than decode_intra_block
1429 * @param v VC1Context
1430 * @param block block to decode
1431 * @param[in] n subblock index
1432 * @param coded are AC coeffs present or not
1433 * @param codingset set of VLC to decode data
1434 */
1435 static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
1436 {
1437 GetBitContext *gb = &v->s.gb;
1438 MpegEncContext *s = &v->s;
1439 int dc_pred_dir = 0; /* Direction of the DC prediction used */
1440 int i;
1441 int16_t *dc_val;
1442 int16_t *ac_val, *ac_val2;
1443 int dcdiff;
1444
1445 /* Get DC differential */
1446 if (n < 4) {
1447 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
1448 } else {
1449 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
1450 }
1451 if (dcdiff < 0){
1452 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
1453 return -1;
1454 }
1455 if (dcdiff)
1456 {
1457 if (dcdiff == 119 /* ESC index value */)
1458 {
1459 /* TODO: Optimize */
1460 if (v->pq == 1) dcdiff = get_bits(gb, 10);
1461 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
1462 else dcdiff = get_bits(gb, 8);
1463 }
1464 else
1465 {
1466 if (v->pq == 1)
1467 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
1468 else if (v->pq == 2)
1469 dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
1470 }
1471 if (get_bits1(gb))
1472 dcdiff = -dcdiff;
1473 }
1474
1475 /* Prediction */
1476 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
1477 *dc_val = dcdiff;
1478
1479 /* Store the quantized DC coeff, used for prediction */
1480 if (n < 4) {
1481 block[0] = dcdiff * s->y_dc_scale;
1482 } else {
1483 block[0] = dcdiff * s->c_dc_scale;
1484 }
1485 /* Skip ? */
1486 if (!coded) {
1487 goto not_coded;
1488 }
1489
1490 //AC Decoding
1491 i = 1;
1492
1493 {
1494 int last = 0, skip, value;
1495 const uint8_t *zz_table;
1496 int scale;
1497 int k;
1498
1499 scale = v->pq * 2 + v->halfpq;
1500
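        /* The zigzag scan depends on whether AC prediction is used and, if so,
         * on the DC prediction direction (dc_pred_dir above). */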
1501 if(v->s.ac_pred) {
1502 if(!dc_pred_dir)
1503 zz_table = v->zz_8x8[2];
1504 else
1505 zz_table = v->zz_8x8[3];
1506 } else
1507 zz_table = v->zz_8x8[1];
1508
1509 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
1510 ac_val2 = ac_val;
1511 if(dc_pred_dir) //left
1512 ac_val -= 16;
1513 else //top
1514 ac_val -= 16 * s->block_wrap[n];
1515
1516 while (!last) {
1517 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
1518 i += skip;
1519 if(i > 63)
1520 break;
1521 block[zz_table[i++]] = value;
1522 }
1523
1524 /* apply AC prediction if needed */
1525 if(s->ac_pred) {
1526 if(dc_pred_dir) { //left
1527 for(k = 1; k < 8; k++)
1528 block[k] += ac_val[k];
1529 } else { //top
1530 for(k = 1; k < 8; k++)
1531 block[k << 3] += ac_val[k + 8];
1532 }
1533 }
1534 /* save AC coeffs for further prediction */
1535 for(k = 1; k < 8; k++) {
1536 ac_val2[k] = block[k];
1537 ac_val2[k + 8] = block[k << 3];
1538 }
1539
1540 /* scale AC coeffs */
1541 for(k = 1; k < 64; k++)
1542 if(block[k]) {
1543 block[k] *= scale;
1544 if(!v->pquantizer)
1545 block[k] += (block[k] < 0) ? -v->pq : v->pq;
1546 }
1547
1548 if(s->ac_pred) i = 63;
1549 }
1550
1551 not_coded:
1552 if(!coded) {
1553 int k, scale;
1554 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
1555 ac_val2 = ac_val;
1556
1557 i = 0;
1558 scale = v->pq * 2 + v->halfpq;
1559 memset(ac_val2, 0, 16 * 2);
1560 if(dc_pred_dir) {//left
1561 ac_val -= 16;
1562 if(s->ac_pred)
1563 memcpy(ac_val2, ac_val, 8 * 2);
1564 } else {//top
1565 ac_val -= 16 * s->block_wrap[n];
1566 if(s->ac_pred)
1567 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
1568 }
1569
1570 /* apply AC prediction if needed */
1571 if(s->ac_pred) {
1572 if(dc_pred_dir) { //left
1573 for(k = 1; k < 8; k++) {
1574 block[k] = ac_val[k] * scale;
1575 if(!v->pquantizer && block[k])
1576 block[k] += (block[k] < 0) ? -v->pq : v->pq;
1577 }
1578 } else { //top
1579 for(k = 1; k < 8; k++) {
1580 block[k << 3] = ac_val[k + 8] * scale;
1581 if(!v->pquantizer && block[k << 3])
1582 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
1583 }
1584 }
1585 i = 63;
1586 }
1587 }
1588 s->block_last_index[n] = i;
1589
1590 return 0;
1591 }
1592
1593 /** Decode intra block in intra frames (advanced profile) - should be faster than decode_intra_block
1594 * @param v VC1Context
1595 * @param block block to decode
1596 * @param[in] n subblock number
1597 * @param coded are AC coeffs present or not
1598 * @param codingset set of VLC to decode data
1599 * @param mquant quantizer value for this macroblock
1600 */
1601 static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
1602 {
1603 GetBitContext *gb = &v->s.gb;
1604 MpegEncContext *s = &v->s;
1605 int dc_pred_dir = 0; /* Direction of the DC prediction used */
1606 int i;
1607 int16_t *dc_val;
1608 int16_t *ac_val, *ac_val2;
1609 int dcdiff;
1610 int a_avail = v->a_avail, c_avail = v->c_avail;
1611 int use_pred = s->ac_pred;
1612 int scale;
1613 int q1, q2 = 0;
1614 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1615
1616 /* Get DC differential */
1617 if (n < 4) {
1618 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
1619 } else {
1620 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
1621 }
1622 if (dcdiff < 0){
1623 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
1624 return -1;
1625 }
1626 if (dcdiff)
1627 {
1628 if (dcdiff == 119 /* ESC index value */)
1629 {
1630 /* TODO: Optimize */
1631 if (mquant == 1) dcdiff = get_bits(gb, 10);
1632 else if (mquant == 2) dcdiff = get_bits(gb, 9);
1633 else dcdiff = get_bits(gb, 8);
1634 }
1635 else
1636 {
1637 if (mquant == 1)
1638 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
1639 else if (mquant == 2)
1640 dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
1641 }
1642 if (get_bits1(gb))
1643 dcdiff = -dcdiff;
1644 }
1645
1646 /* Prediction */
1647 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
1648 *dc_val = dcdiff;
1649
1650 /* Store the quantized DC coeff, used for prediction */
1651 if (n < 4) {
1652 block[0] = dcdiff * s->y_dc_scale;
1653 } else {
1654 block[0] = dcdiff * s->c_dc_scale;
1655 }
1656
1657 //AC Decoding
1658 i = 1;
1659
1660 /* check if AC is needed at all */
1661 if(!a_avail && !c_avail) use_pred = 0;
1662 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
1663 ac_val2 = ac_val;
1664
1665 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
1666
1667 if(dc_pred_dir) //left
1668 ac_val -= 16;
1669 else //top
1670 ac_val -= 16 * s->block_wrap[n];
1671
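    /* q1 is the quantizer of the current MB, q2 that of the MB the AC
     * predictors come from; when they differ, the stored predictors are
     * rescaled below using ff_vc1_dqscale. */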
1672 q1 = s->current_picture.qscale_table[mb_pos];
1673 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
1674 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
1675 if(dc_pred_dir && n==1) q2 = q1;
1676 if(!dc_pred_dir && n==2) q2 = q1;
1677 if(n==3) q2 = q1;
1678
1679 if(coded) {
1680 int last = 0, skip, value;
1681 const uint8_t *zz_table;
1682 int k;
1683
1684 if(v->s.ac_pred) {
1685 if(!dc_pred_dir)
1686 zz_table = v->zz_8x8[2];
1687 else
1688 zz_table = v->zz_8x8[3];
1689 } else
1690 zz_table = v->zz_8x8[1];
1691
1692 while (!last) {
1693 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
1694 i += skip;
1695 if(i > 63)
1696 break;
1697 block[zz_table[i++]] = value;
1698 }
1699
1700 /* apply AC prediction if needed */
1701 if(use_pred) {
1702 /* scale predictors if needed*/
1703 if(q2 && q1!=q2) {
1704 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
1705 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
1706
1707 if(dc_pred_dir) { //left
1708 for(k = 1; k < 8; k++)
1709 block[k] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
1710 } else { //top
1711 for(k = 1; k < 8; k++)
1712 block[k << 3] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
1713 }
1714 } else {
1715 if(dc_pred_dir) { //left
1716 for(k = 1; k < 8; k++)
1717 block[k] += ac_val[k];
1718 } else { //top
1719 for(k = 1; k < 8; k++)
1720 block[k << 3] += ac_val[k + 8];
1721 }
1722 }
1723 }
1724 /* save AC coeffs for further prediction */
1725 for(k = 1; k < 8; k++) {
1726 ac_val2[k] = block[k];
1727 ac_val2[k + 8] = block[k << 3];
1728 }
1729
1730 /* scale AC coeffs */
1731 for(k = 1; k < 64; k++)
1732 if(block[k]) {
1733 block[k] *= scale;
1734 if(!v->pquantizer)
1735 block[k] += (block[k] < 0) ? -mquant : mquant;
1736 }
1737
1738 if(use_pred) i = 63;
1739 } else { // no AC coeffs
1740 int k;
1741
1742 memset(ac_val2, 0, 16 * 2);
1743 if(dc_pred_dir) {//left
1744 if(use_pred) {
1745 memcpy(ac_val2, ac_val, 8 * 2);
1746 if(q2 && q1!=q2) {
1747 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
1748 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
1749 for(k = 1; k < 8; k++)
1750 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
1751 }
1752 }
1753 } else {//top
1754 if(use_pred) {
1755 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
1756 if(q2 && q1!=q2) {
1757 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
1758 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
1759 for(k = 1; k < 8; k++)
1760 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
1761 }
1762 }
1763 }
1764
1765 /* apply AC prediction if needed */
1766 if(use_pred) {
1767 if(dc_pred_dir) { //left
1768 for(k = 1; k < 8; k++) {
1769 block[k] = ac_val2[k] * scale;
1770 if(!v->pquantizer && block[k])
1771 block[k] += (block[k] < 0) ? -mquant : mquant;
1772 }
1773 } else { //top
1774 for(k = 1; k < 8; k++) {
1775 block[k << 3] = ac_val2[k + 8] * scale;
1776 if(!v->pquantizer && block[k << 3])
1777 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
1778 }
1779 }
1780 i = 63;
1781 }
1782 }
1783 s->block_last_index[n] = i;
1784
1785 return 0;
1786 }
1787
1788 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
1789 * @param v VC1Context
1790 * @param block block to decode
1791 * @param[in] n subblock index
1792 * @param coded are AC coeffs present or not
1793 * @param mquant block quantizer
1794 * @param codingset VLC coding set used to decode the AC coefficients
1795 */
1796 static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
1797 {
1798 GetBitContext *gb = &v->s.gb;
1799 MpegEncContext *s = &v->s;
1800 int dc_pred_dir = 0; /* Direction of the DC prediction used */
1801 int i;
1802 int16_t *dc_val;
1803 int16_t *ac_val, *ac_val2;
1804 int dcdiff;
1805 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1806 int a_avail = v->a_avail, c_avail = v->c_avail;
1807 int use_pred = s->ac_pred;
1808 int scale;
1809 int q1, q2 = 0;
1810
1811 s->dsp.clear_block(block);
1812
1813 /* XXX: Guard against dumb values of mquant */
1814 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
1815
1816 /* Set DC scale - y and c use the same */
1817 s->y_dc_scale = s->y_dc_scale_table[mquant];
1818 s->c_dc_scale = s->c_dc_scale_table[mquant];
1819
1820 /* Get DC differential */
1821 if (n < 4) {
1822 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
1823 } else {
1824 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
1825 }
1826 if (dcdiff < 0){
1827 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
1828 return -1;
1829 }
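/* Expand the DC differential: small values are refined with 2 (mquant == 1) or
   1 (mquant == 2) extra bits, the escape index 119 is followed by a raw
   10/9/8-bit value, and a final bit gives the sign. */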
1830 if (dcdiff)
1831 {
1832 if (dcdiff == 119 /* ESC index value */)
1833 {
1834 /* TODO: Optimize */
1835 if (mquant == 1) dcdiff = get_bits(gb, 10);
1836 else if (mquant == 2) dcdiff = get_bits(gb, 9);
1837 else dcdiff = get_bits(gb, 8);
1838 }
1839 else
1840 {
1841 if (mquant == 1)
1842 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
1843 else if (mquant == 2)
1844 dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
1845 }
1846 if (get_bits1(gb))
1847 dcdiff = -dcdiff;
1848 }
1849
1850 /* Prediction */
1851 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
1852 *dc_val = dcdiff;
1853
1854 /* Store the quantized DC coeff, used for prediction */
1855
1856 if (n < 4) {
1857 block[0] = dcdiff * s->y_dc_scale;
1858 } else {
1859 block[0] = dcdiff * s->c_dc_scale;
1860 }
1861
1862 //AC Decoding
1863 i = 1;
1864
1865 /* check if AC is needed at all and adjust direction if needed */
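/* If the top neighbour is missing, predict from the left; if the left one is
   missing, predict from the top; with neither available, AC prediction is disabled. */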
1866 if(!a_avail) dc_pred_dir = 1;
1867 if(!c_avail) dc_pred_dir = 0;
1868 if(!a_avail && !c_avail) use_pred = 0;
1869 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
1870 ac_val2 = ac_val;
1871
1872 scale = mquant * 2 + v->halfpq;
1873
1874 if(dc_pred_dir) //left
1875 ac_val -= 16;
1876 else //top
1877 ac_val -= 16 * s->block_wrap[n];
1878
1879 q1 = s->current_picture.qscale_table[mb_pos];
1880 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.qscale_table[mb_pos - 1];
1881 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
1882 if(dc_pred_dir && n==1) q2 = q1;
1883 if(!dc_pred_dir && n==2) q2 = q1;
1884 if(n==3) q2 = q1;
1885
1886 if(coded) {
1887 int last = 0, skip, value;
1888 int k;
1889
1890 while (!last) {
1891 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
1892 i += skip;
1893 if(i > 63)
1894 break;
1895 block[v->zz_8x8[0][i++]] = value;
1896 }
1897
1898 /* apply AC prediction if needed */
1899 if(use_pred) {
1900 /* scale predictors if needed */
1901 if(q2 && q1!=q2) {
1902 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
1903 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
1904
1905 if(dc_pred_dir) { //left
1906 for(k = 1; k < 8; k++)
1907 block[k] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
1908 } else { //top
1909 for(k = 1; k < 8; k++)
1910 block[k << 3] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
1911 }
1912 } else {
1913 if(dc_pred_dir) { //left
1914 for(k = 1; k < 8; k++)
1915 block[k] += ac_val[k];
1916 } else { //top
1917 for(k = 1; k < 8; k++)
1918 block[k << 3] += ac_val[k + 8];
1919 }
1920 }
1921 }
1922 /* save AC coeffs for further prediction */
1923 for(k = 1; k < 8; k++) {
1924 ac_val2[k] = block[k];
1925 ac_val2[k + 8] = block[k << 3];
1926 }
1927
1928 /* scale AC coeffs */
1929 for(k = 1; k < 64; k++)
1930 if(block[k]) {
1931 block[k] *= scale;
1932 if(!v->pquantizer)
1933 block[k] += (block[k] < 0) ? -mquant : mquant;
1934 }
1935
1936 if(use_pred) i = 63;
1937 } else { // no AC coeffs
1938 int k;
1939
1940 memset(ac_val2, 0, 16 * 2);
1941 if(dc_pred_dir) {//left
1942 if(use_pred) {
1943 memcpy(ac_val2, ac_val, 8 * 2);
1944 if(q2 && q1!=q2) {
1945 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
1946 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
1947 for(k = 1; k < 8; k++)
1948 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
1949 }
1950 }
1951 } else {//top
1952 if(use_pred) {
1953 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
1954 if(q2 && q1!=q2) {
1955 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
1956 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
1957 for(k = 1; k < 8; k++)
1958 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
1959 }
1960 }
1961 }
1962
1963 /* apply AC prediction if needed */
1964 if(use_pred) {
1965 if(dc_pred_dir) { //left
1966 for(k = 1; k < 8; k++) {
1967 block[k] = ac_val2[k] * scale;
1968 if(!v->pquantizer && block[k])
1969 block[k] += (block[k] < 0) ? -mquant : mquant;
1970 }
1971 } else { //top
1972 for(k = 1; k < 8; k++) {
1973 block[k << 3] = ac_val2[k + 8] * scale;
1974 if(!v->pquantizer && block[k << 3])
1975 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
1976 }
1977 }
1978 i = 63;
1979 }
1980 }
1981 s->block_last_index[n] = i;
1982
1983 return 0;
1984 }
1985
1986 /** Decode P block
1987 */
1988 static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block,
1989 uint8_t *dst, int linesize, int skip_block, int apply_filter, int cbp_top, int cbp_left)
1990 {
1991 MpegEncContext *s = &v->s;
1992 GetBitContext *gb = &s->gb;
1993 int i, j;
1994 int subblkpat = 0;
1995 int scale, off, idx, last, skip, value;
1996 int ttblk = ttmb & 7;
1997 int pat = 0;
1998
1999 s->dsp.clear_block(block);
2000
2001 if(ttmb == -1) {
2002 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
2003 }
2004 if(ttblk == TT_4X4) {
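/* The VLC codes (coded 4x4 subblock pattern - 1); adding 1 and complementing leaves
   a set bit in subblkpat for every subblock that carries no coefficients. */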
2005 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
2006 }
2007 if((ttblk != TT_8X8 && ttblk != TT_4X4)
2008 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
2009 || (!v->res_rtm_flag && !first_block))) {
2010 subblkpat = decode012(gb);
2011 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
2012 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
2013 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
2014 }
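/* Double-resolution quantizer step: the half step is only added when this block uses
   the picture quantizer; with the non-uniform quantizer (!v->pquantizer) a +/-mquant
   offset is additionally applied to each nonzero level below. */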
2015 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
2016
2017 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
2018 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
2019 subblkpat = 2 - (ttblk == TT_8X4_TOP);
2020 ttblk = TT_8X4;
2021 }
2022 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
2023 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
2024 ttblk = TT_4X8;
2025 }
2026 switch(ttblk) {
2027 case TT_8X8:
2028 pat = 0xF;
2029 i = 0;
2030 last = 0;
2031 while (!last) {
2032 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
2033 i += skip;
2034 if(i > 63)
2035 break;
2036 idx = v->zz_8x8[0][i++];
2037 block[idx] = value * scale;
2038 if(!v->pquantizer)
2039 block[idx] += (block[idx] < 0) ? -mquant : mquant;
2040 }
2041 if(!skip_block){
2042 if(i==1)
2043 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
2044 else{
2045 v->vc1dsp.vc1_inv_trans_8x8(block);
2046 s->dsp.add_pixels_clamped(block, dst, linesize);
2047 }
2048 if(apply_filter && cbp_top & 0xC)
2049 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
2050 if(apply_filter && cbp_left & 0xA)
2051 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
2052 }
2053 break;
2054 case TT_4X4:
2055 pat = ~subblkpat & 0xF;
2056 for(j = 0; j < 4; j++) {
2057 last = subblkpat & (1 << (3 - j));
2058 i = 0;
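/* Subblock j covers the 4x4 quadrant (j & 1) * 4 coefficients to the right and
   ((j & 2) >> 1) * 4 rows down within the 8x8 coefficient block. */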
2059 off = (j & 1) * 4 + (j & 2) * 16;
2060 while (!last) {
2061 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
2062 i += skip;
2063 if(i > 15)
2064 break;
2065 idx = ff_vc1_simple_progressive_4x4_zz[i++];
2066 block[idx + off] = value * scale;
2067 if(!v->pquantizer)
2068 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
2069 }
2070 if(!(subblkpat & (1 << (3 - j))) && !skip_block){
2071 if(i==1)
2072 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j&1)*4 + (j&2)*2*linesize, linesize, block + off);
2073 else
2074 v->vc1dsp.vc1_inv_trans_4x4(dst + (j&1)*4 + (j&2)*2*linesize, linesize, block + off);
2075 if(apply_filter && (j&2 ? pat & (1<<(j-2)) : (cbp_top & (1 << (j + 2)))))
2076 v->vc1dsp.vc1_v_loop_filter4(dst + (j&1)*4 + (j&2)*2*linesize, linesize, v->pq);
2077 if(apply_filter && (j&1 ? pat & (1<<(j-1)) : (cbp_left & (1 << (j + 1)))))
2078 v->vc1dsp.vc1_h_loop_filter4(dst + (j&1)*4 + (j&2)*2*linesize, linesize, v->pq);
2079 }
2080 }
2081 break;
2082 case TT_8X4:
2083 pat = ~((subblkpat & 2)*6 + (subblkpat & 1)*3) & 0xF;
2084 for(j = 0; j < 2; j++) {
2085 last = subblkpat & (1 << (1 - j));
2086 i = 0;
2087 off = j * 32;
2088 while (!last) {
2089 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
2090 i += skip;
2091 if(i > 31)
2092 break;
2093 idx = v->zz_8x4[i++]+off;
2094 block[idx] = value * scale;
2095 if(!v->pquantizer)
2096 block[idx] += (block[idx] < 0) ? -mquant : mquant;
2097 }
2098 if(!(subblkpat & (1 << (1 - j))) && !skip_block){
2099 if(i==1)
2100 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j*4*linesize, linesize, block + off);
2101 else
2102 v->vc1dsp.vc1_inv_trans_8x4(dst + j*4*linesize, linesize, block + off);
2103 if(apply_filter && (j ? pat & 0x3 : (cbp_top & 0xC)))
2104 v->vc1dsp.vc1_v_loop_filter8(dst + j*4*linesize, linesize, v->pq);
2105 if(apply_filter && cbp_left & (2 << j))
2106 v->vc1dsp.vc1_h_loop_filter4(dst + j*4*linesize, linesize, v->pq);
2107 }
2108 }
2109 break;
2110 case TT_4X8:
2111 pat = ~(subblkpat*5) & 0xF;
2112 for(j = 0; j < 2; j++) {
2113 last = subblkpat & (1 << (1 - j));
2114 i = 0;
2115 off = j * 4;
2116 while (!last) {
2117 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
2118 i += skip;
2119 if(i > 31)
2120 break;
2121 idx = v->zz_4x8[i++]+off;
2122 block[idx] = value * scale;
2123 if(!v->pquantizer)
2124 block[idx] += (block[idx] < 0) ? -mquant : mquant;
2125 }
2126 if(!(subblkpat & (1 << (1 - j))) && !skip_block){
2127 if(i==1)
2128 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j*4, linesize, block + off);
2129 else
2130 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
2131 if(apply_filter && cbp_top & (2 << j))
2132 v->vc1dsp.vc1_v_loop_filter4(dst + j*4, linesize, v->pq);
2133 if(apply_filter && (j ? pat & 0x5 : (cbp_left & 0xA)))
2134 v->vc1dsp.vc1_h_loop_filter8(dst + j*4, linesize, v->pq);
2135 }
2136 }
2137 break;
2138 }
2139 return pat;
2140 }
2141
2142 /** @} */ // Macroblock group
2143
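/* Bit lengths and reconstruction offsets of the differential MV components,
   consumed by the GET_MVDATA() macro. */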
2144 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
2145 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
2146
2147 /** Decode one P-frame MB (in Simple/Main profile)
2148 */
2149 static int vc1_decode_p_mb(VC1Context *v)
2150 {
2151 MpegEncContext *s = &v->s;
2152 GetBitContext *gb = &s->gb;
2153 int i, j;
2154 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2155 int cbp; /* cbp decoding stuff */
2156 int mqdiff, mquant; /* MB quantization */
2157 int ttmb = v->ttfrm; /* MB Transform type */
2158
2159 int mb_has_coeffs = 1; /* last_flag */
2160 int dmv_x, dmv_y; /* Differential MV components */
2161 int index, index1; /* LUT indexes */
2162 int val, sign; /* temp values */
2163 int first_block = 1;
2164 int dst_idx, off;
2165 int skipped, fourmv;
2166 int block_cbp = 0, pat;
2167 int apply_loop_filter;
2168
2169 mquant = v->pq; /* Lossy initialization */
2170
2171 if (v->mv_type_is_raw)
2172 fourmv = get_bits1(gb);
2173 else
2174 fourmv = v->mv_type_mb_plane[mb_pos];
2175 if (v->skip_is_raw)
2176 skipped = get_bits1(gb);
2177 else
2178 skipped = v->s.mbskip_table[mb_pos];
2179
2180 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
2181 if (!fourmv) /* 1MV mode */
2182 {
2183 if (!skipped)
2184 {
2185 GET_MVDATA(dmv_x, dmv_y);
2186
2187 if (s->mb_intra) {
2188 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
2189 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
2190 }
2191 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
2192 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
2193
2194 /* FIXME Set DC val for inter block ? */
2195 if (s->mb_intra && !mb_has_coeffs)
2196 {
2197 GET_MQUANT();
2198 s->ac_pred = get_bits1(gb);
2199 cbp = 0;
2200 }
2201 else if (mb_has_coeffs)
2202 {
2203 if (s->mb_intra) s->ac_pred = get_bits1(gb);
2204 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
2205 GET_MQUANT();
2206 }
2207 else
2208 {
2209 mquant = v->pq;
2210 cbp = 0;
2211 }
2212 s->current_picture.qscale_table[mb_pos] = mquant;
2213
2214 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
2215 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
2216 VC1_TTMB_VLC_BITS, 2);
2217 if(!s->mb_intra) vc1_mc_1mv(v, 0);
2218 dst_idx = 0;
2219 for (i=0; i<6; i++)
2220 {
2221 s->dc_val[0][s->block_index[i]] = 0;
2222 dst_idx += i >> 2;
2223 val = ((cbp >> (5 - i)) & 1);
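/* Luma blocks 0-3 go (i & 1) * 8 pixels right and ((i & 2) >> 1) * 8 lines down
   within the MB; chroma blocks 4/5 start at their plane's dest pointer. */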
2224 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
2225 v->mb_type[0][s->block_index[i]] = s->mb_intra;
2226 if(s->mb_intra) {
2227 /* check if prediction blocks A and C are available */
2228 v->a_avail = v->c_avail = 0;
2229 if(i == 2 || i == 3 || !s->first_slice_line)
2230 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
2231 if(i == 1 || i == 3 || s->mb_x)
2232 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
2233
2234 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
2235 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
2236 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
2237 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
2238 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
2239 if(v->pq >= 9 && v->overlap) {
2240 if(v->c_avail)
2241 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
2242 if(v->a_avail)
2243 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
2244 }
2245 if(apply_loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
2246 int left_cbp, top_cbp;
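/* Coded-block pattern of this block's left and top neighbours: neighbours inside
   the same MB come from cbp, neighbours in an adjacent MB from that MB's stored
   v->cbp[] entry. */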
2247 if(i & 4){
2248 left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
2249 top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
2250 }else{
2251 left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
2252 top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
2253 }
2254 if(left_cbp & 0xC)
2255 v->vc1dsp.vc1_v_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
2256 if(top_cbp & 0xA)
2257 v->vc1dsp.vc1_h_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
2258 }
2259 block_cbp |= 0xF << (i << 2);
2260 } else if(val) {
2261 int left_cbp = 0, top_cbp = 0, filter = 0;
2262 if(apply_loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
2263 filter = 1;
2264 if(i & 4){
2265 left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
2266 top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
2267 }else{
2268 left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
2269 top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
2270 }
2271 if(left_cbp & 0xC)
2272 v->vc1dsp.vc1_v_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
2273 if(top_cbp & 0xA)
2274 v->vc1dsp.vc1_h_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
2275 }
2276 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), filter, left_cbp, top_cbp);
2277 block_cbp |= pat << (i << 2);
2278 if(!v->ttmbf && ttmb < 8) ttmb = -1;
2279 first_block = 0;
2280 }
2281 }
2282 }
2283 else //Skipped
2284 {
2285 s->mb_intra = 0;
2286 for(i = 0; i < 6; i++) {
2287 v->mb_type[0][s->block_index[i]] = 0;
2288 s->dc_val[0][s->block_index[i]] = 0;
2289 }
2290 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
2291 s->current_picture.qscale_table[mb_pos] = 0;
2292 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
2293 vc1_mc_1mv(v, 0);
2294 return 0;
2295 }
2296 } //1MV mode
2297 else //4MV mode
2298 {
2299 if (!skipped /* unskipped MB */)
2300 {
2301 int intra_count = 0, coded_inter = 0;
2302 int is_intra[6], is_coded[6];
2303 /* Get CBPCY */
2304 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
2305 for (i=0; i<6; i++)
2306 {
2307 val = ((cbp >> (5 - i)) & 1);
2308 s->dc_val[0][s->block_index[i]] = 0;
2309 s->mb_intra = 0;
2310 if(i < 4) {
2311 dmv_x = dmv_y = 0;
2312 s->mb_intra = 0;
2313 mb_has_coeffs = 0;
2314 if(val) {
2315 GET_MVDATA(dmv_x, dmv_y);
2316 }
2317 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
2318 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
2319 intra_count += s->mb_intra;
2320 is_intra[i] = s->mb_intra;
2321 is_coded[i] = mb_has_coeffs;
2322 }
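/* Chroma blocks carry no MV or intra flag of their own: they are intra when at
   least 3 of the 4 luma blocks are intra, and their coded flag is the CBP bit. */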
2323 if(i&4){
2324 is_intra[i] = (intra_count >= 3);
2325 is_coded[i] = val;
2326 }
2327 if(i == 4) vc1_mc_4mv_chroma(v);
2328 v->mb_type[0][s->block_index[i]] = is_intra[i];
2329 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
2330 }
2331 // if there are no coded blocks then don't do anything more
2332 if(!intra_count && !coded_inter) return 0;
2333 dst_idx = 0;
2334 GET_MQUANT();
2335 s->current_picture.qscale_table[mb_pos] = mquant;
2336 /* test if block is intra and has pred */
2337 {
2338 int intrapred = 0;
2339 for(i=0; i<6; i++)
2340 if(is_intra[i]) {
2341 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
2342 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
2343 intrapred = 1;
2344 break;
2345 }
2346 }
2347 if(intrapred)s->ac_pred = get_bits1(gb);
2348 else s->ac_pred = 0;
2349 }
2350 if (!v->ttmbf && coded_inter)
2351 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
2352 for (i=0; i<6; i++)
2353 {
2354 dst_idx += i >> 2;
2355 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
2356 s->mb_intra = is_intra[i];
2357 if (is_intra[i]) {
2358 /* check if prediction blocks A and C are available */
2359 v->a_avail = v->c_avail = 0;
2360 if(i == 2 || i == 3 || !s->first_slice_line)
2361 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
2362 if(i == 1 || i == 3 || s->mb_x)
2363 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
2364
2365 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
2366 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
2367 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
2368 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
2369 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
2370 if(v->pq >= 9 && v->overlap) {
2371 if(v->c_avail)
2372 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
2373 if(v->a_avail)
2374 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
2375 }
2376 if(v->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
2377 int left_cbp, top_cbp;
2378 if(i & 4){
2379 left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
2380 top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
2381 }else{
2382 left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
2383 top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
2384 }
2385 if(left_cbp & 0xC)
2386 v->vc1dsp.vc1_v_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
2387 if(top_cbp & 0xA)
2388 v->vc1dsp.vc1_h_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
2389 }
2390 block_cbp |= 0xF << (i << 2);
2391 } else if(is_coded[i]) {
2392 int left_cbp = 0, top_cbp = 0, filter = 0;
2393 if(v->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
2394 filter = 1;
2395 if(i & 4){
2396 left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
2397 top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
2398 }else{
2399 left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
2400 top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
2401 }
2402 if(left_cbp & 0xC)
2403 v->vc1dsp.vc1_v_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
2404 if(top_cbp & 0xA)
2405 v->vc1dsp.vc1_h_loop_filter8(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, v->pq);
2406 }
2407 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), filter, left_cbp, top_cbp);
2408 block_cbp |= pat << (i << 2);
2409 if(!v->ttmbf && ttmb < 8) ttmb = -1;
2410 first_block = 0;
2411 }
2412 }
2413 return 0;
2414 }
2415 else //Skipped MB
2416 {
2417 s->mb_intra = 0;
2418 s->current_picture.qscale_table[mb_pos] = 0;
2419 for (i=0; i<6; i++) {
2420 v->mb_type[0][s->block_index[i]] = 0;
2421 s->dc_val[0][s->block_index[i]] = 0;
2422 }
2423 for (i=0; i<4; i++)
2424 {
2425 vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
2426 vc1_mc_4mv_luma(v, i);
2427 }
2428 vc1_mc_4mv_chroma(v);
2429 s->current_picture.qscale_table[mb_pos] = 0;
2430 return 0;
2431 }
2432 }
2433 v->cbp[s->mb_x] = block_cbp;
2434
2435 /* Reached after a coded (non-skipped) 1MV macroblock; the return value is ignored by the caller */
2436 return 0;
2437 }
2438
2439 /** Decode one B-frame MB (in Main profile)
2440 */
2441 static void vc1_decode_b_mb(VC1Context *v)
2442 {
2443 MpegEncContext *s = &v->s;
2444 GetBitContext *gb = &s->gb;
2445 int i, j;
2446 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2447 int cbp = 0; /* cbp decoding stuff */
2448 int mqdiff, mquant; /* MB quantization */
2449 int ttmb = v->ttfrm; /* MB Transform type */
2450 int mb_has_coeffs = 0; /* last_flag */
2451 int index, index1; /* LUT indexes */
2452 int val, sign; /* temp values */
2453 int first_block = 1;
2454 int dst_idx, off;
2455 int skipped, direct;
2456 int dmv_x[2], dmv_y[2];
2457 int bmvtype = BMV_TYPE_BACKWARD;
2458
2459 mquant = v->pq; /* Lossy initialization */
2460 s->mb_intra = 0;
2461
2462 if (v->dmb_is_raw)
2463 direct = get_bits1(gb);
2464 else
2465 direct = v->direct_mb_plane[mb_pos];
2466 if (v->skip_is_raw)
2467 skipped = get_bits1(gb);
2468 else
2469 skipped = v->s.mbskip_table[mb_pos];
2470
2471 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
2472 for(i = 0; i < 6; i++) {
2473 v->mb_type[0][s->block_index[i]] = 0;
2474 s->dc_val[0][s->block_index[i]] = 0;
2475 }
2476 s->current_picture.qscale_table[mb_pos] = 0;
2477
2478 if (!direct) {
2479 if (!skipped) {
2480 GET_MVDATA(dmv_x[0], dmv_y[0]);
2481 dmv_x[1] = dmv_x[0];
2482 dmv_y[1] = dmv_y[0];
2483 }
2484 if(skipped || !s->mb_intra) {
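/* decode012() yields 0, 1 or 2: 2 selects interpolated prediction, 0/1 select
   backward or forward, with the cheapest code assigned to the reference this frame
   is temporally closer to (BFRACTION compared with 1/2). */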
2485 bmvtype = decode012(gb);
2486 switch(bmvtype) {
2487 case 0:
2488 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
2489 break;
2490 case 1:
2491 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
2492 break;
2493 case 2:
2494 bmvtype = BMV_TYPE_INTERPOLATED;
2495 dmv_x[0] = dmv_y[0] = 0;
2496 }
2497 }
2498 }
2499 for(i = 0; i < 6; i++)
2500 v->mb_type[0][s->block_index[i]] = s->mb_intra;
2501
2502 if (skipped) {
2503 if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
2504 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
2505 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
2506 return;
2507 }
2508 if (direct) {
2509 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
2510 GET_MQUANT();
2511 s->mb_intra = 0;
2512 s->current_picture.qscale_table[mb_pos] = mquant;
2513 if(!v->ttmbf)
2514 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
2515 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
2516 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
2517 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
2518 } else {
2519 if(!mb_has_coeffs && !s->mb_intra) {
2520 /* no coded blocks - effectively skipped */
2521 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
2522 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
2523 return;
2524 }
2525 if(s->mb_intra && !mb_has_coeffs) {
2526 GET_MQUANT();
2527 s->current_picture.qscale_table[mb_pos] = mquant;
2528 s->ac_pred = get_bits1(gb);
2529 cbp = 0;
2530 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
2531 } else {
2532 if(bmvtype == BMV_TYPE_INTERPOLATED) {
2533 GET_MVDATA(dmv_x[0], dmv_y[0]);
2534 if(!mb_has_coeffs) {
2535 /* interpolated skipped block */
2536 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
2537 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
2538 return;
2539 }
2540 }
2541 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
2542 if(!s->mb_intra) {
2543 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
2544 }
2545 if(s->mb_intra)
2546 s->ac_pred = get_bits1(gb);
2547 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
2548 GET_MQUANT();
2549 s->current_picture.qscale_table[mb_pos] = mquant;
2550 if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
2551 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
2552 }
2553 }
2554 dst_idx = 0;
2555 for (i=0; i<6; i++)
2556 {
2557 s->dc_val[0][s->block_index[i]] = 0;
2558 dst_idx += i >> 2;
2559 val = ((cbp >> (5 - i)) & 1);
2560 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
2561 v->mb_type[0][s->block_index[i]] = s->mb_intra;
2562 if(s->mb_intra) {
2563 /* check if prediction blocks A and C are available */
2564 v->a_avail = v->c_avail = 0;
2565 if(i == 2 || i == 3 || !s->first_slice_line)
2566 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
2567 if(i == 1 || i == 3 || s->mb_x)
2568 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
2569
2570 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
2571 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
2572 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
2573 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
2574 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
2575 } else if(val) {
2576 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), 0, 0, 0);
2577 if(!v->ttmbf && ttmb < 8) ttmb = -1;
2578 first_block = 0;
2579 }
2580 }
2581 }
2582
2583 /** Decode blocks of I-frame
2584 */
2585 static void vc1_decode_i_blocks(VC1Context *v)
2586 {
2587 int k, j;
2588 MpegEncContext *s = &v->s;
2589 int cbp, val;
2590 uint8_t *coded_val;
2591 int mb_pos;
2592
2593 /* select coding mode used for VLC table selection */
2594 switch(v->y_ac_table_index){
2595 case 0:
2596 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
2597 break;
2598 case 1:
2599 v->codingset = CS_HIGH_MOT_INTRA;
2600 break;
2601 case 2:
2602 v->codingset = CS_MID_RATE_INTRA;
2603 break;
2604 }
2605
2606 switch(v->c_ac_table_index){
2607 case 0:
2608 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
2609 break;
2610 case 1:
2611 v->codingset2 = CS_HIGH_MOT_INTER;
2612 break;
2613 case 2:
2614 v->codingset2 = CS_MID_RATE_INTER;
2615 break;
2616 }
2617
2618 /* Set DC scale - y and c use the same */
2619 s->y_dc_scale = s->y_dc_scale_table[v->pq];
2620 s->c_dc_scale = s->c_dc_scale_table[v->pq];
2621
2622 //do frame decode
2623 s->mb_x = s->mb_y = 0;
2624 s->mb_intra = 1;
2625 s->first_slice_line = 1;
2626 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
2627 s->mb_x = 0;
2628 ff_init_block_index(s);
2629 for(; s->mb_x < s->mb_width; s->mb_x++) {
2630 ff_update_block_index(s);
2631 s->dsp.clear_blocks(s->block[0]);
2632 mb_pos = s->mb_x + s->mb_y * s->mb_width;
2633 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
2634 s->current_picture.qscale_table[mb_pos] = v->pq;
2635 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
2636 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
2637
2638 // do actual MB decoding and displaying
2639 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
2640 v->s.ac_pred = get_bits1(&v->s.gb);
2641
2642 for(k = 0; k < 6; k++) {
2643 val = ((cbp >> (5 - k)) & 1);
2644
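/* Luma CBP bits are coded predictively: XOR the transmitted bit with the prediction
   from neighbouring blocks and store the result for later predictions. */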
2645 if (k < 4) {
2646 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
2647 val = val ^ pred;
2648 *coded_val = val;
2649 }
2650 cbp |= val << (5 - k);
2651
2652 vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
2653
2654 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
2655 if(v->pq >= 9 && v->overlap) {
2656 for(j = 0; j < 64; j++) s->block[k][j] += 128;
2657 }
2658 }
2659
2660 vc1_put_block(v, s->block);
2661 if(v->pq >= 9 && v->overlap) {
2662 if(s->mb_x) {
2663 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
2664 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
2665 if(!(s->flags & CODEC_FLAG_GRAY)) {
2666 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
2667 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
2668 }
2669 }
2670 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
2671 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
2672 if(!s->first_slice_line) {
2673 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
2674 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
2675 if(!(s->flags & CODEC_FLAG_GRAY)) {
2676 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
2677 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
2678 }
2679 }
2680 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
2681 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
2682 }
2683 if(v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
2684
2685 if(get_bits_count(&s->gb) > v->bits) {
2686 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
2687 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
2688 return;
2689 }
2690 }
2691 if (!v->s.loop_filter)
2692 ff_draw_horiz_band(s, s->mb_y * 16, 16);
2693 else if (s->mb_y)
2694 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
2695
2696 s->first_slice_line = 0;
2697 }
2698 if (v->s.loop_filter)
2699 ff_draw_horiz_band(s, (s->mb_height-1)*16, 16);
2700 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
2701 }
2702
2703 /** Decode blocks of I-frame for advanced profile
2704 */
2705 static void vc1_decode_i_blocks_adv(VC1Context *v)
2706 {
2707 int k, j;
2708 MpegEncContext *s = &v->s;
2709 int cbp, val;
2710 uint8_t *coded_val;
2711 int mb_pos;
2712 int mquant = v->pq;
2713 int mqdiff;
2714 int overlap;
2715 GetBitContext *gb = &s->gb;
2716
2717 /* select coding mode used for VLC table selection */
2718 switch(v->y_ac_table_index){
2719 case 0:
2720 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
2721 break;
2722 case 1:
2723 v->codingset = CS_HIGH_MOT_INTRA;
2724 break;
2725 case 2:
2726 v->codingset = CS_MID_RATE_INTRA;
2727 break;
2728 }
2729
2730 switch(v->c_ac_table_index){
2731 case 0:
2732 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
2733 break;
2734 case 1:
2735 v->codingset2 = CS_HIGH_MOT_INTER;
2736 break;
2737 case 2:
2738 v->codingset2 = CS_MID_RATE_INTER;
2739 break;
2740 }
2741
2742 //do frame decode
2743 s->mb_x = s->mb_y = 0;
2744 s->mb_intra = 1;
2745 s->first_slice_line = 1;
2746 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
2747 s->mb_x = 0;
2748 ff_init_block_index(s);
2749 for(;s->mb_x < s->mb_width; s->mb_x++) {
2750 ff_update_block_index(s);
2751 s->dsp.clear_blocks(s->block[0]);
2752 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2753 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
2754 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
2755 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
2756
2757 // do actual MB decoding and displaying
2758 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
2759 if(v->acpred_is_raw)
2760 v->s.ac_pred = get_bits1(&v->s.gb);
2761 else
2762 v->s.ac_pred = v->acpred_plane[mb_pos];
2763
2764 if(v->condover == CONDOVER_SELECT) {
2765 if(v->overflg_is_raw)
2766 overlap = get_bits1(&v->s.gb);
2767 else
2768 overlap = v->over_flags_plane[mb_pos];
2769 } else
2770 overlap = (v->condover == CONDOVER_ALL);
2771
2772 GET_MQUANT();
2773
2774 s->current_picture.qscale_table[mb_pos] = mquant;
2775 /* Set DC scale - y and c use the same */
2776 s->y_dc_scale = s->y_dc_scale_table[mquant];
2777 s->c_dc_scale = s->c_dc_scale_table[mquant];
2778
2779 for(k = 0; k < 6; k++) {
2780 val = ((cbp >> (5 - k)) & 1);
2781
2782 if (k < 4) {
2783 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
2784 val = val ^ pred;
2785 *coded_val = val;
2786 }
2787 cbp |= val << (5 - k);
2788
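/* Top predictors exist unless this is the first MB row of the slice (blocks 2/3
   always have theirs inside the current MB); left predictors exist unless
   mb_x == 0 (blocks 1/3 have theirs inside the MB). */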
2789 v->a_avail = !s->first_slice_line || (k==2 || k==3);
2790 v->c_avail = !!s->mb_x || (k==1 || k==3);
2791
2792 vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
2793
2794 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
2795 for(j = 0; j < 64; j++) s->block[k][j] += 128;
2796 }
2797
2798 vc1_put_block(v, s->block);
2799 if(overlap) {
2800 if(s->mb_x) {
2801 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
2802 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
2803 if(!(s->flags & CODEC_FLAG_GRAY)) {
2804 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
2805 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
2806 }
2807 }
2808 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
2809 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
2810 if(!s->first_slice_line) {
2811 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
2812 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
2813 if(!(s->flags & CODEC_FLAG_GRAY)) {
2814 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
2815 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
2816 }
2817 }
2818 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
2819 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
2820 }
2821 if(v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
2822
2823 if(get_bits_count(&s->gb) > v->bits) {
2824 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
2825 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
2826 return;
2827 }
2828 }
2829 if (!v->s.loop_filter)
2830 ff_draw_horiz_band(s, s->mb_y * 16, 16);
2831 else if (s->mb_y)
2832 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
2833 s->first_slice_line = 0;
2834 }
2835 if (v->s.loop_filter)
2836 ff_draw_horiz_band(s, (s->mb_height-1)*16, 16);
2837 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
2838 }
2839
2840 static void vc1_decode_p_blocks(VC1Context *v)
2841 {
2842 MpegEncContext *s = &v->s;
2843
2844 /* select coding mode used for VLC table selection */
2845 switch(v->c_ac_table_index){
2846 case 0:
2847 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
2848 break;
2849 case 1:
2850 v->codingset = CS_HIGH_MOT_INTRA;
2851 break;
2852 case 2:
2853 v->codingset = CS_MID_RATE_INTRA;
2854 break;
2855 }
2856
2857 switch(v->c_ac_table_index){
2858 case 0:
2859 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
2860 break;
2861 case 1:
2862 v->codingset2 = CS_HIGH_MOT_INTER;
2863 break;
2864 case 2:
2865 v->codingset2 = CS_MID_RATE_INTER;
2866 break;
2867 }
2868
2869 s->first_slice_line = 1;
2870 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
2871 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
2872 s->mb_x = 0;
2873 ff_init_block_index(s);
2874 for(; s->mb_x < s->mb_width; s->mb_x++) {
2875 ff_update_block_index(s);
2876
2877 vc1_decode_p_mb(v);
2878 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
2879 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
2880 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
2881 return;
2882 }
2883 }
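/* Roll the CBP history: the row just decoded becomes the "row above" that the next
   row's loop filter reads via v->cbp[x - mb_stride]. */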
2884 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0])*s->mb_stride);
2885 ff_draw_horiz_band(s, s->mb_y * 16, 16);
2886 s->first_slice_line = 0;
2887 }
2888 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
2889 }
2890
2891 static void vc1_decode_b_blocks(VC1Context *v)
2892 {
2893 MpegEncContext *s = &v->s;
2894
2895 /* select coding mode used for VLC table selection */
2896 switch(v->c_ac_table_index){
2897 case 0:
2898 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
2899 break;
2900 case 1:
2901 v->codingset = CS_HIGH_MOT_INTRA;
2902 break;
2903 case 2:
2904 v->codingset = CS_MID_RATE_INTRA;
2905 break;
2906 }
2907
2908 switch(v->c_ac_table_index){
2909 case 0:
2910 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
2911 break;
2912 case 1:
2913 v->codingset2 = CS_HIGH_MOT_INTER;
2914 break;
2915 case 2:
2916 v->codingset2 = CS_MID_RATE_INTER;
2917 break;
2918 }
2919
2920 s->first_slice_line = 1;
2921 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
2922 s->mb_x = 0;
2923 ff_init_block_index(s);
2924 for(; s->mb_x < s->mb_width; s->mb_x++) {
2925 ff_update_block_index(s);
2926
2927 vc1_decode_b_mb(v);
2928 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
2929 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
2930 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
2931 return;
2932 }
2933 if(v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
2934 }
2935 if (!v->s.loop_filter)
2936 ff_draw_horiz_band(s, s->mb_y * 16, 16);
2937 else if (s->mb_y)
2938 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
2939 s->first_slice_line = 0;
2940 }
2941 if (v->s.loop_filter)
2942 ff_draw_horiz_band(s, (s->mb_height-1)*16, 16);
2943 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
2944 }
2945
2946 static void vc1_decode_skip_blocks(VC1Context *v)
2947 {
2948 MpegEncContext *s = &v->s;
2949
2950 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
2951 s->first_slice_line = 1;
2952 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
2953 s->mb_x = 0;
2954 ff_init_block_index(s);
2955 ff_update_block_index(s);
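/* Skipped P frame: copy each macroblock row verbatim from the previous picture
   (16 luma lines, 8 lines per chroma plane). */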
2956 memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
2957 memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
2958 memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
2959 ff_draw_horiz_band(s, s->mb_y * 16, 16);
2960 s->first_slice_line = 0;
2961 }
2962 s->pict_type = FF_P_TYPE;
2963 }
2964
2965 static void vc1_decode_blocks(VC1Context *v)
2966 {
2967
2968 v->s.esc3_level_length = 0;
2969 if(v->x8_type){
2970 ff_intrax8_decode_picture(&v->x8, 2*v->pq+v->halfpq, v->pq*(!v->pquantizer) );
2971 }else{
2972
2973 switch(v->s.pict_type) {
2974 case FF_I_TYPE:
2975 if(v->profile == PROFILE_ADVANCED)
2976 vc1_decode_i_blocks_adv(v);
2977 else
2978 vc1_decode_i_blocks(v);
2979 break;
2980 case FF_P_TYPE:
2981 if(v->p_frame_skipped)
2982 vc1_decode_skip_blocks(v);
2983 else
2984 vc1_decode_p_blocks(v);
2985 break;
2986 case FF_B_TYPE:
2987 if(v->bi_type){
2988 if(v->profile == PROFILE_ADVANCED)
2989 vc1_decode_i_blocks_adv(v);
2990 else
2991 vc1_decode_i_blocks(v);
2992 }else
2993 vc1_decode_b_blocks(v);
2994 break;
2995 }
2996 }
2997 }
2998
2999 /** Initialize a VC1/WMV3 decoder
3000 * @todo TODO: Handle VC-1 IDUs (Transport level?)
3001 * @todo TODO: Decipher remaining bits in extra_data
3002 */
3003 static av_cold int vc1_decode_init(AVCodecContext *avctx)
3004 {
3005 VC1Context *v = avctx->priv_data;
3006 MpegEncContext *s = &v->s;
3007 GetBitContext gb;
3008 int i;
3009
3010 if (!avctx->extradata_size || !avctx->extradata) return -1;
3011 if (!(avctx->flags & CODEC_FLAG_GRAY))
3012 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
3013 else
3014 avctx->pix_fmt = PIX_FMT_GRAY8;
3015 avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
3016 v->s.avctx = avctx;
3017 avctx->flags |= CODEC_FLAG_EMU_EDGE;
3018 v->s.flags |= CODEC_FLAG_EMU_EDGE;
3019
3020 if(avctx->idct_algo==FF_IDCT_AUTO){
3021 avctx->idct_algo=FF_IDCT_WMV2;
3022 }
3023
3024 if(ff_msmpeg4_decode_init(avctx) < 0)
3025 return -1;
3026 if (vc1_init_common(v) < 0) return -1;
3027 ff_vc1dsp_init(&v->vc1dsp);
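/* Build the VC-1 zig-zag tables from the WMV1 scan tables, swapping the row and
   column of every scan position. */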
3028 for (i = 0; i < 64; i++) {
3029 #define transpose(x) ((x>>3) | ((x&7)<<3))
3030 v->zz_8x8[0][i] = transpose(wmv1_scantable[0][i]);
3031 v->zz_8x8[1][i] = transpose(wmv1_scantable[1][i]);
3032 v->zz_8x8[2][i] = transpose(wmv1_scantable[2][i]);
3033 v->zz_8x8[3][i] = transpose(wmv1_scantable[3][i]);
3034 }
3035
3036 avctx->coded_width = avctx->width;
3037 avctx->coded_height = avctx->height;
3038 if (avctx->codec_id == CODEC_ID_WMV3)
3039 {
3040 int count = 0;
3041
3042 // looks like WMV3 has a sequence header stored in the extradata
3043 // advanced sequence header may be before the first frame
3044 // the last byte of the extradata is a version number, 1 for the
3045 // samples we can decode
3046
3047 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
3048
3049 if (vc1_decode_sequence_header(avctx, v, &gb) < 0)
3050 return -1;
3051
3052 count = avctx->extradata_size*8 - get_bits_count(&gb);
3053 if (count>0)
3054 {
3055 av_log(avctx, AV_LOG_INFO, "