vc1dec: interlaced stream decoding support 2/3
[libav.git] / libavcodec / vc1dec.c
1/*
2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006-2007 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
5 *
6 * This file is part of Libav.
7 *
8 * Libav is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * Libav is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with Libav; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23/**
24 * @file
25 * VC-1 and WMV3 decoder
26 *
27 */
28#include "internal.h"
29#include "dsputil.h"
30#include "avcodec.h"
31#include "mpegvideo.h"
32#include "h263.h"
33#include "vc1.h"
34#include "vc1data.h"
35#include "vc1acdata.h"
36#include "msmpeg4data.h"
37#include "unary.h"
38#include "simple_idct.h"
39#include "mathops.h"
40#include "vdpau_internal.h"
41
42#undef NDEBUG
43#include <assert.h>
44
45#define MB_INTRA_VLC_BITS 9
46#define DC_VLC_BITS 9
47#define AC_VLC_BITS 9
48
49
50static const uint16_t vlc_offs[] = {
51 0, 520, 552, 616, 1128, 1160, 1224, 1740, 1772, 1836, 1900, 2436,
52 2986, 3050, 3610, 4154, 4218, 4746, 5326, 5390, 5902, 6554, 7658, 8620,
53 9262, 10202, 10756, 11310, 12228, 15078
54};
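/* vlc_offs[] partitions the single static vlc_table[] below into consecutive
 * slices, one per VLC table: each table_allocated value set in
 * vc1_init_common() is simply the difference between two neighbouring
 * offsets, and the last offset equals the size of vlc_table[]. */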
55
56/**
57 * Init VC-1 specific tables and VC1Context members
58 * @param v The VC1Context to initialize
59 * @return Status
60 */
61static int vc1_init_common(VC1Context *v)
62{
63 static int done = 0;
64 int i = 0;
65 static VLC_TYPE vlc_table[15078][2];
66
67 v->hrd_rate = v->hrd_buffer = NULL;
68
69 /* VLC tables */
70 if(!done)
71 {
72 INIT_VLC_STATIC(&ff_vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
73 ff_vc1_bfraction_bits, 1, 1,
74 ff_vc1_bfraction_codes, 1, 1, 1 << VC1_BFRACTION_VLC_BITS);
75 INIT_VLC_STATIC(&ff_vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
76 ff_vc1_norm2_bits, 1, 1,
77 ff_vc1_norm2_codes, 1, 1, 1 << VC1_NORM2_VLC_BITS);
78 INIT_VLC_STATIC(&ff_vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
79 ff_vc1_norm6_bits, 1, 1,
80 ff_vc1_norm6_codes, 2, 2, 556);
81 INIT_VLC_STATIC(&ff_vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
82 ff_vc1_imode_bits, 1, 1,
83 ff_vc1_imode_codes, 1, 1, 1 << VC1_IMODE_VLC_BITS);
84 for (i=0; i<3; i++)
85 {
86 ff_vc1_ttmb_vlc[i].table = &vlc_table[vlc_offs[i*3+0]];
87 ff_vc1_ttmb_vlc[i].table_allocated = vlc_offs[i*3+1] - vlc_offs[i*3+0];
88 init_vlc(&ff_vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
89 ff_vc1_ttmb_bits[i], 1, 1,
90 ff_vc1_ttmb_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
91 ff_vc1_ttblk_vlc[i].table = &vlc_table[vlc_offs[i*3+1]];
92 ff_vc1_ttblk_vlc[i].table_allocated = vlc_offs[i*3+2] - vlc_offs[i*3+1];
93 init_vlc(&ff_vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
94 ff_vc1_ttblk_bits[i], 1, 1,
95 ff_vc1_ttblk_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
96 ff_vc1_subblkpat_vlc[i].table = &vlc_table[vlc_offs[i*3+2]];
97 ff_vc1_subblkpat_vlc[i].table_allocated = vlc_offs[i*3+3] - vlc_offs[i*3+2];
98 init_vlc(&ff_vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
99 ff_vc1_subblkpat_bits[i], 1, 1,
100 ff_vc1_subblkpat_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
101 }
102 for(i=0; i<4; i++)
103 {
104 ff_vc1_4mv_block_pattern_vlc[i].table = &vlc_table[vlc_offs[i*3+9]];
105 ff_vc1_4mv_block_pattern_vlc[i].table_allocated = vlc_offs[i*3+10] - vlc_offs[i*3+9];
106 init_vlc(&ff_vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
107 ff_vc1_4mv_block_pattern_bits[i], 1, 1,
108 ff_vc1_4mv_block_pattern_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
109 ff_vc1_cbpcy_p_vlc[i].table = &vlc_table[vlc_offs[i*3+10]];
110 ff_vc1_cbpcy_p_vlc[i].table_allocated = vlc_offs[i*3+11] - vlc_offs[i*3+10];
111 init_vlc(&ff_vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
112 ff_vc1_cbpcy_p_bits[i], 1, 1,
113 ff_vc1_cbpcy_p_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
114 ff_vc1_mv_diff_vlc[i].table = &vlc_table[vlc_offs[i*3+11]];
115 ff_vc1_mv_diff_vlc[i].table_allocated = vlc_offs[i*3+12] - vlc_offs[i*3+11];
116 init_vlc(&ff_vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
117 ff_vc1_mv_diff_bits[i], 1, 1,
118 ff_vc1_mv_diff_codes[i], 2, 2, INIT_VLC_USE_NEW_STATIC);
119 }
120 for(i=0; i<8; i++){
121 ff_vc1_ac_coeff_table[i].table = &vlc_table[vlc_offs[i+21]];
122 ff_vc1_ac_coeff_table[i].table_allocated = vlc_offs[i+22] - vlc_offs[i+21];
123 init_vlc(&ff_vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
124 &vc1_ac_tables[i][0][1], 8, 4,
125 &vc1_ac_tables[i][0][0], 8, 4, INIT_VLC_USE_NEW_STATIC);
126 }
127 done = 1;
128 }
129
130 /* Other defaults */
131 v->pq = -1;
132 v->mvrange = 0; /* 7.1.1.18, p80 */
133
134 return 0;
135}
136
137/***********************************************************************/
138/**
139 * @name VC-1 Bitplane decoding
140 * @see 8.7, p56
141 * @{
142 */
143
144/**
145 * Imode types
146 * @{
147 */
148enum Imode {
149 IMODE_RAW,
150 IMODE_NORM2,
151 IMODE_DIFF2,
152 IMODE_NORM6,
153 IMODE_DIFF6,
154 IMODE_ROWSKIP,
155 IMODE_COLSKIP
156};
157/** @} */ //imode defines
158
159
160/** @} */ //Bitplane group
161
162static void vc1_put_signed_blocks_clamped(VC1Context *v)
163{
164 MpegEncContext *s = &v->s;
165
166 /* The put pixels loop is always one MB row behind the decoding loop,
167 * because we can only put pixels when overlap filtering is done, and
168 * for filtering of the bottom edge of a MB, we need the next MB row
169 * present as well.
170 * Within the row, the put pixels loop is also one MB col behind the
171 * decoding loop. The reason for this is again, because for filtering
172 * of the right MB edge, we need the next MB present. */
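/* In other words: while the decoding loop is at MB (mb_x, mb_y), the blocks
 * flushed here belong to MB (mb_x - 1, mb_y - 1) - hence the
 * "dest - 16 * linesize - 16" addressing and the topleft_blk_idx slot - and
 * only at the end of a row (mb_x == mb_width - 1) is the MB directly above
 * (top_blk_idx) written out as well. */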
173 if (!s->first_slice_line) {
174 if (s->mb_x) {
175 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
176 s->dest[0] - 16 * s->linesize - 16,
177 s->linesize);
178 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
179 s->dest[0] - 16 * s->linesize - 8,
180 s->linesize);
181 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
182 s->dest[0] - 8 * s->linesize - 16,
183 s->linesize);
184 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
185 s->dest[0] - 8 * s->linesize - 8,
186 s->linesize);
187 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
188 s->dest[1] - 8 * s->uvlinesize - 8,
189 s->uvlinesize);
190 s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
191 s->dest[2] - 8 * s->uvlinesize - 8,
192 s->uvlinesize);
193 }
194 if (s->mb_x == s->mb_width - 1) {
195 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
196 s->dest[0] - 16 * s->linesize,
197 s->linesize);
198 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
199 s->dest[0] - 16 * s->linesize + 8,
200 s->linesize);
201 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
202 s->dest[0] - 8 * s->linesize,
203 s->linesize);
204 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
205 s->dest[0] - 8 * s->linesize + 8,
206 s->linesize);
207 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
208 s->dest[1] - 8 * s->uvlinesize,
209 s->uvlinesize);
210 s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
211 s->dest[2] - 8 * s->uvlinesize,
212 s->uvlinesize);
213 }
214 }
215
216#define inc_blk_idx(idx) do { \
217 idx++; \
218 if (idx >= v->n_allocated_blks) \
219 idx = 0; \
220 } while (0)
221
222 inc_blk_idx(v->topleft_blk_idx);
223 inc_blk_idx(v->top_blk_idx);
224 inc_blk_idx(v->left_blk_idx);
225 inc_blk_idx(v->cur_blk_idx);
226}
227
228static void vc1_loop_filter_iblk(VC1Context *v, int pq)
229{
230 MpegEncContext *s = &v->s;
231 int j;
232 if (!s->first_slice_line) {
233 v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
234 if (s->mb_x)
235 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16*s->linesize, s->linesize, pq);
236 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16*s->linesize+8, s->linesize, pq);
237 for(j = 0; j < 2; j++){
238 v->vc1dsp.vc1_v_loop_filter8(s->dest[j+1], s->uvlinesize, pq);
239 if (s->mb_x)
240 v->vc1dsp.vc1_h_loop_filter8(s->dest[j+1]-8*s->uvlinesize, s->uvlinesize, pq);
241 }
242 }
243 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8*s->linesize, s->linesize, pq);
244
245 if (s->mb_y == s->end_mb_y-1) {
246 if (s->mb_x) {
247 v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
248 v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
249 v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
250 }
251 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
252 }
253}
254
255static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
256{
257 MpegEncContext *s = &v->s;
258 int j;
259
260 /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
261 * means it runs two rows/cols behind the decoding loop. */
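/* For example, with the decoder at MB (mb_x, mb_y), the first vertical edge
 * filtered below belongs to MB (mb_x - 1, mb_y - 1), i.e.
 * dest[0] - 16 * linesize - 16, which is why the extra "mb_x >= 2" and
 * "mb_y >= start_mb_y + 2" guards appear here compared with
 * vc1_loop_filter_iblk() above. */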
262 if (!s->first_slice_line) {
263 if (s->mb_x) {
264 if (s->mb_y >= s->start_mb_y + 2) {
265 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
266
267 if (s->mb_x >= 2)
268 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
269 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
270 for(j = 0; j < 2; j++) {
271 v->vc1dsp.vc1_v_loop_filter8(s->dest[j+1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
272 if (s->mb_x >= 2) {
273 v->vc1dsp.vc1_h_loop_filter8(s->dest[j+1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
274 }
275 }
276 }
277 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
278 }
279
280 if (s->mb_x == s->mb_width - 1) {
281 if (s->mb_y >= s->start_mb_y + 2) {
282 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
283
284 if (s->mb_x)
285 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
286 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
287 for(j = 0; j < 2; j++) {
288 v->vc1dsp.vc1_v_loop_filter8(s->dest[j+1] - 8 * s->uvlinesize, s->uvlinesize, pq);
289 if (s->mb_x >= 2) {
290 v->vc1dsp.vc1_h_loop_filter8(s->dest[j+1] - 16 * s->uvlinesize, s->uvlinesize, pq);
291 }
292 }
293 }
294 v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
295 }
296
297 if (s->mb_y == s->end_mb_y) {
298 if (s->mb_x) {
299 if (s->mb_x >= 2)
300 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
301 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
302 if (s->mb_x >= 2) {
303 for(j = 0; j < 2; j++) {
304 v->vc1dsp.vc1_h_loop_filter8(s->dest[j+1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
305 }
306 }
307 }
308
309 if (s->mb_x == s->mb_width - 1) {
310 if (s->mb_x)
311 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
312 v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
313 if (s->mb_x) {
314 for(j = 0; j < 2; j++) {
315 v->vc1dsp.vc1_h_loop_filter8(s->dest[j+1] - 8 * s->uvlinesize, s->uvlinesize, pq);
316 }
317 }
318 }
319 }
320 }
321}
322
323static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
324{
325 MpegEncContext *s = &v->s;
326 int mb_pos;
327
328 if (v->condover == CONDOVER_NONE)
329 return;
330
331 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
332
333 /* Within a MB, the horizontal overlap always runs before the vertical.
334 * To accomplish that, we run the H on left and internal borders of the
335 * currently decoded MB. Then, we wait for the next overlap iteration
336 * to do H overlap on the right edge of this MB, before moving over and
337 * running the V overlap. Therefore, the V overlap makes us trail by one
338 * MB col and the H overlap filter makes us trail by one MB row. This
339 * is reflected in the time at which we run the put_pixels loop. */
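/* Concretely: the H overlap filter below runs on the current MB's left edge
 * (against left_blk_idx) and between its own block pairs, while the V
 * overlap filter is deferred until a MB has also had its right edge
 * H-filtered, which is why it operates on the left/top-left blocks (or on
 * the current MB only at the end of a row). */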
340 if(v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
341 if(s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
342 v->over_flags_plane[mb_pos - 1])) {
343 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
344 v->block[v->cur_blk_idx][0]);
345 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
346 v->block[v->cur_blk_idx][2]);
347 if(!(s->flags & CODEC_FLAG_GRAY)) {
348 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
349 v->block[v->cur_blk_idx][4]);
350 v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
351 v->block[v->cur_blk_idx][5]);
352 }
353 }
354 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
355 v->block[v->cur_blk_idx][1]);
356 v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
357 v->block[v->cur_blk_idx][3]);
358
359 if (s->mb_x == s->mb_width - 1) {
360 if(!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
361 v->over_flags_plane[mb_pos - s->mb_stride])) {
362 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
363 v->block[v->cur_blk_idx][0]);
364 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
365 v->block[v->cur_blk_idx][1]);
366 if(!(s->flags & CODEC_FLAG_GRAY)) {
367 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
368 v->block[v->cur_blk_idx][4]);
369 v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
370 v->block[v->cur_blk_idx][5]);
371 }
372 }
373 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
374 v->block[v->cur_blk_idx][2]);
375 v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
376 v->block[v->cur_blk_idx][3]);
377 }
378 }
379 if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
380 if(!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
381 v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
382 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
383 v->block[v->left_blk_idx][0]);
384 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
385 v->block[v->left_blk_idx][1]);
386 if(!(s->flags & CODEC_FLAG_GRAY)) {
387 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
388 v->block[v->left_blk_idx][4]);
389 v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
390 v->block[v->left_blk_idx][5]);
391 }
392 }
393 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
394 v->block[v->left_blk_idx][2]);
395 v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
396 v->block[v->left_blk_idx][3]);
397 }
398}
399
400/** Do motion compensation over 1 macroblock
401 * Mostly adapted from hpel_motion() and qpel_motion() in mpegvideo.c
402 */
403static void vc1_mc_1mv(VC1Context *v, int dir)
404{
405 MpegEncContext *s = &v->s;
406 DSPContext *dsp = &v->s.dsp;
407 uint8_t *srcY, *srcU, *srcV;
408 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
409
410 if(!v->s.last_picture.f.data[0])return;
411
412 mx = s->mv[dir][0][0];
413 my = s->mv[dir][0][1];
414
415 // store motion vectors for further use in B frames
416 if(s->pict_type == AV_PICTURE_TYPE_P) {
417 s->current_picture.f.motion_val[1][s->block_index[0]][0] = mx;
418 s->current_picture.f.motion_val[1][s->block_index[0]][1] = my;
419 }
420 uvmx = (mx + ((mx & 3) == 3)) >> 1;
421 uvmy = (my + ((my & 3) == 3)) >> 1;
422 v->luma_mv[s->mb_x][0] = uvmx;
423 v->luma_mv[s->mb_x][1] = uvmy;
424 if(v->fastuvmc) {
425 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
426 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
427 }
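/* Worked example: a quarter-pel luma MV of mx = 7 gives
 * uvmx = (7 + 1) >> 1 = 4, i.e. the luma MV halved with the 3/4 positions
 * rounded up; with fastuvmc, odd results such as uvmx = 5 are then rounded
 * towards zero to 4, so chroma MC is restricted to half-pel positions. */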
428 if(!dir) {
429 srcY = s->last_picture.f.data[0];
430 srcU = s->last_picture.f.data[1];
431 srcV = s->last_picture.f.data[2];
432 } else {
433 srcY = s->next_picture.f.data[0];
434 srcU = s->next_picture.f.data[1];
435 srcV = s->next_picture.f.data[2];
436 }
437
438 src_x = s->mb_x * 16 + (mx >> 2);
439 src_y = s->mb_y * 16 + (my >> 2);
440 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
441 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
442
443 if(v->profile != PROFILE_ADVANCED){
444 src_x = av_clip( src_x, -16, s->mb_width * 16);
445 src_y = av_clip( src_y, -16, s->mb_height * 16);
446 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
447 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
448 }else{
449 src_x = av_clip( src_x, -17, s->avctx->coded_width);
450 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
451 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
452 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
453 }
454
455 srcY += src_y * s->linesize + src_x;
456 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
457 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
458
459 /* for grayscale we should not try to read from unknown area */
460 if(s->flags & CODEC_FLAG_GRAY) {
461 srcU = s->edge_emu_buffer + 18 * s->linesize;
462 srcV = s->edge_emu_buffer + 18 * s->linesize;
463 }
464
465 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
466 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
467 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
468 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
469
470 srcY -= s->mspel * (1 + s->linesize);
471 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
472 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
473 srcY = s->edge_emu_buffer;
474 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
475 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
476 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
477 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
478 srcU = uvbuf;
479 srcV = uvbuf + 16;
480 /* if we deal with range reduction we need to scale source blocks */
481 if(v->rangeredfrm) {
482 int i, j;
483 uint8_t *src, *src2;
484
485 src = srcY;
486 for(j = 0; j < 17 + s->mspel*2; j++) {
487 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
488 src += s->linesize;
489 }
490 src = srcU; src2 = srcV;
491 for(j = 0; j < 9; j++) {
492 for(i = 0; i < 9; i++) {
493 src[i] = ((src[i] - 128) >> 1) + 128;
494 src2[i] = ((src2[i] - 128) >> 1) + 128;
495 }
496 src += s->uvlinesize;
497 src2 += s->uvlinesize;
498 }
499 }
500 /* if we deal with intensity compensation we need to scale source blocks */
501 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
502 int i, j;
503 uint8_t *src, *src2;
504
505 src = srcY;
506 for(j = 0; j < 17 + s->mspel*2; j++) {
507 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
508 src += s->linesize;
509 }
510 src = srcU; src2 = srcV;
511 for(j = 0; j < 9; j++) {
512 for(i = 0; i < 9; i++) {
513 src[i] = v->lutuv[src[i]];
514 src2[i] = v->lutuv[src2[i]];
515 }
516 src += s->uvlinesize;
517 src2 += s->uvlinesize;
518 }
519 }
520 srcY += s->mspel * (1 + s->linesize);
521 }
522
523 if(s->mspel) {
524 dxy = ((my & 3) << 2) | (mx & 3);
525 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
526 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
527 srcY += s->linesize * 8;
528 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
529 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
530 } else { // hpel mc - always used for luma
531 dxy = (my & 2) | ((mx & 2) >> 1);
532
533 if(!v->rnd)
534 dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
535 else
536 dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
537 }
538
539 if(s->flags & CODEC_FLAG_GRAY) return;
540 /* Chroma MC always uses qpel bilinear */
541 uvmx = (uvmx&3)<<1;
542 uvmy = (uvmy&3)<<1;
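/* (uvmx & 3) << 1 converts the quarter-pel chroma fraction into the
 * eighth-pel dx/dy arguments expected by the H.264-style bilinear chroma
 * filters called below. */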
543 if(!v->rnd){
544 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
545 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
546 }else{
547 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
548 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
549 }
550}
551
552/** Do motion compensation for 4-MV macroblock - luminance block
553 */
554static void vc1_mc_4mv_luma(VC1Context *v, int n)
555{
556 MpegEncContext *s = &v->s;
557 DSPContext *dsp = &v->s.dsp;
558 uint8_t *srcY;
559 int dxy, mx, my, src_x, src_y;
560 int off;
561
562 if(!v->s.last_picture.f.data[0])return;
563 mx = s->mv[0][n][0];
564 my = s->mv[0][n][1];
565 srcY = s->last_picture.f.data[0];
566
567 off = s->linesize * 4 * (n&2) + (n&1) * 8;
568
569 src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
570 src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);
571
572 if(v->profile != PROFILE_ADVANCED){
573 src_x = av_clip( src_x, -16, s->mb_width * 16);
574 src_y = av_clip( src_y, -16, s->mb_height * 16);
575 }else{
576 src_x = av_clip( src_x, -17, s->avctx->coded_width);
577 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
578 }
579
580 srcY += src_y * s->linesize + src_x;
581
582 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
583 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
584 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
585 srcY -= s->mspel * (1 + s->linesize);
586 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
587 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
588 srcY = s->edge_emu_buffer;
589 /* if we deal with range reduction we need to scale source blocks */
590 if(v->rangeredfrm) {
591 int i, j;
592 uint8_t *src;
593
594 src = srcY;
595 for(j = 0; j < 9 + s->mspel*2; j++) {
596 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
597 src += s->linesize;
598 }
599 }
600 /* if we deal with intensity compensation we need to scale source blocks */
601 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
602 int i, j;
603 uint8_t *src;
604
605 src = srcY;
606 for(j = 0; j < 9 + s->mspel*2; j++) {
607 for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
608 src += s->linesize;
609 }
610 }
611 srcY += s->mspel * (1 + s->linesize);
612 }
613
614 if(s->mspel) {
615 dxy = ((my & 3) << 2) | (mx & 3);
616 v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
617 } else { // hpel mc - always used for luma
618 dxy = (my & 2) | ((mx & 2) >> 1);
619 if(!v->rnd)
620 dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
621 else
622 dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
623 }
624}
625
626static inline int median4(int a, int b, int c, int d)
627{
628 if(a < b) {
629 if(c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
630 else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
631 } else {
632 if(c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
633 else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
634 }
635}
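/* Example: median4(1, 5, 2, 8) sorts to {1, 2, 5, 8} and returns the
 * truncated mean of the two middle values, (2 + 5) / 2 = 3. */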
636
637
638/** Do motion compensation for 4-MV macroblock - both chroma blocks
639 */
640static void vc1_mc_4mv_chroma(VC1Context *v)
641{
642 MpegEncContext *s = &v->s;
643 DSPContext *dsp = &v->s.dsp;
644 uint8_t *srcU, *srcV;
645 int uvmx, uvmy, uvsrc_x, uvsrc_y;
646 int i, idx, tx = 0, ty = 0;
647 int mvx[4], mvy[4], intra[4];
648 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
649
650 if(!v->s.last_picture.f.data[0])return;
651 if(s->flags & CODEC_FLAG_GRAY) return;
652
653 for(i = 0; i < 4; i++) {
654 mvx[i] = s->mv[0][i][0];
655 mvy[i] = s->mv[0][i][1];
656 intra[i] = v->mb_type[0][s->block_index[i]];
657 }
658
659 /* calculate chroma MV vector from four luma MVs */
660 idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
661 if(!idx) { // all blocks are inter
662 tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
663 ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
664 } else if(count[idx] == 1) { // 3 inter blocks
665 switch(idx) {
666 case 0x1:
667 tx = mid_pred(mvx[1], mvx[2], mvx[3]);
668 ty = mid_pred(mvy[1], mvy[2], mvy[3]);
669 break;
670 case 0x2:
671 tx = mid_pred(mvx[0], mvx[2], mvx[3]);
672 ty = mid_pred(mvy[0], mvy[2], mvy[3]);
673 break;
674 case 0x4:
675 tx = mid_pred(mvx[0], mvx[1], mvx[3]);
676 ty = mid_pred(mvy[0], mvy[1], mvy[3]);
677 break;
678 case 0x8:
679 tx = mid_pred(mvx[0], mvx[1], mvx[2]);
680 ty = mid_pred(mvy[0], mvy[1], mvy[2]);
681 break;
682 }
683 } else if(count[idx] == 2) {
684 int t1 = 0, t2 = 0;
685 for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
686 for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
687 tx = (mvx[t1] + mvx[t2]) / 2;
688 ty = (mvy[t1] + mvy[t2]) / 2;
689 } else {
690 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
691 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
692 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
693 return; //no need to do MC for inter blocks
694 }
695
696 s->current_picture.f.motion_val[1][s->block_index[0]][0] = tx;
697 s->current_picture.f.motion_val[1][s->block_index[0]][1] = ty;
698 uvmx = (tx + ((tx&3) == 3)) >> 1;
699 uvmy = (ty + ((ty&3) == 3)) >> 1;
700 v->luma_mv[s->mb_x][0] = uvmx;
701 v->luma_mv[s->mb_x][1] = uvmy;
702 if(v->fastuvmc) {
703 uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
704 uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
705 }
706
707 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
708 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
709
710 if(v->profile != PROFILE_ADVANCED){
711 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
712 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
713 }else{
714 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
715 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
716 }
717
718 srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
719 srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
720 if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
721 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
722 || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
723 s->dsp.emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
724 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
725 s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
726 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
727 srcU = s->edge_emu_buffer;
728 srcV = s->edge_emu_buffer + 16;
729
730 /* if we deal with range reduction we need to scale source blocks */
731 if(v->rangeredfrm) {
732 int i, j;
733 uint8_t *src, *src2;
734
735 src = srcU; src2 = srcV;
736 for(j = 0; j < 9; j++) {
737 for(i = 0; i < 9; i++) {
738 src[i] = ((src[i] - 128) >> 1) + 128;
739 src2[i] = ((src2[i] - 128) >> 1) + 128;
740 }
741 src += s->uvlinesize;
742 src2 += s->uvlinesize;
743 }
744 }
745 /* if we deal with intensity compensation we need to scale source blocks */
746 if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
747 int i, j;
748 uint8_t *src, *src2;
749
750 src = srcU; src2 = srcV;
751 for(j = 0; j < 9; j++) {
752 for(i = 0; i < 9; i++) {
753 src[i] = v->lutuv[src[i]];
754 src2[i] = v->lutuv[src2[i]];
755 }
756 src += s->uvlinesize;
757 src2 += s->uvlinesize;
758 }
759 }
760 }
761
762 /* Chroma MC always uses qpel bilinear */
763 uvmx = (uvmx&3)<<1;
764 uvmy = (uvmy&3)<<1;
765 if(!v->rnd){
766 dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
767 dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
768 }else{
769 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
770 v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
771 }
772}
773
774/***********************************************************************/
775/**
776 * @name VC-1 Block-level functions
777 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
778 * @{
779 */
780
781/**
782 * @def GET_MQUANT
783 * @brief Get macroblock-level quantizer scale
784 */
785#define GET_MQUANT() \
786 if (v->dquantfrm) \
787 { \
788 int edges = 0; \
789 if (v->dqprofile == DQPROFILE_ALL_MBS) \
790 { \
791 if (v->dqbilevel) \
792 { \
793 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
794 } \
795 else \
796 { \
797 mqdiff = get_bits(gb, 3); \
798 if (mqdiff != 7) mquant = v->pq + mqdiff; \
799 else mquant = get_bits(gb, 5); \
800 } \
801 } \
802 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
803 edges = 1 << v->dqsbedge; \
804 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
805 edges = (3 << v->dqsbedge) % 15; \
806 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
807 edges = 15; \
808 if((edges&1) && !s->mb_x) \
809 mquant = v->altpq; \
810 if((edges&2) && s->first_slice_line) \
811 mquant = v->altpq; \
812 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
813 mquant = v->altpq; \
814 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
815 mquant = v->altpq; \
816 }
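/* Example for the edges bitmask: with DQPROFILE_DOUBLE_EDGES and
 * dqsbedge == 3, edges = (3 << 3) % 15 = 9 = binary 1001, so the leftmost
 * MB column (bit 0) and the bottom MB row (bit 3) use v->altpq. */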
817
818/**
819 * @def GET_MVDATA(_dmv_x, _dmv_y)
820 * @brief Get MV differentials
821 * @see MVDATA decoding from 8.3.5.2, p(1)20
822 * @param _dmv_x Horizontal differential for decoded MV
823 * @param _dmv_y Vertical differential for decoded MV
824 */
825#define GET_MVDATA(_dmv_x, _dmv_y) \
826 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table,\
827 VC1_MV_DIFF_VLC_BITS, 2); \
828 if (index > 36) \
829 { \
830 mb_has_coeffs = 1; \
831 index -= 37; \
832 } \
833 else mb_has_coeffs = 0; \
834 s->mb_intra = 0; \
835 if (!index) { _dmv_x = _dmv_y = 0; } \
836 else if (index == 35) \
837 { \
838 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
839 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
840 } \
841 else if (index == 36) \
842 { \
843 _dmv_x = 0; \
844 _dmv_y = 0; \
845 s->mb_intra = 1; \
846 } \
847 else \
848 { \
849 index1 = index%6; \
850 if (!s->quarter_sample && index1 == 5) val = 1; \
851 else val = 0; \
852 if(size_table[index1] - val > 0) \
853 val = get_bits(gb, size_table[index1] - val); \
854 else val = 0; \
855 sign = 0 - (val&1); \
856 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
857 \
858 index1 = index/6; \
859 if (!s->quarter_sample && index1 == 5) val = 1; \
860 else val = 0; \
861 if(size_table[index1] - val > 0) \
862 val = get_bits(gb, size_table[index1] - val); \
863 else val = 0; \
864 sign = 0 - (val&1); \
865 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
866 }
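/* Note on the decoding above: "sign = 0 - (val & 1); dmv = (sign ^ x) - sign"
 * is a branchless conditional negation - if val is odd, sign is all ones and
 * dmv becomes -x, otherwise dmv is x, where
 * x = (val >> 1) + offset_table[index1]. */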
867
868/** Predict and set motion vector
869 */
870static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
871{
872 MpegEncContext *s = &v->s;
873 int xy, wrap, off = 0;
874 int16_t *A, *B, *C;
875 int px, py;
876 int sum;
877
878 /* scale MV difference to be quad-pel */
879 dmv_x <<= 1 - s->quarter_sample;
880 dmv_y <<= 1 - s->quarter_sample;
881
882 wrap = s->b8_stride;
883 xy = s->block_index[n];
884
885 if(s->mb_intra){
886 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
887 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
888 s->current_picture.f.motion_val[1][xy][0] = 0;
889 s->current_picture.f.motion_val[1][xy][1] = 0;
890 if(mv1) { /* duplicate motion data for 1-MV block */
891 s->current_picture.f.motion_val[0][xy + 1][0] = 0;
892 s->current_picture.f.motion_val[0][xy + 1][1] = 0;
893 s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
894 s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
895 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
896 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
897 v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
898 s->current_picture.f.motion_val[1][xy + 1][0] = 0;
899 s->current_picture.f.motion_val[1][xy + 1][1] = 0;
900 s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
901 s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
902 s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
903 s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
904 }
905 return;
906 }
907
908 C = s->current_picture.f.motion_val[0][xy - 1];
909 A = s->current_picture.f.motion_val[0][xy - wrap];
910 if(mv1)
911 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
912 else {
913 //in 4-MV mode different blocks have different B predictor position
914 switch(n){
915 case 0:
916 off = (s->mb_x > 0) ? -1 : 1;
917 break;
918 case 1:
919 off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
920 break;
921 case 2:
922 off = 1;
923 break;
924 case 3:
925 off = -1;
926 }
927 }
928 B = s->current_picture.f.motion_val[0][xy - wrap + off];
929
930 if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
931 if(s->mb_width == 1) {
932 px = A[0];
933 py = A[1];
934 } else {
935 px = mid_pred(A[0], B[0], C[0]);
936 py = mid_pred(A[1], B[1], C[1]);
937 }
938 } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
939 px = C[0];
940 py = C[1];
941 } else {
942 px = py = 0;
943 }
944 /* Pullback MV as specified in 8.3.5.3.4 */
945 {
946 int qx, qy, X, Y;
947 qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
948 qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
949 X = (s->mb_width << 6) - 4;
950 Y = (s->mb_height << 6) - 4;
951 if(mv1) {
952 if(qx + px < -60) px = -60 - qx;
953 if(qy + py < -60) py = -60 - qy;
954 } else {
955 if(qx + px < -28) px = -28 - qx;
956 if(qy + py < -28) py = -28 - qy;
957 }
958 if(qx + px > X) px = X - qx;
959 if(qy + py > Y) py = Y - qy;
960 }
961 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
962 if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
963 if(is_intra[xy - wrap])
964 sum = FFABS(px) + FFABS(py);
965 else
966 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
967 if(sum > 32) {
968 if(get_bits1(&s->gb)) {
969 px = A[0];
970 py = A[1];
971 } else {
972 px = C[0];
973 py = C[1];
974 }
975 } else {
976 if(is_intra[xy - 1])
977 sum = FFABS(px) + FFABS(py);
978 else
979 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
980 if(sum > 32) {
981 if(get_bits1(&s->gb)) {
982 px = A[0];
983 py = A[1];
984 } else {
985 px = C[0];
986 py = C[1];
987 }
988 }
989 }
990 }
991 /* store MV using signed modulus of MV range defined in 4.11 */
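/* ((v + r) & (2 * r - 1)) - r wraps v into [-r, r - 1] for the power-of-two
 * ranges used here, e.g. with r = 64 a sum of 70 maps to
 * ((70 + 64) & 127) - 64 = -58. */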
992 s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
993 s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
994 if(mv1) { /* duplicate motion data for 1-MV block */
995 s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
996 s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
997 s->current_picture.f.motion_val[0][xy + wrap][0] = s->current_picture.f.motion_val[0][xy][0];
998 s->current_picture.f.motion_val[0][xy + wrap][1] = s->current_picture.f.motion_val[0][xy][1];
999 s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1000 s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1001 }
1002}
1003
1004/** Motion compensation for direct or interpolated blocks in B-frames
1005 */
1006static void vc1_interp_mc(VC1Context *v)
1007{
1008 MpegEncContext *s = &v->s;
1009 DSPContext *dsp = &v->s.dsp;
1010 uint8_t *srcY, *srcU, *srcV;
1011 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1012
1013 if(!v->s.next_picture.f.data[0])return;
1014
1015 mx = s->mv[1][0][0];
1016 my = s->mv[1][0][1];
1017 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1018 uvmy = (my + ((my & 3) == 3)) >> 1;
1019 if(v->fastuvmc) {
1020 uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
1021 uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
1022 }
1023 srcY = s->next_picture.f.data[0];
1024 srcU = s->next_picture.f.data[1];
1025 srcV = s->next_picture.f.data[2];
1026
1027 src_x = s->mb_x * 16 + (mx >> 2);
1028 src_y = s->mb_y * 16 + (my >> 2);
1029 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1030 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1031
1032 if(v->profile != PROFILE_ADVANCED){
1033 src_x = av_clip( src_x, -16, s->mb_width * 16);
1034 src_y = av_clip( src_y, -16, s->mb_height * 16);
1035 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1036 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1037 }else{
1038 src_x = av_clip( src_x, -17, s->avctx->coded_width);
1039 src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1040 uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1041 uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1042 }
1043
1044 srcY += src_y * s->linesize + src_x;
1045 srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1046 srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1047
1048 /* for grayscale we should not try to read from unknown area */
1049 if(s->flags & CODEC_FLAG_GRAY) {
1050 srcU = s->edge_emu_buffer + 18 * s->linesize;
1051 srcV = s->edge_emu_buffer + 18 * s->linesize;
1052 }
1053
1054 if(v->rangeredfrm
1055 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
1056 || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
1057 uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
1058
1059 srcY -= s->mspel * (1 + s->linesize);
1060 s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
1061 src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
1062 srcY = s->edge_emu_buffer;
1063 s->dsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
1064 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1065 s->dsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
1066 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
1067 srcU = uvbuf;
1068 srcV = uvbuf + 16;
1069 /* if we deal with range reduction we need to scale source blocks */
1070 if(v->rangeredfrm) {
1071 int i, j;
1072 uint8_t *src, *src2;
1073
1074 src = srcY;
1075 for(j = 0; j < 17 + s->mspel*2; j++) {
1076 for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
1077 src += s->linesize;
1078 }
1079 src = srcU; src2 = srcV;
1080 for(j = 0; j < 9; j++) {
1081 for(i = 0; i < 9; i++) {
1082 src[i] = ((src[i] - 128) >> 1) + 128;
1083 src2[i] = ((src2[i] - 128) >> 1) + 128;
1084 }
1085 src += s->uvlinesize;
1086 src2 += s->uvlinesize;
1087 }
1088 }
1089 srcY += s->mspel * (1 + s->linesize);
1090 }
1091
1092 if(s->mspel) {
1093 dxy = ((my & 3) << 2) | (mx & 3);
1094 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
1095 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
1096 srcY += s->linesize * 8;
1097 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
1098 v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
1099 } else { // hpel mc
1100 dxy = (my & 2) | ((mx & 2) >> 1);
1101
1102 if(!v->rnd)
1103 dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
1104 else
1105 dsp->avg_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
1106 }
1107
1108 if(s->flags & CODEC_FLAG_GRAY) return;
1109 /* Chroma MC always uses qpel bilinear */
1110 uvmx = (uvmx&3)<<1;
1111 uvmy = (uvmy&3)<<1;
1112 if(!v->rnd){
1113 dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1114 dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1115 }else{
1116 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
1117 v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
1118 }
1119}
1120
1121static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
1122{
1123 int n = bfrac;
1124
1125#if B_FRACTION_DEN==256
1126 if(inv)
1127 n -= 256;
1128 if(!qs)
1129 return 2 * ((value * n + 255) >> 9);
1130 return (value * n + 128) >> 8;
1131#else
1132 if(inv)
1133 n -= B_FRACTION_DEN;
1134 if(!qs)
1135 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
1136 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
1137#endif
1138}
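/* Worked example (B_FRACTION_DEN == 256, quarter-pel, usual arithmetic right
 * shift): with bfrac = 128 (temporal position 1/2) and value = 16, the
 * forward-scaled MV is (16 * 128 + 128) >> 8 = 8 and the backward-scaled MV
 * (inv = 1) is (16 * -128 + 128) >> 8 = -8, i.e. roughly value * frac and
 * -value * (1 - frac). */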
1139
1140/** Reconstruct motion vector for B-frame and do motion compensation
1141 */
1142static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
1143{
1144 if(v->use_ic) {
1145 v->mv_mode2 = v->mv_mode;
1146 v->mv_mode = MV_PMODE_INTENSITY_COMP;
1147 }
1148 if(direct) {
1149 vc1_mc_1mv(v, 0);
1150 vc1_interp_mc(v);
1151 if(v->use_ic) v->mv_mode = v->mv_mode2;
1152 return;
1153 }
1154 if(mode == BMV_TYPE_INTERPOLATED) {
1155 vc1_mc_1mv(v, 0);
1156 vc1_interp_mc(v);
1157 if(v->use_ic) v->mv_mode = v->mv_mode2;
1158 return;
1159 }
1160
1161 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
1162 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
1163 if(v->use_ic) v->mv_mode = v->mv_mode2;
1164}
1165
1166static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
1167{
1168 MpegEncContext *s = &v->s;
1169 int xy, wrap, off = 0;
1170 int16_t *A, *B, *C;
1171 int px, py;
1172 int sum;
1173 int r_x, r_y;
1174 const uint8_t *is_intra = v->mb_type[0];
1175
1176 r_x = v->range_x;
1177 r_y = v->range_y;
1178 /* scale MV difference to be quad-pel */
1179 dmv_x[0] <<= 1 - s->quarter_sample;
1180 dmv_y[0] <<= 1 - s->quarter_sample;
1181 dmv_x[1] <<= 1 - s->quarter_sample;
1182 dmv_y[1] <<= 1 - s->quarter_sample;
1183
1184 wrap = s->b8_stride;
1185 xy = s->block_index[0];
1186
1187 if(s->mb_intra) {
1188 s->current_picture.f.motion_val[0][xy][0] =
1189 s->current_picture.f.motion_val[0][xy][1] =
1190 s->current_picture.f.motion_val[1][xy][0] =
1191 s->current_picture.f.motion_val[1][xy][1] = 0;
b761659b
DB
1192 return;
1193 }
657ccb5a
DB
1194 s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
1195 s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
1196 s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
1197 s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
b761659b
DB
1198
1199 /* Pullback predicted motion vectors as specified in 8.4.5.4 */
1200 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
1201 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
1202 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
1203 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
1204 if(direct) {
1205 s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
1206 s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
1207 s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
1208 s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
1209 return;
1210 }
1211
1212 if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
1213 C = s->current_picture.f.motion_val[0][xy - 2];
1214 A = s->current_picture.f.motion_val[0][xy - wrap*2];
1215 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1216 B = s->current_picture.f.motion_val[0][xy - wrap*2 + off];
1217
1218 if(!s->mb_x) C[0] = C[1] = 0;
1219 if(!s->first_slice_line) { // predictor A is not out of bounds
1220 if(s->mb_width == 1) {
1221 px = A[0];
1222 py = A[1];
1223 } else {
1224 px = mid_pred(A[0], B[0], C[0]);
1225 py = mid_pred(A[1], B[1], C[1]);
1226 }
1227 } else if(s->mb_x) { // predictor C is not out of bounds
1228 px = C[0];
1229 py = C[1];
1230 } else {
1231 px = py = 0;
1232 }
1233 /* Pullback MV as specified in 8.3.5.3.4 */
1234 {
1235 int qx, qy, X, Y;
1236 if(v->profile < PROFILE_ADVANCED) {
1237 qx = (s->mb_x << 5);
1238 qy = (s->mb_y << 5);
1239 X = (s->mb_width << 5) - 4;
1240 Y = (s->mb_height << 5) - 4;
1241 if(qx + px < -28) px = -28 - qx;
1242 if(qy + py < -28) py = -28 - qy;
1243 if(qx + px > X) px = X - qx;
1244 if(qy + py > Y) py = Y - qy;
1245 } else {
1246 qx = (s->mb_x << 6);
1247 qy = (s->mb_y << 6);
1248 X = (s->mb_width << 6) - 4;
1249 Y = (s->mb_height << 6) - 4;
1250 if(qx + px < -60) px = -60 - qx;
1251 if(qy + py < -60) py = -60 - qy;
1252 if(qx + px > X) px = X - qx;
1253 if(qy + py > Y) py = Y - qy;
1254 }
1255 }
1256 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
1257 if(0 && !s->first_slice_line && s->mb_x) {
1258 if(is_intra[xy - wrap])
1259 sum = FFABS(px) + FFABS(py);
1260 else
1261 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
1262 if(sum > 32) {
1263 if(get_bits1(&s->gb)) {
1264 px = A[0];
1265 py = A[1];
1266 } else {
1267 px = C[0];
1268 py = C[1];
1269 }
1270 } else {
1271 if(is_intra[xy - 2])
1272 sum = FFABS(px) + FFABS(py);
1273 else
1274 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
1275 if(sum > 32) {
1276 if(get_bits1(&s->gb)) {
1277 px = A[0];
1278 py = A[1];
1279 } else {
1280 px = C[0];
1281 py = C[1];
1282 }
1283 }
1284 }
1285 }
1286 /* store MV using signed modulus of MV range defined in 4.11 */
1287 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
1288 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
1289 }
1290 if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
1291 C = s->current_picture.f.motion_val[1][xy - 2];
1292 A = s->current_picture.f.motion_val[1][xy - wrap*2];
1293 off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1294 B = s->current_picture.f.motion_val[1][xy - wrap*2 + off];
1295
1296 if(!s->mb_x) C[0] = C[1] = 0;
1297 if(!s->first_slice_line) { // predictor A is not out of bounds
1298 if(s->mb_width == 1) {
1299 px = A[0];
1300 py = A[1];
1301 } else {
1302 px = mid_pred(A[0], B[0], C[0]);
1303 py = mid_pred(A[1], B[1], C[1]);
1304 }
1305 } else if(s->mb_x) { // predictor C is not out of bounds
1306 px = C[0];
1307 py = C[1];
1308 } else {
1309 px = py = 0;
1310 }
1311 /* Pullback MV as specified in 8.3.5.3.4 */
1312 {
1313 int qx, qy, X, Y;
1314 if(v->profile < PROFILE_ADVANCED) {
1315 qx = (s->mb_x << 5);
1316 qy = (s->mb_y << 5);
1317 X = (s->mb_width << 5) - 4;
1318 Y = (s->mb_height << 5) - 4;
1319 if(qx + px < -28) px = -28 - qx;
1320 if(qy + py < -28) py = -28 - qy;
1321 if(qx + px > X) px = X - qx;
1322 if(qy + py > Y) py = Y - qy;
1323 } else {
1324 qx = (s->mb_x << 6);
1325 qy = (s->mb_y << 6);
1326 X = (s->mb_width << 6) - 4;
1327 Y = (s->mb_height << 6) - 4;
1328 if(qx + px < -60) px = -60 - qx;
1329 if(qy + py < -60) py = -60 - qy;
1330 if(qx + px > X) px = X - qx;
1331 if(qy + py > Y) py = Y - qy;
1332 }
1333 }
1334 /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
1335 if(0 && !s->first_slice_line && s->mb_x) {
1336 if(is_intra[xy - wrap])
1337 sum = FFABS(px) + FFABS(py);
1338 else
1339 sum = FFABS(px - A[0]) + FFABS(py - A[1]);
1340 if(sum > 32) {
1341 if(get_bits1(&s->gb)) {
1342 px = A[0];
1343 py = A[1];
1344 } else {
1345 px = C[0];
1346 py = C[1];
1347 }
1348 } else {
1349 if(is_intra[xy - 2])
1350 sum = FFABS(px) + FFABS(py);
1351 else
1352 sum = FFABS(px - C[0]) + FFABS(py - C[1]);
1353 if(sum > 32) {
1354 if(get_bits1(&s->gb)) {
1355 px = A[0];
1356 py = A[1];
1357 } else {
1358 px = C[0];
1359 py = C[1];
1360 }
1361 }
1362 }
1363 }
1364 /* store MV using signed modulus of MV range defined in 4.11 */
1365
1366 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
1367 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
1368 }
1369 s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
1370 s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
1371 s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
1372 s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
1373}
1374
1375/** Get predicted DC value for I-frames only
1376 * prediction dir: left=0, top=1
1377 * @param s MpegEncContext
1378 * @param overlap flag indicating that overlap filtering is used
1379 * @param pq integer part of picture quantizer
1380 * @param[in] n block index in the current MB
1381 * @param dc_val_ptr Pointer to DC predictor
1382 * @param dir_ptr Prediction direction for use in AC prediction
1383 */
1384static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
1385 int16_t **dc_val_ptr, int *dir_ptr)
1386{
1387 int a, b, c, wrap, pred, scale;
1388 int16_t *dc_val;
1389 static const uint16_t dcpred[32] = {
1390 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
1391 114, 102, 93, 85, 79, 73, 68, 64,
1392 60, 57, 54, 51, 49, 47, 45, 43,
1393 41, 39, 38, 37, 35, 34, 33
1394 };
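/* dcpred[q] is approximately 1024 / q (e.g. dcpred[8] == 128): the default
 * predictor used when no neighbour is available, i.e. a flat mid-grey DC
 * value expressed in units of the DC scale q; index 0 is unused. */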
1395
1396 /* find prediction - wmv3_dc_scale always used here in fact */
1397 if (n < 4) scale = s->y_dc_scale;
1398 else scale = s->c_dc_scale;
1399
1400 wrap = s->block_wrap[n];
1401 dc_val= s->dc_val[0] + s->block_index[n];
1402
1403 /* B A
1404 * C X
1405 */
1406 c = dc_val[ - 1];
1407 b = dc_val[ - 1 - wrap];
1408 a = dc_val[ - wrap];
1409
1410 if (pq < 9 || !overlap)
1411 {
1412 /* Set outer values */
1413 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
1414 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
1415 }
1416 else
1417 {
1418 /* Set outer values */
1419 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
1420 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
1421 }
1422
1423 if (abs(a - b) <= abs(b - c)) {
1424 pred = c;
1425 *dir_ptr = 1;//left
1426 } else {
1427 pred = a;
1428 *dir_ptr = 0;//top
1429 }
1430
1431 /* update predictor */
1432 *dc_val_ptr = &dc_val[0];
1433 return pred;
1434}
1435
1436
1437/** Get predicted DC value
1438 * prediction dir: left=0, top=1
1439 * @param s MpegEncContext
1440 * @param overlap flag indicating that overlap filtering is used
1441 * @param pq integer part of picture quantizer
1442 * @param[in] n block index in the current MB
1443 * @param a_avail flag indicating top block availability
1444 * @param c_avail flag indicating left block availability
1445 * @param dc_val_ptr Pointer to DC predictor
1446 * @param dir_ptr Prediction direction for use in AC prediction
1447 */
1448static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
1449 int a_avail, int c_avail,
1450 int16_t **dc_val_ptr, int *dir_ptr)
1451{
1452 int a, b, c, wrap, pred;
1453 int16_t *dc_val;
1454 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1455 int q1, q2 = 0;
1456
1457 wrap = s->block_wrap[n];
1458 dc_val= s->dc_val[0] + s->block_index[n];
1459
1460 /* B A
1461 * C X
1462 */
1463 c = dc_val[ - 1];
1464 b = dc_val[ - 1 - wrap];
1465 a = dc_val[ - wrap];
1466 /* scale predictors if needed */
1467 q1 = s->current_picture.f.qscale_table[mb_pos];
1468 if(c_avail && (n!= 1 && n!=3)) {
1469 q2 = s->current_picture.f.qscale_table[mb_pos - 1];
1470 if(q2 && q2 != q1)
1471 c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
1472 }
1473 if(a_avail && (n!= 2 && n!=3)) {
1474 q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
1475 if(q2 && q2 != q1)
1476 a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
1477 }
1478 if(a_avail && c_avail && (n!=3)) {
1479 int off = mb_pos;
1480 if(n != 1) off--;
1481 if(n != 2) off -= s->mb_stride;
1482 q2 = s->current_picture.f.qscale_table[off];
1483 if(q2 && q2 != q1)
1484 b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
1485 }
1486
1487 if(a_avail && c_avail) {
1488 if(abs(a - b) <= abs(b - c)) {
1489 pred = c;
1490 *dir_ptr = 1;//left
1491 } else {
1492 pred = a;
1493 *dir_ptr = 0;//top
1494 }
1495 } else if(a_avail) {
1496 pred = a;
1497 *dir_ptr = 0;//top
1498 } else if(c_avail) {
1499 pred = c;
1500 *dir_ptr = 1;//left
1501 } else {
1502 pred = 0;
1503 *dir_ptr = 1;//left
1504 }
1505
1506 /* update predictor */
1507 *dc_val_ptr = &dc_val[0];
1508 return pred;
1509}
1510
1511/** @} */ // Block group
1512
1513/**
1514 * @name VC1 Macroblock-level functions in Simple/Main Profiles
1515 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1516 * @{
1517 */
1518
1519static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
1520{
1521 int xy, wrap, pred, a, b, c;
1522
1523 xy = s->block_index[n];
1524 wrap = s->b8_stride;
1525
1526 /* B C
1527 * A X
1528 */
1529 a = s->coded_block[xy - 1 ];
1530 b = s->coded_block[xy - 1 - wrap];
1531 c = s->coded_block[xy - wrap];
1532
1533 if (b == c) {
1534 pred = a;
1535 } else {
1536 pred = c;
1537 }
1538
1539 /* store value */
1540 *coded_block_ptr = &s->coded_block[xy];
1541
1542 return pred;
1543}
1544
1545/**
1546 * Decode one AC coefficient
1547 * @param v The VC1 context
1548 * @param last Last coefficient
1549 * @param skip How many zero coefficients to skip
1550 * @param value Decoded AC coefficient value
1551 * @param codingset set of VLC to decode data
1552 * @see 8.1.3.4
1553 */
1554static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
1555{
1556 GetBitContext *gb = &v->s.gb;
1557 int index, escape, run = 0, level = 0, lst = 0;
1558
1559 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
1560 if (index != vc1_ac_sizes[codingset] - 1) {
1561 run = vc1_index_decode_table[codingset][index][0];
1562 level = vc1_index_decode_table[codingset][index][1];
1563 lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
1564 if(get_bits1(gb))
1565 level = -level;
1566 } else {
1567 escape = decode210(gb);
1568 if (escape != 2) {
1569 index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
1570 run = vc1_index_decode_table[codingset][index][0];
1571 level = vc1_index_decode_table[codingset][index][1];
1572 lst = index >= vc1_last_decode_table[codingset];
1573 if(escape == 0) {
1574 if(lst)
1575 level += vc1_last_delta_level_table[codingset][run];
1576 else
1577 level += vc1_delta_level_table[codingset][run];
1578 } else {
1579 if(lst)
1580 run += vc1_last_delta_run_table[codingset][level] + 1;
1581 else
1582 run += vc1_delta_run_table[codingset][level] + 1;
1583 }
1584 if(get_bits1(gb))
1585 level = -level;
1586 } else {
1587 int sign;
1588 lst = get_bits1(gb);
1589 if(v->s.esc3_level_length == 0) {
1590 if(v->pq < 8 || v->dquantfrm) { // table 59
1591 v->s.esc3_level_length = get_bits(gb, 3);
1592 if(!v->s.esc3_level_length)
1593 v->s.esc3_level_length = get_bits(gb, 2) + 8;
1594 } else { //table 60
1595 v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
1596 }
1597 v->s.esc3_run_length = 3 + get_bits(gb, 2);
1598 }
1599 run = get_bits(gb, v->s.esc3_run_length);
1600 sign = get_bits1(gb);
1601 level = get_bits(gb, v->s.esc3_level_length);
1602 if(sign)
1603 level = -level;
1604 }
1605 }
1606
1607 *last = lst;
1608 *skip = run;
1609 *value = level;
1610}
1611
1612/** Decode intra block in intra frames - should be faster than decode_intra_block
1613 * @param v VC1Context
1614 * @param block block to decode
1615 * @param[in] n subblock index
1616 * @param coded are AC coeffs present or not
1617 * @param codingset set of VLC to decode data
1618 */
1619static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
1620{
1621 GetBitContext *gb = &v->s.gb;
1622 MpegEncContext *s = &v->s;
1623 int dc_pred_dir = 0; /* Direction of the DC prediction used */
1624 int i;
1625 int16_t *dc_val;
1626 int16_t *ac_val, *ac_val2;
1627 int dcdiff;
1628
1629 /* Get DC differential */
1630 if (n < 4) {
1631 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
1632 } else {
1633 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
1634 }
1635 if (dcdiff < 0){
1636 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
1637 return -1;
1638 }
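    /* A non-zero DC VLC index is turned into the actual differential below:
     * index 119 is the escape symbol, after which the differential is read as
     * a raw 8-10 bit value depending on the quantizer; for quantizers 1 and 2
     * the VLC value is refined with 2 or 1 extra precision bits. A final bit
     * gives the sign. */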
1639 if (dcdiff)
1640 {
1641 if (dcdiff == 119 /* ESC index value */)
1642 {
1643 /* TODO: Optimize */
1644 if (v->pq == 1) dcdiff = get_bits(gb, 10);
1645 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
1646 else dcdiff = get_bits(gb, 8);
1647 }
1648 else
1649 {
1650 if (v->pq == 1)
1651 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
1652 else if (v->pq == 2)
1653 dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
1654 }
1655 if (get_bits1(gb))
1656 dcdiff = -dcdiff;
1657 }
1658
1659 /* Prediction */
1660 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
1661 *dc_val = dcdiff;
1662
1663 /* Store the quantized DC coeff, used for prediction */
1664 if (n < 4) {
1665 block[0] = dcdiff * s->y_dc_scale;
1666 } else {
1667 block[0] = dcdiff * s->c_dc_scale;
1668 }
1669 /* Skip ? */
1670 if (!coded) {
1671 goto not_coded;
1672 }
1673
1674 //AC Decoding
1675 i = 1;
1676
1677 {
1678 int last = 0, skip, value;
0724a674 1679 const uint8_t *zz_table;
b761659b
DB
1680 int scale;
1681 int k;
1682
1683 scale = v->pq * 2 + v->halfpq;
1684
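    /* With AC prediction enabled the scan pattern is adapted to the DC
     * prediction direction (row- vs column-oriented zigzag, zz_8x8[2]/[3]);
     * otherwise the regular intra scan (zz_8x8[1]) is used. */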
1685 if(v->s.ac_pred) {
1686 if(!dc_pred_dir)
1da6ea39 1687 zz_table = v->zz_8x8[2];
b761659b 1688 else
1da6ea39 1689 zz_table = v->zz_8x8[3];
b761659b 1690 } else
1da6ea39 1691 zz_table = v->zz_8x8[1];
b761659b
DB
1692
1693 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
1694 ac_val2 = ac_val;
1695 if(dc_pred_dir) //left
1696 ac_val -= 16;
1697 else //top
1698 ac_val -= 16 * s->block_wrap[n];
1699
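    /* Each block keeps 16 predictor entries: [1..7] are used when predicting
     * from the left neighbour, [9..15] when predicting from the top. ac_val
     * now points at the neighbouring block's entries (one block back for left
     * prediction, one block row back for top prediction), while ac_val2 still
     * points at the current block's slot, which is refilled below. */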
1700 while (!last) {
1701 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
1702 i += skip;
1703 if(i > 63)
1704 break;
1705 block[zz_table[i++]] = value;
1706 }
1707
1708 /* apply AC prediction if needed */
1709 if(s->ac_pred) {
1710 if(dc_pred_dir) { //left
1711 for(k = 1; k < 8; k++)
58bb6b7d 1712 block[k << v->left_blk_sh] += ac_val[k];
b761659b
DB
1713 } else { //top
1714 for(k = 1; k < 8; k++)
58bb6b7d 1715 block[k << v->top_blk_sh] += ac_val[k + 8];
b761659b
DB
1716 }
1717 }
1718 /* save AC coeffs for further prediction */
1719 for(k = 1; k < 8; k++) {
58bb6b7d
RB
1720 ac_val2[k] = block[k << v->left_blk_sh];
1721 ac_val2[k + 8] = block[k << v->top_blk_sh];
b761659b
DB
1722 }
1723
1724 /* scale AC coeffs */
1725 for(k = 1; k < 64; k++)
1726 if(block[k]) {
1727 block[k] *= scale;
1728 if(!v->pquantizer)
1729 block[k] += (block[k] < 0) ? -v->pq : v->pq;
1730 }
1731
1732 if(s->ac_pred) i = 63;
1733 }
1734
1735not_coded:
1736 if(!coded) {
1737 int k, scale;
1738 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
1739 ac_val2 = ac_val;
1740
1741 i = 0;
1742 scale = v->pq * 2 + v->halfpq;
1743 memset(ac_val2, 0, 16 * 2);
1744 if(dc_pred_dir) {//left
1745 ac_val -= 16;
1746 if(s->ac_pred)
1747 memcpy(ac_val2, ac_val, 8 * 2);
1748 } else {//top
1749 ac_val -= 16 * s->block_wrap[n];
1750 if(s->ac_pred)
1751 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
1752 }
1753
1754 /* apply AC prediction if needed */
1755 if(s->ac_pred) {
1756 if(dc_pred_dir) { //left
1757 for(k = 1; k < 8; k++) {
58bb6b7d
RB
1758 block[k << v->left_blk_sh] = ac_val[k] * scale;
1759 if(!v->pquantizer && block[k << v->left_blk_sh])
1760 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
b761659b
DB
1761 }
1762 } else { //top
1763 for(k = 1; k < 8; k++) {
58bb6b7d
RB
1764 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
1765 if(!v->pquantizer && block[k << v->top_blk_sh])
1766 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
b761659b
DB
1767 }
1768 }
1769 i = 63;
1770 }
1771 }
1772 s->block_last_index[n] = i;
1773
1774 return 0;
1775}
1776
1777/** Decode intra block in intra frames (advanced profile) - should be faster than decode_intra_block
1778 * @param v VC1Context
1779 * @param block block to decode
1780 * @param[in] n subblock number
1781 * @param coded whether AC coefficients are present for this block
1782 * @param codingset VLC coding set used to decode the data
1783 * @param mquant quantizer value for this macroblock
1784 */
1785static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
1786{
1787 GetBitContext *gb = &v->s.gb;
1788 MpegEncContext *s = &v->s;
1789 int dc_pred_dir = 0; /* Direction of the DC prediction used */
1790 int i;
1791 int16_t *dc_val;
1792 int16_t *ac_val, *ac_val2;
1793 int dcdiff;
1794 int a_avail = v->a_avail, c_avail = v->c_avail;
1795 int use_pred = s->ac_pred;
1796 int scale;
1797 int q1, q2 = 0;
1798 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1799
1800 /* Get DC differential */
1801 if (n < 4) {
1802 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
1803 } else {
1804 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
1805 }
1806 if (dcdiff < 0){
1807 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
1808 return -1;
1809 }
1810 if (dcdiff)
1811 {
1812 if (dcdiff == 119 /* ESC index value */)
1813 {
1814 /* TODO: Optimize */
1815 if (mquant == 1) dcdiff = get_bits(gb, 10);
1816 else if (mquant == 2) dcdiff = get_bits(gb, 9);
1817 else dcdiff = get_bits(gb, 8);
1818 }
1819 else
1820 {
1821 if (mquant == 1)
1822 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
1823 else if (mquant == 2)
1824 dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
1825 }
1826 if (get_bits1(gb))
1827 dcdiff = -dcdiff;
1828 }
1829
1830 /* Prediction */
1831 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
1832 *dc_val = dcdiff;
1833
1834 /* Store the quantized DC coeff, used for prediction */
1835 if (n < 4) {
1836 block[0] = dcdiff * s->y_dc_scale;
1837 } else {
1838 block[0] = dcdiff * s->c_dc_scale;
1839 }
1840
1841 //AC Decoding
1842 i = 1;
1843
1844 /* check if AC is needed at all */
1845 if(!a_avail && !c_avail) use_pred = 0;
1846 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
1847 ac_val2 = ac_val;
1848
1849 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
1850
1851 if(dc_pred_dir) //left
1852 ac_val -= 16;
1853 else //top
1854 ac_val -= 16 * s->block_wrap[n];
1855
657ccb5a
DB
1856 q1 = s->current_picture.f.qscale_table[mb_pos];
1857 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.f.qscale_table[mb_pos - 1];
1858 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
b761659b
DB
1859 if(dc_pred_dir && n==1) q2 = q1;
1860 if(!dc_pred_dir && n==2) q2 = q1;
1861 if(n==3) q2 = q1;
1862
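    /* q1 is the current MB quantizer, q2 the one of the MB the predictors were
     * taken from. When they differ, the stored AC predictors are rescaled by
     * roughly q2/q1 using the fixed-point reciprocal table ff_vc1_dqscale
     * (dqscale[x - 1] ~ 2^18 / x) in the branches below. */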
1863 if(coded) {
1864 int last = 0, skip, value;
0724a674 1865 const uint8_t *zz_table;
b761659b
DB
1866 int k;
1867
1868 if(v->s.ac_pred) {
1869 if(!dc_pred_dir)
1da6ea39 1870 zz_table = v->zz_8x8[2];
b761659b 1871 else
1da6ea39 1872 zz_table = v->zz_8x8[3];
b761659b 1873 } else
1da6ea39 1874 zz_table = v->zz_8x8[1];
b761659b
DB
1875
1876 while (!last) {
1877 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
1878 i += skip;
1879 if(i > 63)
1880 break;
1881 block[zz_table[i++]] = value;
1882 }
1883
1884 /* apply AC prediction if needed */
1885 if(use_pred) {
1886 /* scale predictors if needed*/
1887 if(q2 && q1!=q2) {
1888 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
1889 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
1890
1891 if(dc_pred_dir) { //left
1892 for(k = 1; k < 8; k++)
58bb6b7d 1893 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
b761659b
DB
1894 } else { //top
1895 for(k = 1; k < 8; k++)
58bb6b7d 1896 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
b761659b
DB
1897 }
1898 } else {
1899 if(dc_pred_dir) { //left
1900 for(k = 1; k < 8; k++)
58bb6b7d 1901 block[k << v->left_blk_sh] += ac_val[k];
b761659b
DB
1902 } else { //top
1903 for(k = 1; k < 8; k++)
58bb6b7d 1904 block[k << v->top_blk_sh] += ac_val[k + 8];
b761659b
DB
1905 }
1906 }
1907 }
1908 /* save AC coeffs for further prediction */
1909 for(k = 1; k < 8; k++) {
58bb6b7d
RB
1910 ac_val2[k ] = block[k << v->left_blk_sh];
1911 ac_val2[k + 8] = block[k << v->top_blk_sh];
b761659b
DB
1912 }
1913
1914 /* scale AC coeffs */
1915 for(k = 1; k < 64; k++)
1916 if(block[k]) {
1917 block[k] *= scale;
1918 if(!v->pquantizer)
1919 block[k] += (block[k] < 0) ? -mquant : mquant;
1920 }
1921
1922 if(use_pred) i = 63;
1923 } else { // no AC coeffs
1924 int k;
1925
1926 memset(ac_val2, 0, 16 * 2);
1927 if(dc_pred_dir) {//left
1928 if(use_pred) {
1929 memcpy(ac_val2, ac_val, 8 * 2);
1930 if(q2 && q1!=q2) {
1931 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
1932 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
1933 for(k = 1; k < 8; k++)
1934 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
1935 }
1936 }
1937 } else {//top
1938 if(use_pred) {
1939 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
1940 if(q2 && q1!=q2) {
1941 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
1942 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
1943 for(k = 1; k < 8; k++)
1944 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
1945 }
1946 }
1947 }
1948
1949 /* apply AC prediction if needed */
1950 if(use_pred) {
1951 if(dc_pred_dir) { //left
1952 for(k = 1; k < 8; k++) {
58bb6b7d
RB
1953 block[k << v->left_blk_sh] = ac_val2[k] * scale;
1954 if(!v->pquantizer && block[k << v->left_blk_sh])
1955 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
b761659b
DB
1956 }
1957 } else { //top
1958 for(k = 1; k < 8; k++) {
58bb6b7d
RB
1959 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
1960 if(!v->pquantizer && block[k << v->top_blk_sh])
1961 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
b761659b
DB
1962 }
1963 }
1964 i = 63;
1965 }
1966 }
1967 s->block_last_index[n] = i;
1968
1969 return 0;
1970}
1971
1972/** Decode intra block in inter frames - more generic version than vc1_decode_i_block
1973 * @param v VC1Context
1974 * @param block block to decode
1975 * @param[in] n subblock index
1976 * @param coded whether AC coefficients are present for this block
1977 * @param mquant block quantizer
1978 * @param codingset VLC coding set used to decode the data
1979 */
1980static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
1981{
1982 GetBitContext *gb = &v->s.gb;
1983 MpegEncContext *s = &v->s;
1984 int dc_pred_dir = 0; /* Direction of the DC prediction used */
1985 int i;
1986 int16_t *dc_val;
1987 int16_t *ac_val, *ac_val2;
1988 int dcdiff;
1989 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1990 int a_avail = v->a_avail, c_avail = v->c_avail;
1991 int use_pred = s->ac_pred;
1992 int scale;
1993 int q1, q2 = 0;
1994
010f98f9
JGG
1995 s->dsp.clear_block(block);
1996
b761659b
DB
1997 /* XXX: Guard against dumb values of mquant */
1998 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
1999
2000 /* Set DC scale - y and c use the same */
2001 s->y_dc_scale = s->y_dc_scale_table[mquant];
2002 s->c_dc_scale = s->c_dc_scale_table[mquant];
2003
2004 /* Get DC differential */
2005 if (n < 4) {
2006 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2007 } else {
2008 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2009 }
2010 if (dcdiff < 0){
2011 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2012 return -1;
2013 }
2014 if (dcdiff)
2015 {
2016 if (dcdiff == 119 /* ESC index value */)
2017 {
2018 /* TODO: Optimize */
2019 if (mquant == 1) dcdiff = get_bits(gb, 10);
2020 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2021 else dcdiff = get_bits(gb, 8);
2022 }
2023 else
2024 {
2025 if (mquant == 1)
2026 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2027 else if (mquant == 2)
2028 dcdiff = (dcdiff<<1) + get_bits1(gb) - 1;
2029 }
2030 if (get_bits1(gb))
2031 dcdiff = -dcdiff;
2032 }
2033
2034 /* Prediction */
2035 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2036 *dc_val = dcdiff;
2037
2038 /* Store the quantized DC coeff, used for prediction */
2039
2040 if (n < 4) {
2041 block[0] = dcdiff * s->y_dc_scale;
2042 } else {
2043 block[0] = dcdiff * s->c_dc_scale;
2044 }
2045
2046 //AC Decoding
2047 i = 1;
2048
2049 /* check if AC is needed at all and adjust direction if needed */
2050 if(!a_avail) dc_pred_dir = 1;
2051 if(!c_avail) dc_pred_dir = 0;
2052 if(!a_avail && !c_avail) use_pred = 0;
2053 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2054 ac_val2 = ac_val;
2055
2056 scale = mquant * 2 + v->halfpq;
2057
2058 if(dc_pred_dir) //left
2059 ac_val -= 16;
2060 else //top
2061 ac_val -= 16 * s->block_wrap[n];
2062
657ccb5a
DB
2063 q1 = s->current_picture.f.qscale_table[mb_pos];
2064 if(dc_pred_dir && c_avail && mb_pos) q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2065 if(!dc_pred_dir && a_avail && mb_pos >= s->mb_stride) q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
b761659b
DB
2066 if(dc_pred_dir && n==1) q2 = q1;
2067 if(!dc_pred_dir && n==2) q2 = q1;
2068 if(n==3) q2 = q1;
2069
2070 if(coded) {
2071 int last = 0, skip, value;
b761659b
DB
2072 int k;
2073
b761659b
DB
2074 while (!last) {
2075 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2076 i += skip;
2077 if(i > 63)
2078 break;
1da6ea39 2079 block[v->zz_8x8[0][i++]] = value;
b761659b
DB
2080 }
2081
2082 /* apply AC prediction if needed */
2083 if(use_pred) {
2084 /* scale predictors if needed*/
2085 if(q2 && q1!=q2) {
2086 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2087 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2088
2089 if(dc_pred_dir) { //left
2090 for(k = 1; k < 8; k++)
58bb6b7d 2091 block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
b761659b
DB
2092 } else { //top
2093 for(k = 1; k < 8; k++)
58bb6b7d 2094 block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
b761659b
DB
2095 }
2096 } else {
2097 if(dc_pred_dir) { //left
2098 for(k = 1; k < 8; k++)
58bb6b7d 2099 block[k << v->left_blk_sh] += ac_val[k];
b761659b
DB
2100 } else { //top
2101 for(k = 1; k < 8; k++)
58bb6b7d 2102 block[k << v->top_blk_sh] += ac_val[k + 8];
b761659b
DB
2103 }
2104 }
2105 }
2106 /* save AC coeffs for further prediction */
2107 for(k = 1; k < 8; k++) {
58bb6b7d
RB
2108 ac_val2[k ] = block[k << v->left_blk_sh];
2109 ac_val2[k + 8] = block[k << v->top_blk_sh];
b761659b
DB
2110 }
2111
2112 /* scale AC coeffs */
2113 for(k = 1; k < 64; k++)
2114 if(block[k]) {
2115 block[k] *= scale;
2116 if(!v->pquantizer)
2117 block[k] += (block[k] < 0) ? -mquant : mquant;
2118 }
2119
2120 if(use_pred) i = 63;
2121 } else { // no AC coeffs
2122 int k;
2123
2124 memset(ac_val2, 0, 16 * 2);
2125 if(dc_pred_dir) {//left
2126 if(use_pred) {
2127 memcpy(ac_val2, ac_val, 8 * 2);
2128 if(q2 && q1!=q2) {
2129 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2130 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2131 for(k = 1; k < 8; k++)
2132 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2133 }
2134 }
2135 } else {//top
2136 if(use_pred) {
2137 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2138 if(q2 && q1!=q2) {
2139 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2140 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2141 for(k = 1; k < 8; k++)
2142 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2143 }
2144 }
2145 }
2146
2147 /* apply AC prediction if needed */
2148 if(use_pred) {
2149 if(dc_pred_dir) { //left
2150 for(k = 1; k < 8; k++) {
58bb6b7d
RB
2151 block[k << v->left_blk_sh] = ac_val2[k] * scale;
2152 if(!v->pquantizer && block[k << v->left_blk_sh])
2153 block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
b761659b
DB
2154 }
2155 } else { //top
2156 for(k = 1; k < 8; k++) {
58bb6b7d
RB
2157 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2158 if(!v->pquantizer && block[k << v->top_blk_sh])
2159 block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
b761659b
DB
2160 }
2161 }
2162 i = 63;
2163 }
2164 }
2165 s->block_last_index[n] = i;
2166
2167 return 0;
2168}
2169
2170/** Decode P block
2171 */
2172static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block,
c47d3835 2173 uint8_t *dst, int linesize, int skip_block, int *ttmb_out)
b761659b
DB
2174{
2175 MpegEncContext *s = &v->s;
2176 GetBitContext *gb = &s->gb;
2177 int i, j;
2178 int subblkpat = 0;
2179 int scale, off, idx, last, skip, value;
2180 int ttblk = ttmb & 7;
2181 int pat = 0;
2182
010f98f9
JGG
2183 s->dsp.clear_block(block);
2184
b761659b
DB
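    /* The block transform type (8x8, 8x4, 4x8 or 4x4) comes either from the
     * per-MB value in ttmb or from a per-block VLC; subblkpat then marks which
     * sub-blocks actually carry coefficients. Directional variants such as
     * 8X4_TOP are folded into the generic type plus subblkpat further down. */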
2185 if(ttmb == -1) {
2186 ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
2187 }
2188 if(ttblk == TT_4X4) {
2189 subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
2190 }
42ff9d7a
KS
2191 if((ttblk != TT_8X8 && ttblk != TT_4X4)
2192 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
2193 || (!v->res_rtm_flag && !first_block))) {
b761659b
DB
2194 subblkpat = decode012(gb);
2195 if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
2196 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
2197 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
2198 }
2199 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
2200
2201 // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
2202 if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
2203 subblkpat = 2 - (ttblk == TT_8X4_TOP);
2204 ttblk = TT_8X4;
2205 }
2206 if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
2207 subblkpat = 2 - (ttblk == TT_4X8_LEFT);
2208 ttblk = TT_4X8;
2209 }
2210 switch(ttblk) {
2211 case TT_8X8:
2212 pat = 0xF;
2213 i = 0;
2214 last = 0;
2215 while (!last) {
2216 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
2217 i += skip;
2218 if(i > 63)
2219 break;
1da6ea39 2220 idx = v->zz_8x8[0][i++];
b761659b
DB
2221 block[idx] = value * scale;
2222 if(!v->pquantizer)
2223 block[idx] += (block[idx] < 0) ? -mquant : mquant;
2224 }
2225 if(!skip_block){
4f717c69 2226 if(i==1)
12802ec0 2227 v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
4f717c69 2228 else{
18b6a69c
RB
2229 v->vc1dsp.vc1_inv_trans_8x8(block);
2230 s->dsp.add_pixels_clamped(block, dst, linesize);
4f717c69 2231 }
b761659b
DB
2232 }
2233 break;
2234 case TT_4X4:
2235 pat = ~subblkpat & 0xF;
2236 for(j = 0; j < 4; j++) {
2237 last = subblkpat & (1 << (3 - j));
2238 i = 0;
2239 off = (j & 1) * 4 + (j & 2) * 16;
2240 while (!last) {
2241 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
2242 i += skip;
2243 if(i > 15)
2244 break;
2245 idx = ff_vc1_simple_progressive_4x4_zz[i++];
2246 block[idx + off] = value * scale;
2247 if(!v->pquantizer)
2248 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
2249 }
2250 if(!(subblkpat & (1 << (3 - j))) && !skip_block){
4f717c69 2251 if(i==1)
12802ec0 2252 v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j&1)*4 + (j&2)*2*linesize, linesize, block + off);
4f717c69 2253 else
12802ec0 2254 v->vc1dsp.vc1_inv_trans_4x4(dst + (j&1)*4 + (j&2)*2*linesize, linesize, block + off);
b761659b
DB
2255 }
2256 }
2257 break;
2258 case TT_8X4:
2259 pat = ~((subblkpat & 2)*6 + (subblkpat & 1)*3) & 0xF;
2260 for(j = 0; j < 2; j++) {
2261 last = subblkpat & (1 << (1 - j));
2262 i = 0;
2263 off = j * 32;
2264 while (!last) {
2265 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
2266 i += skip;
2267 if(i > 31)
2268 break;
2269 idx = v->zz_8x4[i++]+off;
2270 block[idx] = value * scale;
2271 if(!v->pquantizer)
2272 block[idx] += (block[idx] < 0) ? -mquant : mquant;
2273 }
2274 if(!(subblkpat & (1 << (1 - j))) && !skip_block){
4f717c69 2275 if(i==1)
12802ec0 2276 v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j*4*linesize, linesize, block + off);
4f717c69 2277 else
12802ec0 2278 v->vc1dsp.vc1_inv_trans_8x4(dst + j*4*linesize, linesize, block + off);
b761659b
DB
2279 }
2280 }
2281 break;
2282 case TT_4X8:
2283 pat = ~(subblkpat*5) & 0xF;
2284 for(j = 0; j < 2; j++) {
2285 last = subblkpat & (1 << (1 - j));
2286 i = 0;
2287 off = j * 4;
2288 while (!last) {
2289 vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
2290 i += skip;
2291 if(i > 31)
2292 break;
2293 idx = v->zz_4x8[i++]+off;
2294 block[idx] = value * scale;
2295 if(!v->pquantizer)
2296 block[idx] += (block[idx] < 0) ? -mquant : mquant;
2297 }
2298 if(!(subblkpat & (1 << (1 - j))) && !skip_block){
4f717c69 2299 if(i==1)
12802ec0 2300 v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j*4, linesize, block + off);
4f717c69 2301 else
12802ec0 2302 v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
b761659b
DB
2303 }
2304 }
2305 break;
2306 }
c47d3835
RB
2307 if (ttmb_out)
2308 *ttmb_out |= ttblk << (n * 4);
b761659b
DB
2309 return pat;
2310}
2311
2312/** @} */ // Macroblock group
2313
2314static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
2315static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
2316
c47d3835
RB
2317static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
2318{
2319 MpegEncContext *s = &v->s;
2320 int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
2321 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
2322 mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
2323 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
2324 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
2325 uint8_t *dst;
2326
2327 if(block_num > 3) {
2328 dst = s->dest[block_num - 3];
2329 } else {
2330 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
2331 }
1cf82cab 2332 if (s->mb_y != s->end_mb_y || block_num < 2) {
c47d3835
RB
2333 int16_t (*mv)[2];
2334 int mv_stride;
2335
2336 if(block_num > 3) {
2337 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
2338 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
2339 mv = &v->luma_mv[s->mb_x - s->mb_stride];
2340 mv_stride = s->mb_stride;
2341 } else {
2342 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4)) :
2343 (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
2344 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4)) :
2345 (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
2346 mv_stride = s->b8_stride;
657ccb5a 2347 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
c47d3835
RB
2348 }
2349
2350 if (bottom_is_intra & 1 || block_is_intra & 1 ||
2351 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
2352 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
2353 } else {
2354 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
2355 if(idx == 3) {
2356 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
2357 } else if (idx) {
2358 if (idx == 1)
2359 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
2360 else
2361 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
2362 }
2363 }
2364 }
2365
2366 dst -= 4 * linesize;
2367 ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xf;
2368 if (ttblk == TT_4X4 || ttblk == TT_8X4) {
2369 idx = (block_cbp | (block_cbp >> 2)) & 3;
2370 if (idx == 3) {
2371 v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
2372 } else if (idx) {
2373 if (idx == 1)
2374 v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
2375 else
2376 v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
2377 }
2378 }
2379}
2380
2381static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
2382{
2383 MpegEncContext *s = &v->s;
2384 int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
2385 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
2386 mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
2387 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
2388 int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
2389 uint8_t *dst;
2390
2391 if (block_num > 3) {
2392 dst = s->dest[block_num - 3] - 8 * linesize;
2393 } else {
2394 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
2395 }
2396
2397 if (s->mb_x != s->mb_width || !(block_num & 5)) {
2398 int16_t (*mv)[2];
2399
2400 if(block_num > 3) {
2401 right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
2402 right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
2403 mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
2404 }else{
2405 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4)) :
2406 (mb_cbp >> ((block_num + 1) * 4));
2407 right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4)) :
2408 (mb_is_intra >> ((block_num + 1) * 4));
657ccb5a 2409 mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
c47d3835
RB
2410 }
2411 if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
2412 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
2413 } else {
2414 idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
2415 if (idx == 5) {
2416 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
2417 } else if (idx) {
2418 if (idx == 1)
2419 v->vc1dsp.vc1_h_loop_filter4(dst+4*linesize, linesize, v->pq);
2420 else
2421 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
2422 }
2423 }
2424 }
2425
2426 dst -= 4;
2427 ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
2428 if (ttblk == TT_4X4 || ttblk == TT_4X8) {
2429 idx = (block_cbp | (block_cbp >> 1)) & 5;
2430 if (idx == 5) {
2431 v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
2432 } else if (idx) {
2433 if (idx == 1)
2434 v->vc1dsp.vc1_h_loop_filter4(dst + linesize*4, linesize, v->pq);
2435 else
2436 v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
2437 }
2438 }
2439}
2440
2441static void vc1_apply_p_loop_filter(VC1Context *v)
2442{
2443 MpegEncContext *s = &v->s;
2444 int i;
2445
2446 for (i = 0; i < 6; i++) {
2447 vc1_apply_p_v_loop_filter(v, i);
2448 }
2449
2450 /* V always precedes H, therefore H is run for the MB one position behind
2451 * the one V was just applied to; at the end of a row, we catch up to complete the row */
2452 if (s->mb_x) {
2453 for (i = 0; i < 6; i++) {
2454 vc1_apply_p_h_loop_filter(v, i);
2455 }
2456 if (s->mb_x == s->mb_width - 1) {
2457 s->mb_x++;
2458 ff_update_block_index(s);
2459 for (i = 0; i < 6; i++) {
2460 vc1_apply_p_h_loop_filter(v, i);
2461 }
2462 }
2463 }
2464}
2465
b761659b
DB
2466/** Decode one P-frame MB (in Simple/Main profile)
2467 */
2468static int vc1_decode_p_mb(VC1Context *v)
2469{
2470 MpegEncContext *s = &v->s;
2471 GetBitContext *gb = &s->gb;
18b6a69c 2472 int i, j;
b761659b
DB
2473 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2474 int cbp; /* cbp decoding stuff */
2475 int mqdiff, mquant; /* MB quantization */
2476 int ttmb = v->ttfrm; /* MB Transform type */
2477
2478 int mb_has_coeffs = 1; /* last_flag */
2479 int dmv_x, dmv_y; /* Differential MV components */
2480 int index, index1; /* LUT indexes */
2481 int val, sign; /* temp values */
2482 int first_block = 1;
2483 int dst_idx, off;
2484 int skipped, fourmv;
c47d3835 2485 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
b761659b
DB
2486
2487 mquant = v->pq; /* lossy initialization */
2488
2489 if (v->mv_type_is_raw)
2490 fourmv = get_bits1(gb);
2491 else
2492 fourmv = v->mv_type_mb_plane[mb_pos];
2493 if (v->skip_is_raw)
2494 skipped = get_bits1(gb);
2495 else
2496 skipped = v->s.mbskip_table[mb_pos];
2497
b761659b
DB
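    /* A P macroblock is coded either in 1MV mode (a single motion vector shared
     * by all luma blocks, with the chroma vector derived from it) or in 4MV mode
     * (one vector per luma block). The mode and the skip flag are read from the
     * bitstream when raw, otherwise taken from the decoded bitplanes. */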
2498 if (!fourmv) /* 1MV mode */
2499 {
2500 if (!skipped)
2501 {
2502 GET_MVDATA(dmv_x, dmv_y);
2503
2504 if (s->mb_intra) {
657ccb5a
DB
2505 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
2506 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
b761659b 2507 }
657ccb5a 2508 s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
c47d3835 2509 vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
b761659b
DB
2510
2511 /* FIXME Set DC val for inter block ? */
2512 if (s->mb_intra && !mb_has_coeffs)
2513 {
2514 GET_MQUANT();
2515 s->ac_pred = get_bits1(gb);
2516 cbp = 0;
2517 }
2518 else if (mb_has_coeffs)
2519 {
2520 if (s->mb_intra) s->ac_pred = get_bits1(gb);
2521 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
2522 GET_MQUANT();
2523 }
2524 else
2525 {
2526 mquant = v->pq;
2527 cbp = 0;
2528 }
657ccb5a 2529 s->current_picture.f.qscale_table[mb_pos] = mquant;
b761659b
DB
2530
2531 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
2532 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
2533 VC1_TTMB_VLC_BITS, 2);
2534 if(!s->mb_intra) vc1_mc_1mv(v, 0);
2535 dst_idx = 0;
2536 for (i=0; i<6; i++)
2537 {
2538 s->dc_val[0][s->block_index[i]] = 0;
2539 dst_idx += i >> 2;
2540 val = ((cbp >> (5 - i)) & 1);
2541 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
2542 v->mb_type[0][s->block_index[i]] = s->mb_intra;
2543 if(s->mb_intra) {
2544 /* check if prediction blocks A and C are available */
2545 v->a_avail = v->c_avail = 0;
2546 if(i == 2 || i == 3 || !s->first_slice_line)
2547 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
2548 if(i == 1 || i == 3 || s->mb_x)
2549 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
2550
2551 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
2552 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
18b6a69c
RB
2553 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
2554 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
2555 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
b761659b
DB
2556 if(v->pq >= 9 && v->overlap) {
2557 if(v->c_avail)
12802ec0 2558 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
b761659b 2559 if(v->a_avail)
12802ec0 2560 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
b761659b 2561 }
b761659b 2562 block_cbp |= 0xF << (i << 2);
c47d3835 2563 block_intra |= 1 << i;
b761659b 2564 } else if(val) {
c47d3835 2565 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
b761659b
DB
2566 block_cbp |= pat << (i << 2);
2567 if(!v->ttmbf && ttmb < 8) ttmb = -1;
2568 first_block = 0;
2569 }
2570 }
2571 }
2572 else //Skipped
2573 {
2574 s->mb_intra = 0;
2575 for(i = 0; i < 6; i++) {
2576 v->mb_type[0][s->block_index[i]] = 0;
2577 s->dc_val[0][s->block_index[i]] = 0;
2578 }
657ccb5a
DB
2579 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
2580 s->current_picture.f.qscale_table[mb_pos] = 0;
c47d3835 2581 vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
b761659b 2582 vc1_mc_1mv(v, 0);
b761659b
DB
2583 }
2584 } //1MV mode
2585 else //4MV mode
2586 {
2587 if (!skipped /* unskipped MB */)
2588 {
2589 int intra_count = 0, coded_inter = 0;
2590 int is_intra[6], is_coded[6];
2591 /* Get CBPCY */
2592 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
2593 for (i=0; i<6; i++)
2594 {
2595 val = ((cbp >> (5 - i)) & 1);
2596 s->dc_val[0][s->block_index[i]] = 0;
2597 s->mb_intra = 0;
2598 if(i < 4) {
2599 dmv_x = dmv_y = 0;
2600 s->mb_intra = 0;
2601 mb_has_coeffs = 0;
2602 if(val) {
2603 GET_MVDATA(dmv_x, dmv_y);
2604 }
c47d3835 2605 vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
b761659b
DB
2606 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
2607 intra_count += s->mb_intra;
2608 is_intra[i] = s->mb_intra;
2609 is_coded[i] = mb_has_coeffs;
2610 }
2611 if(i&4){
2612 is_intra[i] = (intra_count >= 3);
2613 is_coded[i] = val;
2614 }
2615 if(i == 4) vc1_mc_4mv_chroma(v);
2616 v->mb_type[0][s->block_index[i]] = is_intra[i];
2617 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
2618 }
2619 // if there are no coded blocks then don't do anything more
b761659b 2620 dst_idx = 0;
c47d3835
RB
2621 if(!intra_count && !coded_inter)
2622 goto end;
b761659b 2623 GET_MQUANT();
657ccb5a 2624 s->current_picture.f.qscale_table[mb_pos] = mquant;
b761659b
DB
2625 /* test if block is intra and has pred */
2626 {
2627 int intrapred = 0;
2628 for(i=0; i<6; i++)
2629 if(is_intra[i]) {
2630 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
2631 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
2632 intrapred = 1;
2633 break;
2634 }
2635 }
2636 if(intrapred)s->ac_pred = get_bits1(gb);
2637 else s->ac_pred = 0;
2638 }
2639 if (!v->ttmbf && coded_inter)
2640 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
2641 for (i=0; i<6; i++)
2642 {
2643 dst_idx += i >> 2;
2644 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
2645 s->mb_intra = is_intra[i];
2646 if (is_intra[i]) {
2647 /* check if prediction blocks A and C are available */
2648 v->a_avail = v->c_avail = 0;
2649 if(i == 2 || i == 3 || !s->first_slice_line)
2650 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
2651 if(i == 1 || i == 3 || s->mb_x)
2652 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
2653
2654 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
2655 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
18b6a69c
RB
2656 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
2657 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
2658 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
b761659b
DB
2659 if(v->pq >= 9 && v->overlap) {
2660 if(v->c_avail)
12802ec0 2661 v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
b761659b 2662 if(v->a_avail)
12802ec0 2663 v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
b761659b 2664 }
b761659b 2665 block_cbp |= 0xF << (i << 2);
c47d3835 2666 block_intra |= 1 << i;
b761659b 2667 } else if(is_coded[i]) {
c47d3835 2668 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
b761659b
DB
2669 block_cbp |= pat << (i << 2);
2670 if(!v->ttmbf && ttmb < 8) ttmb = -1;
2671 first_block = 0;
2672 }
2673 }
b761659b
DB
2674 }
2675 else //Skipped MB
2676 {
2677 s->mb_intra = 0;
657ccb5a 2678 s->current_picture.f.qscale_table[mb_pos] = 0;
b761659b
DB
2679 for (i=0; i<6; i++) {
2680 v->mb_type[0][s->block_index[i]] = 0;
2681 s->dc_val[0][s->block_index[i]] = 0;
2682 }
2683 for (i=0; i<4; i++)
2684 {
c47d3835 2685 vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
b761659b
DB
2686 vc1_mc_4mv_luma(v, i);
2687 }
2688 vc1_mc_4mv_chroma(v);
657ccb5a 2689 s->current_picture.f.qscale_table[mb_pos] = 0;
b761659b
DB
2690 }
2691 }
c47d3835 2692end:
b761659b 2693 v->cbp[s->mb_x] = block_cbp;
c47d3835
RB
2694 v->ttblk[s->mb_x] = block_tt;
2695 v->is_intra[s->mb_x] = block_intra;
b761659b 2696
c47d3835 2697 return 0;
b761659b
DB
2698}
2699
2700/** Decode one B-frame MB (in Main profile)
2701 */
2702static void vc1_decode_b_mb(VC1Context *v)
2703{
2704 MpegEncContext *s = &v->s;
2705 GetBitContext *gb = &s->gb;
18b6a69c 2706 int i, j;
b761659b
DB
2707 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2708 int cbp = 0; /* cbp decoding stuff */
2709 int mqdiff, mquant; /* MB quantization */
2710 int ttmb = v->ttfrm; /* MB Transform type */
2711 int mb_has_coeffs = 0; /* last_flag */
2712 int index, index1; /* LUT indexes */
2713 int val, sign; /* temp values */
2714 int first_block = 1;
2715 int dst_idx, off;
2716 int skipped, direct;
2717 int dmv_x[2], dmv_y[2];
2718 int bmvtype = BMV_TYPE_BACKWARD;
2719
2720 mquant = v->pq; /* lossy initialization */
2721 s->mb_intra = 0;
2722
2723 if (v->dmb_is_raw)
2724 direct = get_bits1(gb);
2725 else
2726 direct = v->direct_mb_plane[mb_pos];
2727 if (v->skip_is_raw)
2728 skipped = get_bits1(gb);
2729 else
2730 skipped = v->s.mbskip_table[mb_pos];
2731
b761659b
DB
2732 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
2733 for(i = 0; i < 6; i++) {
2734 v->mb_type[0][s->block_index[i]] = 0;
2735 s->dc_val[0][s->block_index[i]] = 0;
2736 }
657ccb5a 2737 s->current_picture.f.qscale_table[mb_pos] = 0;
b761659b
DB
2738
2739 if (!direct) {
2740 if (!skipped) {
2741 GET_MVDATA(dmv_x[0], dmv_y[0]);
2742 dmv_x[1] = dmv_x[0];
2743 dmv_y[1] = dmv_y[0];
2744 }
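    /* The B MV type is coded with decode012(): whether code 0 maps to backward
     * or forward prediction depends on BFRACTION, i.e. on whether this B frame
     * lies temporally closer to one reference or the other; code 2 selects
     * interpolated prediction. */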
2745 if(skipped || !s->mb_intra) {
2746 bmvtype = decode012(gb);
2747 switch(bmvtype) {
2748 case 0:
2749 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
2750 break;
2751 case 1:
2752 bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
2753 break;
2754 case 2:
2755 bmvtype = BMV_TYPE_INTERPOLATED;
2756 dmv_x[0] = dmv_y[0] = 0;
2757 }
2758 }
2759 }
2760 for(i = 0; i < 6; i++)
2761 v->mb_type[0][s->block_index[i]] = s->mb_intra;
2762
2763 if (skipped) {
2764 if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
2765 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
2766 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
2767 return;
2768 }
2769 if (direct) {
2770 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
2771 GET_MQUANT();
2772 s->mb_intra = 0;
657ccb5a 2773 s->current_picture.f.qscale_table[mb_pos] = mquant;
b761659b
DB
2774 if(!v->ttmbf)
2775 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
2776 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
2777 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
2778 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
2779 } else {
2780 if(!mb_has_coeffs && !s->mb_intra) {
2781 /* no coded blocks - effectively skipped */
2782 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
2783 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
2784 return;
2785 }
2786 if(s->mb_intra && !mb_has_coeffs) {
2787 GET_MQUANT();
657ccb5a 2788 s->current_picture.f.qscale_table[mb_pos] = mquant;
b761659b
DB
2789 s->ac_pred = get_bits1(gb);
2790 cbp = 0;
2791 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
2792 } else {
2793 if(bmvtype == BMV_TYPE_INTERPOLATED) {
2794 GET_MVDATA(dmv_x[0], dmv_y[0]);
2795 if(!mb_has_coeffs) {
2796 /* interpolated skipped block */
2797 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
2798 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
2799 return;
2800 }
2801 }
2802 vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
2803 if(!s->mb_intra) {
2804 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
2805 }
2806 if(s->mb_intra)
2807 s->ac_pred = get_bits1(gb);
2808 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
2809 GET_MQUANT();
657ccb5a 2810 s->current_picture.f.qscale_table[mb_pos] = mquant;
b761659b
DB
2811 if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
2812 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
2813 }
2814 }
2815 dst_idx = 0;
2816 for (i=0; i<6; i++)
2817 {
2818 s->dc_val[0][s->block_index[i]] = 0;
2819 dst_idx += i >> 2;
2820 val = ((cbp >> (5 - i)) & 1);
2821 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
2822 v->mb_type[0][s->block_index[i]] = s->mb_intra;
2823 if(s->mb_intra) {
2824 /* check if prediction blocks A and C are available */
2825 v->a_avail = v->c_avail = 0;
2826 if(i == 2 || i == 3 || !s->first_slice_line)
2827 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
2828 if(i == 1 || i == 3 || s->mb_x)
2829 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
2830
2831 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
2832 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
18b6a69c
RB
2833 v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
2834 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
2835 s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
b761659b 2836 } else if(val) {
c47d3835 2837 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), NULL);
b761659b
DB
2838 if(!v->ttmbf && ttmb < 8) ttmb = -1;
2839 first_block = 0;
2840 }
2841 }
2842}
2843
2844/** Decode blocks of I-frame
2845 */
2846static void vc1_decode_i_blocks(VC1Context *v)
2847{
18b6a69c 2848 int k, j;
b761659b
DB
2849 MpegEncContext *s = &v->s;
2850 int cbp, val;
2851 uint8_t *coded_val;
2852 int mb_pos;
2853
2854 /* select coding mode used for VLC table selection */
2855 switch(v->y_ac_table_index){
2856 case 0:
2857 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
2858 break;
2859 case 1:
2860 v->codingset = CS_HIGH_MOT_INTRA;
2861 break;
2862 case 2:
2863 v->codingset = CS_MID_RATE_INTRA;
2864 break;
2865 }
2866
2867 switch(v->c_ac_table_index){
2868 case 0:
2869 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
2870 break;
2871 case 1:
2872 v->codingset2 = CS_HIGH_MOT_INTER;
2873 break;
2874 case 2:
2875 v->codingset2 = CS_MID_RATE_INTER;
2876 break;
2877 }
2878
2879 /* Set DC scale - y and c use the same */
2880 s->y_dc_scale = s->y_dc_scale_table[v->pq];
2881 s->c_dc_scale = s->c_dc_scale_table[v->pq];
2882
2883 //do frame decode
2884 s->mb_x = s->mb_y = 0;
2885 s->mb_intra = 1;
2886 s->first_slice_line = 1;
2887 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
351653a5
JGG
2888 s->mb_x = 0;
2889 ff_init_block_index(s);
2890 for(; s->mb_x < s->mb_width; s->mb_x++) {
bbfd2e7a 2891 uint8_t *dst[6];
b761659b 2892 ff_update_block_index(s);
bbfd2e7a
RB
2893 dst[0] = s->dest[0];
2894 dst[1] = dst[0] + 8;
2895 dst[2] = s->dest[0] + s->linesize * 8;
2896 dst[3] = dst[2] + 8;
2897 dst[4] = s->dest[1];
2898 dst[5] = s->dest[2];
b761659b
DB
2899 s->dsp.clear_blocks(s->block[0]);
2900 mb_pos = s->mb_x + s->mb_y * s->mb_width;
657ccb5a
DB
2901 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
2902 s->current_picture.f.qscale_table[mb_pos] = v->pq;
2903 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
2904 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
b761659b
DB
2905
2906 // do actual MB decoding and displaying
2907 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
2908 v->s.ac_pred = get_bits1(&v->s.gb);
2909
2910 for(k = 0; k < 6; k++) {
2911 val = ((cbp >> (5 - k)) & 1);
2912
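                /* For the four luma blocks the transmitted CBP bit is only a
                 * prediction residual: it is XORed with the coded-block
                 * prediction from the neighbours and the actual status is
                 * stored back for later predictions. Chroma bits are sent
                 * directly. */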
2913 if (k < 4) {
2914 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
2915 val = val ^ pred;
2916 *coded_val = val;
2917 }
2918 cbp |= val << (5 - k);
2919
2920 vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
2921
bbfd2e7a 2922 if (k > 3 && (s->flags & CODEC_FLAG_GRAY)) continue;
18b6a69c
RB
2923 v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
2924 if(v->pq >= 9 && v->overlap) {
2925 if (v->rangeredfrm) for(j = 0; j < 64; j++) s->block[k][j] <<= 1;
2926 s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
2927 } else {
2928 if (v->rangeredfrm) for(j = 0; j < 64; j++) s->block[k][j] = (s->block[k][j] - 64) << 1;
2929 s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
2930 }
b761659b
DB
2931 }
2932
b761659b
DB
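            /* Overlap smoothing (PQUANT >= 9 and OVERLAP set): filter the 8x8
             * edges shared with the already decoded left and top neighbours,
             * skipping picture/slice borders, plus the internal edges of this
             * macroblock. */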
2933 if(v->pq >= 9 && v->overlap) {
2934 if(s->mb_x) {
12802ec0
RB
2935 v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
2936 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
b761659b 2937 if(!(s->flags & CODEC_FLAG_GRAY)) {
12802ec0
RB
2938 v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
2939 v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
b761659b
DB
2940 }
2941 }
12802ec0
RB
2942 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
2943 v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
b761659b 2944 if(!s->first_slice_line) {
12802ec0
RB
2945 v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
2946 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
b761659b 2947 if(!(s->flags & CODEC_FLAG_GRAY)) {
12802ec0
RB
2948 v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
2949 v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
b761659b
DB
2950 }
2951 }
12802ec0
RB
2952 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
2953 v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
b761659b 2954 }
12802ec0 2955 if(v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
b761659b
DB
2956
2957 if(get_bits_count(&s->gb) > v->bits) {
2958 ff_er_add_slice(s, 0, 0, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
2959 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
2960 return;
2961 }
2962 }
3683b7e5
DC
2963 if (!v->s.loop_filter)
2964 ff_draw_horiz_band(s, s->mb_y * 16, 16);
2965 else if (s->mb_y)
2966 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
2967
b761659b
DB
2968 s->first_slice_line = 0;
2969 }
3683b7e5
DC
2970 if (v->s.loop_filter)
2971 ff_draw_horiz_band(s, (s->mb_height-1)*16, 16);
b761659b
DB
2972 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
2973}
2974
2975/** Decode blocks of I-frame for advanced profile
2976 */
5c9f147e 2977static void vc1_decode_i_blocks_adv(VC1Context *v)
b761659b 2978{
70aa916e 2979 int k;
b761659b
DB
2980 MpegEncContext *s = &v->s;
2981 int cbp, val;
2982 uint8_t *coded_val;
2983 int mb_pos;
2984 int mquant = v->pq;
2985 int mqdiff;
b761659b
DB
2986 GetBitContext *gb = &s->gb;
2987
2988 /* select coding mode used for VLC table selection */
2989 switch(v->y_ac_table_index){
2990 case 0:
2991 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
2992 break;
2993 case 1:
2994 v->codingset = CS_HIGH_MOT_INTRA;
2995 break;
2996 case 2:
2997 v->codingset = CS_MID_RATE_INTRA;
2998 break;
2999 }
3000
3001 switch(v->c_ac_table_index){
3002 case 0:
3003 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3004 break;
3005 case 1:
3006 v->codingset2 = CS_HIGH_MOT_INTER;
3007 break;
3008 case 2:
3009 v->codingset2 = CS_MID_RATE_INTER;
3010 break;
3011 }
3012
3013 //do frame decode
3014 s->mb_x = s->mb_y = 0;
3015 s->mb_intra = 1;
3016 s->first_slice_line = 1;
5c9f147e
RB
3017 s->mb_y = s->start_mb_y;
3018 if (s->start_mb_y) {
f44d6445
RB
3019 s->mb_x = 0;
3020 ff_init_block_index(s);
3021 memset(&s->coded_block[s->block_index[0]-s->b8_stride], 0,
d4b99744 3022 (1 + s->b8_stride) * sizeof(*s->coded_block));
f44d6445 3023 }
5c9f147e 3024 for(; s->mb_y < s->end_mb_y; s->mb_y++) {
351653a5
JGG
3025 s->mb_x = 0;
3026 ff_init_block_index(s);
3027 for(;s->mb_x < s->mb_width; s->mb_x++) {
7d2e03af 3028 DCTELEM (*block)[64] = v->block[v->cur_blk_idx];
b761659b 3029 ff_update_block_index(s);
7d2e03af 3030 s->dsp.clear_blocks(block[0]);
b761659b 3031 mb_pos = s->mb_x + s->mb_y * s->mb_stride;
657ccb5a
DB
3032 s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3033 s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3034 s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
b761659b
DB
3035
3036 // do actual MB decoding and displaying
3037 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
3038 if(v->acpred_is_raw)
3039 v->s.ac_pred = get_bits1(&v->s.gb);
3040 else
3041 v->s.ac_pred = v->acpred_plane[mb_pos];
3042
7d2e03af
RB
3043 if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
3044 v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
b761659b
DB
3045
3046 GET_MQUANT();
3047
657ccb5a 3048 s->current_picture.f.qscale_table[mb_pos] = mquant;
b761659b
DB
3049 /* Set DC scale - y and c use the same */
3050 s->y_dc_scale = s->y_dc_scale_table[mquant];
3051 s->c_dc_scale = s->c_dc_scale_table[mquant];
3052
3053 for(k = 0; k < 6; k++) {
3054 val = ((cbp >> (5 - k)) & 1);
3055
3056 if (k < 4) {
3057 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3058 val = val ^ pred;
3059 *coded_val = val;
3060 }
3061 cbp |= val << (5 - k);
3062
3063 v->a_avail = !s->first_slice_line || (k==2 || k==3);
3064 v->c_avail = !!s->mb_x || (k==1 || k==3);
3065
7d2e03af 3066 vc1_decode_i_block_adv(v, block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
b761659b 3067
70aa916e 3068 if (k > 3 && (s->flags & CODEC_FLAG_GRAY)) continue;
7d2e03af 3069 v->vc1dsp.vc1_inv_trans_8x8(block[k]);
b761659b
DB
3070 }
3071
7d2e03af
RB
3072 vc1_smooth_overlap_filter_iblk(v);
3073 vc1_put_signed_blocks_clamped(v);
3074 if(v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
b761659b
DB
3075
3076 if(get_bits_count(&s->gb) > v->bits) {
5c9f147e 3077 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
b761659b
DB
3078 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3079 return;
3080 }
3081 }
3683b7e5
DC
3082 if (!v->s.loop_filter)
3083 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3084 else if (s->mb_y)
3085 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
b761659b
DB
3086 s->first_slice_line = 0;
3087 }
7d2e03af
RB
3088
3089 /* raw bottom MB row */
3090 s->mb_x = 0;
3091 ff_init_block_index(s);
3092 for(;s->mb_x < s->mb_width; s->mb_x++) {
3093 ff_update_block_index(s);
3094 vc1_put_signed_blocks_clamped(v);
3095 if(v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
3096 }
3683b7e5 3097 if (v->s.loop_filter)
1cf82cab 3098 ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
5c9f147e 3099 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, (AC_END|DC_END|MV_END));
b761659b
DB
3100}
3101
5c9f147e 3102static void vc1_decode_p_blocks(VC1Context *v)
b761659b
DB
3103{
3104 MpegEncContext *s = &v->s;
c47d3835 3105 int apply_loop_filter;
b761659b
DB
3106
3107 /* select coding mode used for VLC table selection */
3108 switch(v->c_ac_table_index){
3109 case 0:
3110 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3111 break;
3112 case 1:
3113 v->codingset = CS_HIGH_MOT_INTRA;
3114 break;
3115 case 2:
3116 v->codingset = CS_MID_RATE_INTRA;
3117 break;
3118 }
3119
3120 switch(v->c_ac_table_index){
3121 case 0:
3122 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3123 break;
3124 case 1:
3125 v->codingset2 = CS_HIGH_MOT_INTER;
3126 break;
3127 case 2:
3128 v->codingset2 = CS_MID_RATE_INTER;
3129 break;
3130 }
3131
c47d3835 3132 apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
b761659b
DB
3133 s->first_slice_line = 1;
3134 memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
5c9f147e 3135 for(s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
351653a5
JGG
3136 s->mb_x = 0;
3137 ff_init_block_index(s);
3138 for(; s->mb_x < s->mb_width; s->mb_x++) {
b761659b 3139 ff_update_block_index(s);
b761659b
DB
3140
3141 vc1_decode_p_mb(v);
5c9f147e 3142 if (s->mb_y != s->start_mb_y && apply_loop_filter)
c47d3835 3143 vc1_apply_p_loop_filter(v);
b761659b 3144 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5c9f147e 3145 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
b761659b
DB
3146 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3147 return;
3148 }
3149 }
3150 memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0])*s->mb_stride);
c47d3835
RB
3151 memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0])*s->mb_stride);
3152 memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3153 memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0])*s->mb_stride);
5c9f147e 3154 if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
b761659b
DB
3155 s->first_slice_line = 0;
3156 }
c47d3835
RB
3157 if (apply_loop_filter) {
3158 s->mb_x = 0;
3159 ff_init_block_index(s);
3160 for (; s->mb_x < s->mb_width; s->mb_x++) {
3161 ff_update_block_index(s);
3162 vc1_apply_p_loop_filter(v);
3163 }
3164 }
5c9f147e
RB
3165 if (s->end_mb_y >= s->start_mb_y)
3166 ff_draw_horiz_band(s, (s->end_mb_y-1) * 16, 16);
3167 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, (AC_END|DC_END|MV_END));
b761659b
DB
3168}
3169
5c9f147e 3170static void vc1_decode_b_blocks(VC1Context *v)
b761659b
DB
3171{
3172 MpegEncContext *s = &v->s;
3173
3174 /* select coding mode used for VLC table selection */
3175 switch(v->c_ac_table_index){
3176 case 0:
3177 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3178 break;
3179 case 1:
3180 v->codingset = CS_HIGH_MOT_INTRA;
3181 break;
3182 case 2:
3183 v->codingset = CS_MID_RATE_INTRA;
3184 break;
3185 }
3186
3187 switch(v->c_ac_table_index){
3188 case 0:
3189 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3190 break;
3191 case 1:
3192 v->codingset2 = CS_HIGH_MOT_INTER;
3193 break;
3194 case 2:
3195 v->codingset2 = CS_MID_RATE_INTER;
3196 break;
3197 }
3198
3199 s->first_slice_line = 1;
5c9f147e 3200 for(s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
351653a5
JGG
3201 s->mb_x = 0;
3202 ff_init_block_index(s);
3203 for(; s->mb_x < s->mb_width; s->mb_x++) {
b761659b 3204 ff_update_block_index(s);
b761659b
DB
3205
3206 vc1_decode_b_mb(v);
3207 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5c9f147e 3208 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_x, s->mb_y, (AC_END|DC_END|MV_END));
b761659b
DB
3209 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3210 return;
3211 }
12802ec0 3212 if(v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
b761659b 3213 }
3683b7e5
DC
3214 if (!v->s.loop_filter)
3215 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3216 else if (s->mb_y)
3217 ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
b761659b
DB
3218 s->first_slice_line = 0;
3219 }
3683b7e5 3220 if (v->s.loop_filter)
1cf82cab 3221 ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
5c9f147e 3222 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, (AC_END|DC_END|MV_END));
b761659b
DB
3223}
3224
3225static void vc1_decode_skip_blocks(VC1Context *v)
3226{
3227 MpegEncContext *s = &v->s;
3228
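    /* A skipped P frame is an exact copy of the previous picture: each MB row
     * of the reference is copied into the current frame and the picture is
     * marked as P below. */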
1cf82cab 3229 ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, (AC_END|DC_END|MV_END));
b761659b 3230 s->first_slice_line = 1;
1cf82cab 3231 for(s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
b761659b
DB
3232 s->mb_x = 0;
3233 ff_init_block_index(s);
3234 ff_update_block_index(s);
657ccb5a
DB
3235 memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
3236 memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3237 memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
b761659b
DB
3238 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3239 s->first_slice_line = 0;
3240 }
975a1447 3241 s->pict_type = AV_PICTURE_TYPE_P;
b761659b
DB
3242}
3243
5c9f147e 3244static void vc1_decode_blocks(VC1Context *v)
b761659b
DB
3245{
3246
3247 v->s.esc3_level_length = 0;
3248 if(v->x8_type){
3249 ff_intrax8_decode_picture(&v->x8, 2*v->pq+v->halfpq, v->pq*(!v->pquantizer) );
3250 }else{
7d2e03af
RB
3251 v->cur_blk_idx = 0;
3252 v->left_blk_idx = -1;
3253 v->topleft_blk_idx = 1;
3254 v->top_blk_idx = 2;
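        /* Indices into the small ring of per-MB block buffers (v->block[]),
         * used by the advanced-profile I path to delay reconstruction by one
         * macroblock so that overlap smoothing and the in-loop deblocking
         * filter can see the needed neighbours. */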
b761659b 3255 switch(v->s.pict_type) {
975a1447 3256 case AV_PICTURE_TYPE_I:
b761659b 3257 if(v->profile == PROFILE_ADVANCED)
5c9f147e 3258 vc1_decode_i_blocks_adv(v);
b761659b
DB
3259 else
3260 vc1_decode_i_blocks(v);
3261 break;
975a1447 3262 case AV_PICTURE_TYPE_P:
b761659b
DB
3263 if(v->p_frame_skipped)
3264 vc1_decode_skip_blocks(v);
3265 else
5c9f147e 3266 vc1_decode_p_blocks(v);
b761659b 3267 break;
975a1447 3268 case AV_PICTURE_TYPE_B:
b761659b
DB
3269 if(v->bi_type){
3270 if(v->profile == PROFILE_ADVANCED)
5c9f147e 3271 vc1_decode_i_blocks_adv(v);
b761659b
DB
3272 else
3273 vc1_decode_i_blocks(v);
3274 }else
5c9f147e 3275 vc1_decode_b_blocks(v);
b761659b
DB
3276 break;
3277 }
3278 }
3279}
3280
45ecda85
AD
3281#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
3282
3283typedef struct {
3284 /**
3285 * Transform coefficients for both sprites in 16.16 fixed point format,
3286 * in the order they appear in the bitstream:
3287 * x scale
3288 * rotation 1 (unused)
3289 * x offset
3290 * rotation 2 (unused)
3291 * y scale
3292 * y offset
3293 * alpha
3294 */
3295 int coefs[2][7];
3296
3297 int effect_type, effect_flag;
3298 int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
3299 int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
3300} SpriteData;
3301
3302static inline int get_fp_val(GetBitContext* gb)
768c5251 3303{
45ecda85 3304 return (get_bits_long(gb, 30) - (1<<29)) << 1;
768c5251
AD
3305}
3306
45ecda85 3307static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
768c5251 3308{
45ecda85 3309 c[1] = c[3] = 0;
768c5251
AD
3310
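    /* The 2-bit selector picks how much of the transform is transmitted:
     * 0 = translation only (unit scales), 1 = single scale shared by x and y,
     * 2 = independent x and y scales, 3 = full set including the (unused)
     * rotation terms. The y offset always follows, then an optional alpha
     * which defaults to 1.0 (1 << 16). */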
3311 switch (get_bits(gb, 2)) {
3312 case 0:
45ecda85
AD
3313 c[0] = 1<<16;
3314 c[2] = get_fp_val(gb);
3315 c[4] = 1<<16;
768c5251
AD
3316 break;
3317 case 1:
45ecda85
AD
3318 c[0] = c[4] = get_fp_val(gb);
3319 c[2] = get_fp_val(gb);
768c5251
AD
3320 break;
3321 case 2:
45ecda85
AD
3322 c[0] = get_fp_val(gb);
3323 c[2] = get_fp_val(gb);
3324 c[4] = get_fp_val(gb);
768c5251
AD
3325 break;
3326 case 3:
45ecda85
AD
3327 c[0] = get_fp_val(gb);
3328 c[1] = get_fp_val(gb);
3329 c[2] = get_fp_val(gb);
3330 c[3] = get_fp_val(gb);
3331 c[4] = get_fp_val(gb);
768c5251
AD
3332 break;
3333 }
45ecda85 3334 c[5] = get_fp_val(gb);
768c5251 3335 if (get_bits1(gb))
45ecda85 3336 c[6] = get_fp_val(gb);
768c5251 3337 else
45ecda85 3338 c[6] = 1<<16;
768c5251
AD
3339}
3340
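/* Parse the transform coefficients for one or two sprites, followed by the
 * optional effect block (type and two parameter lists) and the effect flag,
 * logging everything at debug level. */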
45ecda85 3341static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
768c5251 3342{
45ecda85
AD
3343 AVCodecContext *avctx = v->s.avctx;
3344 int sprite, i;
3345
3346 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
3347 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
3348 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
3349 av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
3350 av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
768c5251 3351 for (i = 0; i < 7; i++)
45ecda85
AD
3352 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
3353 sd->coefs[sprite][i] / (1<<16),
3354 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1<<16));
3355 av_log(avctx, AV_LOG_DEBUG, "\n");
768c5251 3356 }
45ecda85 3357
768c5251 3358 skip_bits(gb, 2);
45ecda85
AD
 3359 if ((sd->effect_type = get_bits_long(gb, 30))) {
3360 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
768c5251 3361 case 7:
45ecda85 3362 vc1_sprite_parse_transform(gb, sd->effect_params1);
768c5251
AD
3363 break;
3364 case 14:
45ecda85
AD
3365 vc1_sprite_parse_transform(gb, sd->effect_params1);
3366 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
768c5251
AD
3367 break;
3368 default:
45ecda85
AD
3369 for (i = 0; i < sd->effect_pcount1; i++)
3370 sd->effect_params1[i] = get_fp_val(gb);
768c5251 3371 }
45ecda85 3372 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
768c5251 3373 // effect 13 is simple alpha blending and matches the opacity above
45ecda85
AD
3374 av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
3375 for (i = 0; i < sd->effect_pcount1; i++)
 3376 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
3377 sd->effect_params1[i] / (1<<16),
3378 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1<<16));
3379 av_log(avctx, AV_LOG_DEBUG, "\n");
768c5251
AD
3380 }
3381
45ecda85
AD
3382 sd->effect_pcount2 = get_bits(gb, 16);
3383 if (sd->effect_pcount2 > 10) {
3384 av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
768c5251 3385 return;
45ecda85
AD
3386 } else if (sd->effect_pcount2) {
3387 i = -1;
3388 av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
3389 while (++i < sd->effect_pcount2){
3390 sd->effect_params2[i] = get_fp_val(gb);
 3391 av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
3392 sd->effect_params2[i] / (1<<16),
3393 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1<<16));
768c5251 3394 }
45ecda85 3395 av_log(avctx, AV_LOG_DEBUG, "\n");
768c5251
AD
3396 }
3397 }
45ecda85
AD
 3398 if ((sd->effect_flag = get_bits1(gb)))
3399 av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
768c5251
AD
3400
3401 if (get_bits_count(gb) >= gb->size_in_bits +
45ecda85
AD
3402 (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
3403 av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
768c5251 3404 if (get_bits_count(gb) < gb->size_in_bits - 8)
45ecda85
AD
3405 av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
3406}
3407
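/* Composite the sprite(s) into the output frame. Each output row is built by
 * horizontally resampling the required source row(s) of each sprite, then
 * vertically interpolating between two adjacent resampled rows and, when two
 * sprites are present, alpha-blending them. */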
3408static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
3409{
3410 int i, plane, row, sprite;
3411 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
3412 uint8_t* src_h[2][2];
3413 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
3414 int ysub[2];
3415 MpegEncContext *s = &v->s;
3416
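    /* clamp the per-sprite start offsets and per-pixel advances so that
       sampling never reads outside the sprite planes */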
3417 for (i = 0; i < 2; i++) {
 3418 xoff[i] = av_clip(sd->coefs[i][2], 0, (v->sprite_width - 1) << 16);
3419 xadv[i] = sd->coefs[i][0];
3420 if (xadv[i] != 1<<16 || (v->sprite_width<<16) - (v->output_width<<16) - xoff[i])
3421 xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
3422
 3423 yoff[i] = av_clip(sd->coefs[i][5], 0, (v->sprite_height - 1) << 16);
3424 yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height<<16) - yoff[i]) / v->output_height);
3425 }
3426 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
3427
3428 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
3429 int width = v->output_width>>!!plane;
3430
3431 for (row = 0; row < v->output_height>>!!plane; row++) {
3432 uint8_t *dst = v->sprite_output_frame.data[plane] +
3433 v->sprite_output_frame.linesize[plane] * row;
3434
3435 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
3436 uint8_t *iplane = s->current_picture.f.data[plane];
3437 int iline = s->current_picture.f.linesize[plane];
3438 int ycoord = yoff[sprite] + yadv[sprite]*row;
3439 int yline = ycoord>>16;
3440 ysub[sprite] = ycoord&0xFFFF;
3441 if (sprite) {
3442 iplane = s->last_picture.f.data[plane];
3443 iline = s->last_picture.f.linesize[plane];
3444 }
3445 if (!(xoff[sprite]&0xFFFF) && xadv[sprite] == 1<<16) {
3446 src_h[sprite][0] = iplane+(xoff[sprite]>>16)+ yline *iline;
3447 if (ysub[sprite])
3448 src_h[sprite][1] = iplane+(xoff[sprite]>>16)+(yline+1)*iline;
3449 } else {
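                    /* fractional x offset or non-unit x scale: resample the
                       source row(s) horizontally, caching the last two
                       resampled rows per sprite in sr_rows so neighbouring
                       output rows can reuse them */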
3450 if (sr_cache[sprite][0] != yline) {
3451 if (sr_cache[sprite][1] == yline) {
3452 FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
3453 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
3454 } else {
3455 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane+yline*iline, xoff[sprite], xadv[sprite], width);
3456 sr_cache[sprite][0] = yline;
3457 }
3458 }
3459 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
3460 v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane+(yline+1)*iline, xoff[sprite], xadv[sprite], width);
3461 sr_cache[sprite][1] = yline + 1;
3462 }
3463 src_h[sprite][0] = v->sr_rows[sprite][0];
3464 src_h[sprite][1] = v->sr_rows[sprite][1];
3465 }
3466 }
3467
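                /* vertically interpolate each sprite between its two cached
                   rows and, in two-sprite mode, mix the results using alpha */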
3468 if (!v->two_sprites) {
3469 if (ysub[0]) {
3470 v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
3471 } else {
3472 memcpy(dst, src_h[0][0], width);
3473 }
3474 } else {
3475 if (ysub[0] && ysub[1]) {
3476 v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
3477 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
3478 } else if (ysub[0]) {
3479 v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
3480 src_h[1][0], alpha, width);
3481 } else if (ysub[1]) {
3482 v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
3483 src_h[0][0], (1<<16)-1-alpha, width);
3484 } else {
3485 v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
3486 }
3487 }
3488 }
3489
3490 if (!plane) {
3491 for (i = 0; i < 2; i++) {
3492 xoff[i] >>= 1;
3493 yoff[i] >>= 1;
3494 }
3495 }
3496
3497 }
3498}
3499
3500
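/* Decode a WMV3IMAGE/VC1IMAGE frame: parse the sprite parameters, verify that
 * the referenced sprite picture(s) exist, grab an output buffer and render
 * the sprites into it. */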
3501static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
3502{
3503 MpegEncContext *s = &v->s;
3504 AVCodecContext *avctx = s->avctx;
3505 SpriteData sd;
3506
3507 vc1_parse_sprites(v, gb, &sd);
3508
3509 if (!s->current_picture.f.data[0]) {
3510 av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
3511 return -1;
3512 }
3513
3514 if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
3515 av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
3516 v->two_sprites = 0;
3517 }
3518
3519 if (v->sprite_output_frame.data[0])
3520 avctx->release_buffer(avctx, &v->sprite_output_frame);
3521
3522 v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
3523 v->sprite_output_frame.reference = 0;
3524 if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
3525 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
3526 return -1;
3527 }
3528
3529 vc1_draw_sprites(v, &sd);
3530
3531 return 0;
3532}
3533
3534static void vc1_sprite_flush(AVCodecContext *avctx)
3535{
3536 VC1Context *v = avctx->priv_data;
3537 MpegEncContext *s = &v->s;
3538 AVFrame *f = &s->current_picture.f;
3539 int plane, i;
3540
 3541 /* Windows Media Image codecs have a convergence interval of two keyframes.
 3542 Since we can't enforce it, clear the missing sprite to black. This is
 3543 wrong, but it looks better than doing nothing. */
3544
3545 if (f->data[0])
3546 for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
3547 for (i = 0; i < v->sprite_height>>!!plane; i++)
3548 memset(f->data[plane]+i*f->linesize[plane],
3549 plane ? 128 : 0, f->linesize[plane]);
768c5251
AD
3550}
3551
45ecda85
AD
3552#endif
3553
d2f119a1
AD
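/* Allocate the per-picture decoding tables (bitplanes, CBP/TTBLK/intra/MV
 * rows, the macroblock type array and, for the image codecs, the sprite
 * resampling rows). Returns -1 if any allocation failed. */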
3554static av_cold int vc1_decode_init_alloc_tables(VC1Context *v)
3555{
3556 MpegEncContext *s = &v->s;
3557 int i;
3558
3559 /* Allocate mb bitplanes */
3560 v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
3561 v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
3562 v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
3563 v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
3564
3565 v->n_allocated_blks = s->mb_width + 2;
3566 v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
3567 v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
3568 v->cbp = v->cbp_base + s->mb_stride;
3569 v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
3570 v->ttblk = v->ttblk_base + s->mb_stride;
3571 v->is_intra_base = av_malloc(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
3572 v->is_intra = v->is_intra_base + s->mb_stride;
3573 v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
3574 v->luma_mv = v->luma_mv_base + s->mb_stride;
3575
 3576 /* allocate block type info laid out so that it can be indexed with s->block_index[] */
3577 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
3578 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
3579 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
3580 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
3581
3582 /* Init coded blocks info */
3583 if (v->profile == PROFILE_ADVANCED)
3584 {
3585// if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
3586// return -1;
3587// if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
3588// return -1;
3589 }
3590
3591 ff_intrax8_common_init(&v->x8,s);
3592
3593 if (s->avctx->codec_id == CODEC_ID_WMV3IMAGE || s->avctx->codec_id == CODEC_ID_VC1IMAGE) {
3594 for (i = 0; i < 4; i++)
3595 if (!(v->sr_rows[i>>1][i%2] = av_malloc(v->output_width))) return -1;
3596 }
3597
3598 if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
3599 !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
3600 !v->mb_type_base)
3601 return -1;
3602
3603 return 0;
3604}
3605
b761659b
DB
3606/** Initialize a VC1/WMV3 decoder
3607 * @todo TODO: Handle VC-1 IDUs (Transport level?)
 3608 * @todo TODO: Decipher remaining bits in extradata
3609 */
3610static av_cold int vc1_decode_init(AVCodecContext *avctx)
3611{
3612 VC1Context *v = avctx->priv_data;
3613 MpegEncContext *s = &v->s;
3614 GetBitContext gb;
d2f119a1 3615 int i;
b761659b 3616
45ecda85
AD
3617 /* save the container output size for WMImage */
3618 v->output_width = avctx->width;
3619 v->output_height = avctx->height;
3620
b761659b
DB
3621 if (!avctx->extradata_size || !avctx->extradata) return -1;
3622 if (!(avctx->flags & CODEC_FLAG_GRAY))
3623 avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
3624 else
3625 avctx->pix_fmt = PIX_FMT_GRAY8;
3626 avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
3627 v->s.avctx = avctx;
3628 avctx->flags |= CODEC_FLAG_EMU_EDGE;
3629 v->s.flags |= CODEC_FLAG_EMU_EDGE;
3630
3631 if(avctx->idct_algo==FF_IDCT_AUTO){
3632 avctx->idct_algo=FF_IDCT_WMV2;
3633 }
3634
b761659b 3635 if (vc1_init_common(v) < 0) return -1;
12802ec0 3636 ff_vc1dsp_init(&v->vc1dsp);
b761659b 3637
45ecda85 3638 if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE)
b761659b
DB
3639 {
3640 int count = 0;
3641
 3642 // looks like WMV3 stores a sequence header in the extradata;
 3643 // an advanced sequence header may precede the first frame.
 3644 // The last byte of the extradata is a version number, 1 for the
 3645 // samples we can decode.
3646
3647 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
3648
3649 if (vc1_decode_sequence_header(avctx, v, &gb) < 0)
3650 return -1;
3651
3652 count = avctx->extradata_size*8 - get_bits_count(&gb);
3653 if (count>0)
3654 {
3655 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
3656 count, get_bits(&gb, count));
3657 }
3658 else if (count < 0)
3659 {
3660 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
3661 }
768c5251 3662 } else { // VC1/WVC1/WVP2
b761659b
DB
3663 const uint8_t *start = avctx->extradata;
3664 uint8_t *end = avctx->extradata + avctx->extradata_size;
3665 const uint8_t *next;
3666 int size, buf2_size;
3667 uint8_t *buf2 = NULL;
3668 int seq_initialized = 0, ep_initialized = 0;
3669
3670 if(avctx->extradata_size < 16) {
3671 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
3672 return -1;