/*
 * Copyright (C) 1999-2001 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
 *
 * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
 * See http://libmpeg2.sourceforge.net/ for updates.
 *
 * mpeg2dec is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * mpeg2dec is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with mpeg2dec; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/common.h"
#include "libavcodec/dsputil.h"

#include "dsputil_mmx.h"
#define ATTR_ALIGN(align) __attribute__ ((__aligned__ (align)))

#define ROW_SHIFT 11
#define COL_SHIFT 6

#define round(bias) ((int)(((bias)+0.5) * (1<<ROW_SHIFT)))
#define rounder(bias) {round (bias), round (bias)}
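/* Worked example (added for clarity): with ROW_SHIFT == 11, round (0.5)
 * is (int)((0.5 + 0.5) * 2048) == 2048, so rounder (0.5) expands to the
 * initializer {2048, 2048} - the same 32-bit rounding bias in both
 * pmaddwd lanes. */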
#if 0
/* C row IDCT - it is just here to document the MMXEXT and MMX versions */
static inline void idct_row (int16_t * row, int offset,
                             int16_t * table, int32_t * rounder)
{
    int C1, C2, C3, C4, C5, C6, C7;
    int a0, a1, a2, a3, b0, b1, b2, b3;

    row += offset;

    C1 = table[1];
    C2 = table[2];
    C3 = table[3];
    C4 = table[4];
    C5 = table[5];
    C6 = table[6];
    C7 = table[7];

    a0 = C4*row[0] + C2*row[2] + C4*row[4] + C6*row[6] + *rounder;
    a1 = C4*row[0] + C6*row[2] - C4*row[4] - C2*row[6] + *rounder;
    a2 = C4*row[0] - C6*row[2] - C4*row[4] + C2*row[6] + *rounder;
    a3 = C4*row[0] - C2*row[2] + C4*row[4] - C6*row[6] + *rounder;

    b0 = C1*row[1] + C3*row[3] + C5*row[5] + C7*row[7];
    b1 = C3*row[1] - C7*row[3] - C1*row[5] - C5*row[7];
    b2 = C5*row[1] - C1*row[3] + C7*row[5] + C3*row[7];
    b3 = C7*row[1] - C5*row[3] + C3*row[5] - C1*row[7];

    row[0] = (a0 + b0) >> ROW_SHIFT;
    row[1] = (a1 + b1) >> ROW_SHIFT;
    row[2] = (a2 + b2) >> ROW_SHIFT;
    row[3] = (a3 + b3) >> ROW_SHIFT;
    row[4] = (a3 - b3) >> ROW_SHIFT;
    row[5] = (a2 - b2) >> ROW_SHIFT;
    row[6] = (a1 - b1) >> ROW_SHIFT;
    row[7] = (a0 - b0) >> ROW_SHIFT;
}
#endif
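/* Sanity check (added, not in the original): for a DC-only row
 * (row[1..7] == 0) every b term vanishes and every a term is
 * C4*row[0] + *rounder, so all eight outputs reduce to
 * (C4*row[0] + *rounder) >> ROW_SHIFT - a flat basis function, as
 * expected. */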
#define mmxext_table(c1,c2,c3,c4,c5,c6,c7) {  c4,  c2, -c4, -c2, \
                                               c4,  c6,  c4,  c6, \
                                               c1,  c3, -c1, -c5, \
                                               c5,  c7,  c3, -c7, \
                                               c4, -c6,  c4, -c6, \
                                              -c4,  c2,  c4, -c2, \
                                               c5, -c1,  c3, -c1, \
                                               c7,  c3,  c7, -c5 }
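/* Illustrative note (added): pmaddwd multiplies four int16 pairs and adds
 * adjacent products, so with mm0 = x6 x4 x2 x0 and the first table row
 * {c4, c2, -c4, -c2} a single pmaddwd yields the two dwords
 * C4*x0+C2*x2 (low) and -C4*x4-C2*x6 (high) - half of each even-part
 * term; the shuffled x2 x0 x6 x4 pass contributes the other half. */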
static inline void mmxext_row_head (int16_t * const row, const int offset,
                                    const int16_t * const table)
{
    movq_m2r (*(row+offset), mm2);      /* mm2 = x6 x4 x2 x0 */

    movq_m2r (*(row+offset+4), mm5);    /* mm5 = x7 x5 x3 x1 */
    movq_r2r (mm2, mm0);                /* mm0 = x6 x4 x2 x0 */

    movq_m2r (*table, mm3);             /* mm3 = -C2 -C4 C2 C4 */
    movq_r2r (mm5, mm6);                /* mm6 = x7 x5 x3 x1 */

    movq_m2r (*(table+4), mm4);         /* mm4 = C6 C4 C6 C4 */
    pmaddwd_r2r (mm0, mm3);             /* mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 */

    pshufw_r2r (mm2, mm2, 0x4e);        /* mm2 = x2 x0 x6 x4 */
}
static inline void mmxext_row (const int16_t * const table,
                               const int32_t * const rounder)
{
    movq_m2r (*(table+8), mm1);         /* mm1 = -C5 -C1 C3 C1 */
    pmaddwd_r2r (mm2, mm4);             /* mm4 = C4*x0+C6*x2 C4*x4+C6*x6 */

    pmaddwd_m2r (*(table+16), mm0);     /* mm0 = C4*x4-C6*x6 C4*x0-C6*x2 */
    pshufw_r2r (mm6, mm6, 0x4e);        /* mm6 = x3 x1 x7 x5 */

    movq_m2r (*(table+12), mm7);        /* mm7 = -C7 C3 C7 C5 */
    pmaddwd_r2r (mm5, mm1);             /* mm1 = -C1*x5-C5*x7 C1*x1+C3*x3 */

    paddd_m2r (*rounder, mm3);          /* mm3 += rounder */
    pmaddwd_r2r (mm6, mm7);             /* mm7 = C3*x1-C7*x3 C5*x5+C7*x7 */

    pmaddwd_m2r (*(table+20), mm2);     /* mm2 = C4*x0-C2*x2 -C4*x4+C2*x6 */
    paddd_r2r (mm4, mm3);               /* mm3 = a1 a0 + rounder */

    pmaddwd_m2r (*(table+24), mm5);     /* mm5 = C3*x5-C1*x7 C5*x1-C1*x3 */
    movq_r2r (mm3, mm4);                /* mm4 = a1 a0 + rounder */

    pmaddwd_m2r (*(table+28), mm6);     /* mm6 = C7*x1-C5*x3 C7*x5+C3*x7 */
    paddd_r2r (mm7, mm1);               /* mm1 = b1 b0 */

    paddd_m2r (*rounder, mm0);          /* mm0 += rounder */
    psubd_r2r (mm1, mm3);               /* mm3 = a1-b1 a0-b0 + rounder */

    psrad_i2r (ROW_SHIFT, mm3);         /* mm3 = y6 y7 */
    paddd_r2r (mm4, mm1);               /* mm1 = a1+b1 a0+b0 + rounder */

    paddd_r2r (mm2, mm0);               /* mm0 = a3 a2 + rounder */
    psrad_i2r (ROW_SHIFT, mm1);         /* mm1 = y1 y0 */

    paddd_r2r (mm6, mm5);               /* mm5 = b3 b2 */
    movq_r2r (mm0, mm4);                /* mm4 = a3 a2 + rounder */

    paddd_r2r (mm5, mm0);               /* mm0 = a3+b3 a2+b2 + rounder */
    psubd_r2r (mm5, mm4);               /* mm4 = a3-b3 a2-b2 + rounder */
}
static inline void mmxext_row_tail (int16_t * const row, const int store)
{
    psrad_i2r (ROW_SHIFT, mm0);         /* mm0 = y3 y2 */

    psrad_i2r (ROW_SHIFT, mm4);         /* mm4 = y4 y5 */

    packssdw_r2r (mm0, mm1);            /* mm1 = y3 y2 y1 y0 */

    packssdw_r2r (mm3, mm4);            /* mm4 = y6 y7 y4 y5 */

    movq_r2m (mm1, *(row+store));       /* save y3 y2 y1 y0 */
    pshufw_r2r (mm4, mm4, 0xb1);        /* mm4 = y7 y6 y5 y4 */

    movq_r2m (mm4, *(row+store+4));     /* save y7 y6 y5 y4 */
}
static inline void mmxext_row_mid (int16_t * const row, const int store,
                                   const int offset,
                                   const int16_t * const table)
{
    movq_m2r (*(row+offset), mm2);      /* mm2 = x6 x4 x2 x0 */
    psrad_i2r (ROW_SHIFT, mm0);         /* mm0 = y3 y2 */

    movq_m2r (*(row+offset+4), mm5);    /* mm5 = x7 x5 x3 x1 */
    psrad_i2r (ROW_SHIFT, mm4);         /* mm4 = y4 y5 */

    packssdw_r2r (mm0, mm1);            /* mm1 = y3 y2 y1 y0 */
    movq_r2r (mm5, mm6);                /* mm6 = x7 x5 x3 x1 */

    packssdw_r2r (mm3, mm4);            /* mm4 = y6 y7 y4 y5 */
    movq_r2r (mm2, mm0);                /* mm0 = x6 x4 x2 x0 */

    movq_r2m (mm1, *(row+store));       /* save y3 y2 y1 y0 */
    pshufw_r2r (mm4, mm4, 0xb1);        /* mm4 = y7 y6 y5 y4 */

    movq_m2r (*table, mm3);             /* mm3 = -C2 -C4 C2 C4 */
    movq_r2m (mm4, *(row+store+4));     /* save y7 y6 y5 y4 */

    pmaddwd_r2r (mm0, mm3);             /* mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 */

    movq_m2r (*(table+4), mm4);         /* mm4 = C6 C4 C6 C4 */
    pshufw_r2r (mm2, mm2, 0x4e);        /* mm2 = x2 x0 x6 x4 */
}
#define mmx_table(c1,c2,c3,c4,c5,c6,c7) {  c4,  c2,  c4,  c6, \
                                            c4,  c6, -c4, -c2, \
                                            c1,  c3,  c3, -c7, \
                                            c5,  c7, -c1, -c5, \
                                            c4, -c6,  c4, -c2, \
                                           -c4,  c2,  c4, -c6, \
                                            c5, -c1,  c7, -c5, \
                                            c7,  c3,  c3, -c1 }
static inline void mmx_row_head (int16_t * const row, const int offset,
                                 const int16_t * const table)
{
    movq_m2r (*(row+offset), mm2);      /* mm2 = x6 x4 x2 x0 */

    movq_m2r (*(row+offset+4), mm5);    /* mm5 = x7 x5 x3 x1 */
    movq_r2r (mm2, mm0);                /* mm0 = x6 x4 x2 x0 */

    movq_m2r (*table, mm3);             /* mm3 = C6 C4 C2 C4 */
    movq_r2r (mm5, mm6);                /* mm6 = x7 x5 x3 x1 */

    punpckldq_r2r (mm0, mm0);           /* mm0 = x2 x0 x2 x0 */

    movq_m2r (*(table+4), mm4);         /* mm4 = -C2 -C4 C6 C4 */
    pmaddwd_r2r (mm0, mm3);             /* mm3 = C4*x0+C6*x2 C4*x0+C2*x2 */

    movq_m2r (*(table+8), mm1);         /* mm1 = -C7 C3 C3 C1 */
    punpckhdq_r2r (mm2, mm2);           /* mm2 = x6 x4 x6 x4 */
}
static inline void mmx_row (const int16_t * const table,
                            const int32_t * const rounder)
{
    pmaddwd_r2r (mm2, mm4);             /* mm4 = -C4*x4-C2*x6 C4*x4+C6*x6 */
    punpckldq_r2r (mm5, mm5);           /* mm5 = x3 x1 x3 x1 */

    pmaddwd_m2r (*(table+16), mm0);     /* mm0 = C4*x0-C2*x2 C4*x0-C6*x2 */
    punpckhdq_r2r (mm6, mm6);           /* mm6 = x7 x5 x7 x5 */

    movq_m2r (*(table+12), mm7);        /* mm7 = -C5 -C1 C7 C5 */
    pmaddwd_r2r (mm5, mm1);             /* mm1 = C3*x1-C7*x3 C1*x1+C3*x3 */

    paddd_m2r (*rounder, mm3);          /* mm3 += rounder */
    pmaddwd_r2r (mm6, mm7);             /* mm7 = -C1*x5-C5*x7 C5*x5+C7*x7 */

    pmaddwd_m2r (*(table+20), mm2);     /* mm2 = C4*x4-C6*x6 -C4*x4+C2*x6 */
    paddd_r2r (mm4, mm3);               /* mm3 = a1 a0 + rounder */

    pmaddwd_m2r (*(table+24), mm5);     /* mm5 = C7*x1-C5*x3 C5*x1-C1*x3 */
    movq_r2r (mm3, mm4);                /* mm4 = a1 a0 + rounder */

    pmaddwd_m2r (*(table+28), mm6);     /* mm6 = C3*x5-C1*x7 C7*x5+C3*x7 */
    paddd_r2r (mm7, mm1);               /* mm1 = b1 b0 */

    paddd_m2r (*rounder, mm0);          /* mm0 += rounder */
    psubd_r2r (mm1, mm3);               /* mm3 = a1-b1 a0-b0 + rounder */

    psrad_i2r (ROW_SHIFT, mm3);         /* mm3 = y6 y7 */
    paddd_r2r (mm4, mm1);               /* mm1 = a1+b1 a0+b0 + rounder */

    paddd_r2r (mm2, mm0);               /* mm0 = a3 a2 + rounder */
    psrad_i2r (ROW_SHIFT, mm1);         /* mm1 = y1 y0 */

    paddd_r2r (mm6, mm5);               /* mm5 = b3 b2 */
    movq_r2r (mm0, mm7);                /* mm7 = a3 a2 + rounder */

    paddd_r2r (mm5, mm0);               /* mm0 = a3+b3 a2+b2 + rounder */
    psubd_r2r (mm5, mm7);               /* mm7 = a3-b3 a2-b2 + rounder */
}
static inline void mmx_row_tail (int16_t * const row, const int store)
{
    psrad_i2r (ROW_SHIFT, mm0);         /* mm0 = y3 y2 */

    psrad_i2r (ROW_SHIFT, mm7);         /* mm7 = y4 y5 */

    packssdw_r2r (mm0, mm1);            /* mm1 = y3 y2 y1 y0 */

    packssdw_r2r (mm3, mm7);            /* mm7 = y6 y7 y4 y5 */

    movq_r2m (mm1, *(row+store));       /* save y3 y2 y1 y0 */
    movq_r2r (mm7, mm4);                /* mm4 = y6 y7 y4 y5 */

    pslld_i2r (16, mm7);                /* mm7 = y7 0 y5 0 */

    psrld_i2r (16, mm4);                /* mm4 = 0 y6 0 y4 */

    por_r2r (mm4, mm7);                 /* mm7 = y7 y6 y5 y4 */

    movq_r2m (mm7, *(row+store+4));     /* save y7 y6 y5 y4 */
}
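/* Note (added): plain MMX has no pshufw, so the pslld/psrld/por sequence
 * above emulates the single pshufw (0xb1) word swap that
 * mmxext_row_tail uses to turn y6 y7 y4 y5 into y7 y6 y5 y4. */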
static inline void mmx_row_mid (int16_t * const row, const int store,
                                const int offset, const int16_t * const table)
{
    movq_m2r (*(row+offset), mm2);      /* mm2 = x6 x4 x2 x0 */
    psrad_i2r (ROW_SHIFT, mm0);         /* mm0 = y3 y2 */

    movq_m2r (*(row+offset+4), mm5);    /* mm5 = x7 x5 x3 x1 */
    psrad_i2r (ROW_SHIFT, mm7);         /* mm7 = y4 y5 */

    packssdw_r2r (mm0, mm1);            /* mm1 = y3 y2 y1 y0 */
    movq_r2r (mm5, mm6);                /* mm6 = x7 x5 x3 x1 */

    packssdw_r2r (mm3, mm7);            /* mm7 = y6 y7 y4 y5 */
    movq_r2r (mm2, mm0);                /* mm0 = x6 x4 x2 x0 */

    movq_r2m (mm1, *(row+store));       /* save y3 y2 y1 y0 */
    movq_r2r (mm7, mm1);                /* mm1 = y6 y7 y4 y5 */

    punpckldq_r2r (mm0, mm0);           /* mm0 = x2 x0 x2 x0 */
    psrld_i2r (16, mm7);                /* mm7 = 0 y6 0 y4 */

    movq_m2r (*table, mm3);             /* mm3 = C6 C4 C2 C4 */
    pslld_i2r (16, mm1);                /* mm1 = y7 0 y5 0 */

    movq_m2r (*(table+4), mm4);         /* mm4 = -C2 -C4 C6 C4 */
    por_r2r (mm1, mm7);                 /* mm7 = y7 y6 y5 y4 */

    movq_m2r (*(table+8), mm1);         /* mm1 = -C7 C3 C3 C1 */
    punpckhdq_r2r (mm2, mm2);           /* mm2 = x6 x4 x6 x4 */

    movq_r2m (mm7, *(row+store+4));     /* save y7 y6 y5 y4 */
    pmaddwd_r2r (mm0, mm3);             /* mm3 = C4*x0+C6*x2 C4*x0+C2*x2 */
}
#if 0
/* C column IDCT - it is just here to document the MMXEXT and MMX versions */
static inline void idct_col (int16_t * col, int offset)
{
/* multiplication - as implemented on mmx */
#define F(c,x) (((c) * (x)) >> 16)

/* saturation - it helps us handle torture test cases */
#define S(x) (((x)>32767) ? 32767 : ((x)<-32768) ? -32768 : (x))
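/* Worked example (added; uses the T2 value defined for the MMX version
 * below): with T2 = 27146 = tan(pi/8) in 0.16 fixed point,
 * F (T2, 1000) = (27146 * 1000) >> 16 = 414, i.e. roughly 0.414 * 1000,
 * and S() clamps any intermediate that would overflow int16_t. */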
    int16_t x0, x1, x2, x3, x4, x5, x6, x7;
    int16_t y0, y1, y2, y3, y4, y5, y6, y7;
    int16_t a0, a1, a2, a3, b0, b1, b2, b3;
    int16_t u04, v04, u26, v26, u17, v17, u35, v35, u12, v12;

    col += offset;

    x0 = col[0*8];
    x1 = col[1*8];
    x2 = col[2*8];
    x3 = col[3*8];
    x4 = col[4*8];
    x5 = col[5*8];
    x6 = col[6*8];
    x7 = col[7*8];

    u04 = S (x0 + x4);
    v04 = S (x0 - x4);
    u26 = S (F (T2, x6) + x2);
    v26 = S (F (T2, x2) - x6);

    a0 = S (u04 + u26);
    a1 = S (v04 + v26);
    a2 = S (v04 - v26);
    a3 = S (u04 - u26);

    u17 = S (F (T1, x7) + x1);
    v17 = S (F (T1, x1) - x7);
    u35 = S (F (T3, x5) + x3);
    v35 = S (F (T3, x3) - x5);

    b0 = S (u17 + u35);
    b3 = S (v17 - v35);
    u12 = S (u17 - u35);
    v12 = S (v17 + v35);
    u12 = S (2 * F (C4, u12));
    v12 = S (2 * F (C4, v12));
    b1 = S (u12 + v12);
    b2 = S (u12 - v12);

    y0 = S (a0 + b0) >> COL_SHIFT;
    y1 = S (a1 + b1) >> COL_SHIFT;
    y2 = S (a2 + b2) >> COL_SHIFT;
    y3 = S (a3 + b3) >> COL_SHIFT;

    y4 = S (a3 - b3) >> COL_SHIFT;
    y5 = S (a2 - b2) >> COL_SHIFT;
    y6 = S (a1 - b1) >> COL_SHIFT;
    y7 = S (a0 - b0) >> COL_SHIFT;

    col[0*8] = y0;
    col[1*8] = y1;
    col[2*8] = y2;
    col[3*8] = y3;
    col[4*8] = y4;
    col[5*8] = y5;
    col[6*8] = y6;
    col[7*8] = y7;
}
#endif
/* MMX column IDCT */
static inline void idct_col (int16_t * const col, const int offset)
{
#define T1 13036
#define T2 27146
#define T3 43790
#define C4 23170

    static const short t1_vector[] ATTR_ALIGN(8) = {T1,T1,T1,T1};
    static const short t2_vector[] ATTR_ALIGN(8) = {T2,T2,T2,T2};
    static const short t3_vector[] ATTR_ALIGN(8) = {T3,T3,T3,T3};
    static const short c4_vector[] ATTR_ALIGN(8) = {C4,C4,C4,C4};
    /* column code adapted from Peter Gubanov */
    /* http://www.elecard.com/peter/idct.shtml */

    movq_m2r (*t1_vector, mm0);         /* mm0 = T1 */

    movq_m2r (*(col+offset+1*8), mm1);  /* mm1 = x1 */
    movq_r2r (mm0, mm2);                /* mm2 = T1 */

    movq_m2r (*(col+offset+7*8), mm4);  /* mm4 = x7 */
    pmulhw_r2r (mm1, mm0);              /* mm0 = T1*x1 */

    movq_m2r (*t3_vector, mm5);         /* mm5 = T3 */
    pmulhw_r2r (mm4, mm2);              /* mm2 = T1*x7 */

    movq_m2r (*(col+offset+5*8), mm6);  /* mm6 = x5 */
    movq_r2r (mm5, mm7);                /* mm7 = T3-1 */

    movq_m2r (*(col+offset+3*8), mm3);  /* mm3 = x3 */
    psubsw_r2r (mm4, mm0);              /* mm0 = v17 */

    movq_m2r (*t2_vector, mm4);         /* mm4 = T2 */
    pmulhw_r2r (mm3, mm5);              /* mm5 = (T3-1)*x3 */

    paddsw_r2r (mm2, mm1);              /* mm1 = u17 */
    pmulhw_r2r (mm6, mm7);              /* mm7 = (T3-1)*x5 */

    movq_r2r (mm4, mm2);                /* mm2 = T2 */
    paddsw_r2r (mm3, mm5);              /* mm5 = T3*x3 */

    pmulhw_m2r (*(col+offset+2*8), mm4);/* mm4 = T2*x2 */
    paddsw_r2r (mm6, mm7);              /* mm7 = T3*x5 */

    psubsw_r2r (mm6, mm5);              /* mm5 = v35 */
    paddsw_r2r (mm3, mm7);              /* mm7 = u35 */

    movq_m2r (*(col+offset+6*8), mm3);  /* mm3 = x6 */
    movq_r2r (mm0, mm6);                /* mm6 = v17 */

    pmulhw_r2r (mm3, mm2);              /* mm2 = T2*x6 */
    psubsw_r2r (mm5, mm0);              /* mm0 = b3 */

    psubsw_r2r (mm3, mm4);              /* mm4 = v26 */
    paddsw_r2r (mm6, mm5);              /* mm5 = v12 */

    movq_r2m (mm0, *(col+offset+3*8));  /* save b3 in scratch0 */
    movq_r2r (mm1, mm6);                /* mm6 = u17 */

    paddsw_m2r (*(col+offset+2*8), mm2);/* mm2 = u26 */
    paddsw_r2r (mm7, mm6);              /* mm6 = b0 */

    psubsw_r2r (mm7, mm1);              /* mm1 = u12 */
    movq_r2r (mm1, mm7);                /* mm7 = u12 */

    movq_m2r (*(col+offset+0*8), mm3);  /* mm3 = x0 */
    paddsw_r2r (mm5, mm1);              /* mm1 = u12+v12 */

    movq_m2r (*c4_vector, mm0);         /* mm0 = C4/2 */
    psubsw_r2r (mm5, mm7);              /* mm7 = u12-v12 */

    movq_r2m (mm6, *(col+offset+5*8));  /* save b0 in scratch1 */
    pmulhw_r2r (mm0, mm1);              /* mm1 = b1/2 */

    movq_r2r (mm4, mm6);                /* mm6 = v26 */
    pmulhw_r2r (mm0, mm7);              /* mm7 = b2/2 */

    movq_m2r (*(col+offset+4*8), mm5);  /* mm5 = x4 */
    movq_r2r (mm3, mm0);                /* mm0 = x0 */

    psubsw_r2r (mm5, mm3);              /* mm3 = v04 */
    paddsw_r2r (mm5, mm0);              /* mm0 = u04 */

    paddsw_r2r (mm3, mm4);              /* mm4 = a1 */
    movq_r2r (mm0, mm5);                /* mm5 = u04 */

    psubsw_r2r (mm6, mm3);              /* mm3 = a2 */
    paddsw_r2r (mm2, mm5);              /* mm5 = a0 */

    paddsw_r2r (mm1, mm1);              /* mm1 = b1 */
    psubsw_r2r (mm2, mm0);              /* mm0 = a3 */

    paddsw_r2r (mm7, mm7);              /* mm7 = b2 */
    movq_r2r (mm3, mm2);                /* mm2 = a2 */

    movq_r2r (mm4, mm6);                /* mm6 = a1 */
    paddsw_r2r (mm7, mm3);              /* mm3 = a2+b2 */

    psraw_i2r (COL_SHIFT, mm3);         /* mm3 = y2 */
    paddsw_r2r (mm1, mm4);              /* mm4 = a1+b1 */

    psraw_i2r (COL_SHIFT, mm4);         /* mm4 = y1 */
    psubsw_r2r (mm1, mm6);              /* mm6 = a1-b1 */

    movq_m2r (*(col+offset+5*8), mm1);  /* mm1 = b0 */
    psubsw_r2r (mm7, mm2);              /* mm2 = a2-b2 */

    psraw_i2r (COL_SHIFT, mm6);         /* mm6 = y6 */
    movq_r2r (mm5, mm7);                /* mm7 = a0 */

    movq_r2m (mm4, *(col+offset+1*8));  /* save y1 */
    psraw_i2r (COL_SHIFT, mm2);         /* mm2 = y5 */

    movq_r2m (mm3, *(col+offset+2*8));  /* save y2 */
    paddsw_r2r (mm1, mm5);              /* mm5 = a0+b0 */

    movq_m2r (*(col+offset+3*8), mm4);  /* mm4 = b3 */
    psubsw_r2r (mm1, mm7);              /* mm7 = a0-b0 */

    psraw_i2r (COL_SHIFT, mm5);         /* mm5 = y0 */
    movq_r2r (mm0, mm3);                /* mm3 = a3 */

    movq_r2m (mm2, *(col+offset+5*8));  /* save y5 */
    psubsw_r2r (mm4, mm3);              /* mm3 = a3-b3 */

    psraw_i2r (COL_SHIFT, mm7);         /* mm7 = y7 */
    paddsw_r2r (mm0, mm4);              /* mm4 = a3+b3 */

    movq_r2m (mm5, *(col+offset+0*8));  /* save y0 */
    psraw_i2r (COL_SHIFT, mm3);         /* mm3 = y4 */

    movq_r2m (mm6, *(col+offset+6*8));  /* save y6 */
    psraw_i2r (COL_SHIFT, mm4);         /* mm4 = y3 */

    movq_r2m (mm7, *(col+offset+7*8));  /* save y7 */

    movq_r2m (mm3, *(col+offset+4*8));  /* save y4 */

    movq_r2m (mm4, *(col+offset+3*8));  /* save y3 */
}
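/* Note (added): each idct_col call processes four columns at once -
 * every movq loads four adjacent int16 column samples - so the two
 * calls with offset 0 and 4 in declare_idct below cover all eight
 * columns of the block. */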
static const int32_t rounder0[] ATTR_ALIGN(8) =
    rounder ((1 << (COL_SHIFT - 1)) - 0.5);
static const int32_t rounder4[] ATTR_ALIGN(8) = rounder (0);
static const int32_t rounder1[] ATTR_ALIGN(8) =
    rounder (1.25683487303);            /* C1*(C1/C4+C1+C7)/2 */
static const int32_t rounder7[] ATTR_ALIGN(8) =
    rounder (-0.25);                    /* C1*(C7/C4+C7-C1)/2 */
static const int32_t rounder2[] ATTR_ALIGN(8) =
    rounder (0.60355339059);            /* C2 * (C6+C2)/2 */
static const int32_t rounder6[] ATTR_ALIGN(8) =
    rounder (-0.25);                    /* C2 * (C6-C2)/2 */
static const int32_t rounder3[] ATTR_ALIGN(8) =
    rounder (0.087788325588);           /* C3*(-C3/C4+C3+C5)/2 */
static const int32_t rounder5[] ATTR_ALIGN(8) =
    rounder (-0.441341716183);          /* C3*(-C5/C4+C5-C3)/2 */
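/* Interpretation (added, not from the original): rounder0 adds
 * 1 << (COL_SHIFT - 1) to every output of the DC row pass (the -0.5
 * cancels round()'s internal +0.5); since row 0 feeds the x0 term of
 * every column with unit weight, this pre-applies the column pass's
 * rounding bias, while the other per-row biases compensate, per the
 * comments above, for truncation of the pmulhw products. */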
#define declare_idct(idct,table,idct_row_head,idct_row,idct_row_tail,idct_row_mid) \
void idct (int16_t * const block)                               \
{                                                               \
    static const int16_t table04[] ATTR_ALIGN(16) =             \
        table (22725, 21407, 19266, 16384, 12873, 8867, 4520);  \
    static const int16_t table17[] ATTR_ALIGN(16) =             \
        table (31521, 29692, 26722, 22725, 17855, 12299, 6270); \
    static const int16_t table26[] ATTR_ALIGN(16) =             \
        table (29692, 27969, 25172, 21407, 16819, 11585, 5906); \
    static const int16_t table35[] ATTR_ALIGN(16) =             \
        table (26722, 25172, 22654, 19266, 15137, 10426, 5315); \
                                                                \
    idct_row_head (block, 0*8, table04);                        \
    idct_row (table04, rounder0);                               \
    idct_row_mid (block, 0*8, 4*8, table04);                    \
    idct_row (table04, rounder4);                               \
    idct_row_mid (block, 4*8, 1*8, table17);                    \
    idct_row (table17, rounder1);                               \
    idct_row_mid (block, 1*8, 7*8, table17);                    \
    idct_row (table17, rounder7);                               \
    idct_row_mid (block, 7*8, 2*8, table26);                    \
    idct_row (table26, rounder2);                               \
    idct_row_mid (block, 2*8, 6*8, table26);                    \
    idct_row (table26, rounder6);                               \
    idct_row_mid (block, 6*8, 3*8, table35);                    \
    idct_row (table35, rounder3);                               \
    idct_row_mid (block, 3*8, 5*8, table35);                    \
    idct_row (table35, rounder5);                               \
    idct_row_tail (block, 5*8);                                 \
                                                                \
    idct_col (block, 0);                                        \
    idct_col (block, 4);                                        \
}
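/* Note (added): rows are processed in pairs that share a coefficient
 * table (0/4, 1/7, 2/6, 3/5), and the head/mid/tail split lets each
 * *_row_mid overlap the stores of the previous row with the loads of
 * the next one. */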
declare_idct (ff_mmxext_idct, mmxext_table,
              mmxext_row_head, mmxext_row, mmxext_row_tail, mmxext_row_mid)

declare_idct (ff_mmx_idct, mmx_table,
              mmx_row_head, mmx_row, mmx_row_tail, mmx_row_mid)
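
/* Usage sketch (added; an assumption about the surrounding code, not
 * part of the original file): both entry points transform a
 * 64-coefficient block in place, rows first, then the two four-column
 * passes.  The caller is expected to clear MMX state afterwards (e.g.
 * via an emms instruction) before any FPU use; the example function
 * name below is hypothetical. */
#if 0
static void example_idct_call (void)
{
    static int16_t block[64] ATTR_ALIGN(16); /* dequantized coefficients */
    ff_mmx_idct (block);                     /* or ff_mmxext_idct */
}
#endif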