/*
 * Copyright (c) 2001 Michel Lespinasse
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * NOTE: This code is based on GPL code from the libmpeg2 project. The
 * author, Michel Lespinasse, has given explicit permission to release
 * under LGPL as part of FFmpeg.
 */

/*
 * FFmpeg integration by Dieter Shirley
 *
 * This file is a direct copy of the AltiVec IDCT module from the libmpeg2
 * project. I've deleted all of the libmpeg2-specific code, renamed the
 * functions and reordered the function parameters. The only change to the
 * IDCT function itself was to factor out the partial transposition, and to
 * perform a full transpose at the end of the function.
 */


#include <stdlib.h> /* malloc(), free() */
#include <string.h>
#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavcodec/dsputil.h"
#include "types_altivec.h"
#include "dsputil_ppc.h"
#include "dsputil_altivec.h"

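/* IDCT_HALF performs one pass of the eight-point IDCT over the eight
 * coefficient vectors vx0..vx7 and leaves the results in vy0..vy7.
 * vec_mradds (multiply, round by >>15, add with saturation) applies the
 * 1.15 fixed-point cosine factors c4/a0/a1/a2 (and their negations
 * mc4/ma2) while keeping all intermediates in 16 bits. */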
#define IDCT_HALF                                           \
    /* 1st stage */                                         \
    t1 = vec_mradds (a1, vx7, vx1);                         \
    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7));        \
    t7 = vec_mradds (a2, vx5, vx3);                         \
    t3 = vec_mradds (ma2, vx3, vx5);                        \
                                                            \
    /* 2nd stage */                                         \
    t5 = vec_adds (vx0, vx4);                               \
    t0 = vec_subs (vx0, vx4);                               \
    t2 = vec_mradds (a0, vx6, vx2);                         \
    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6));        \
    t6 = vec_adds (t8, t3);                                 \
    t3 = vec_subs (t8, t3);                                 \
    t8 = vec_subs (t1, t7);                                 \
    t1 = vec_adds (t1, t7);                                 \
                                                            \
    /* 3rd stage */                                         \
    t7 = vec_adds (t5, t2);                                 \
    t2 = vec_subs (t5, t2);                                 \
    t5 = vec_adds (t0, t4);                                 \
    t0 = vec_subs (t0, t4);                                 \
    t4 = vec_subs (t8, t3);                                 \
    t3 = vec_adds (t8, t3);                                 \
                                                            \
    /* 4th stage */                                         \
    vy0 = vec_adds (t7, t1);                                \
    vy7 = vec_subs (t7, t1);                                \
    vy1 = vec_mradds (c4, t3, t5);                          \
    vy6 = vec_mradds (mc4, t3, t5);                         \
    vy2 = vec_mradds (c4, t4, t0);                          \
    vy5 = vec_mradds (mc4, t4, t0);                         \
    vy3 = vec_adds (t2, t6);                                \
    vy4 = vec_subs (t2, t6);


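/* IDCT computes the full 2-D transform of the 8x8 block pointed to by
 * "block" (one vector per row): the rows are shifted left four bits and
 * prescaled by constants[1..4], run through IDCT_HALF, transposed with
 * three rounds of merges, run through IDCT_HALF again, and finally
 * shifted right six bits. The bias folded into vx0 between the two
 * passes provides the rounding for that final shift. */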
#define IDCT                                                            \
    vec_s16 vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7;                     \
    vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;                     \
    vec_s16 a0, a1, a2, ma2, c4, mc4, zero, bias;                       \
    vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8;                         \
    vec_u16 shift;                                                      \
                                                                        \
    c4 = vec_splat (constants[0], 0);                                   \
    a0 = vec_splat (constants[0], 1);                                   \
    a1 = vec_splat (constants[0], 2);                                   \
    a2 = vec_splat (constants[0], 3);                                   \
    mc4 = vec_splat (constants[0], 4);                                  \
    ma2 = vec_splat (constants[0], 5);                                  \
    bias = (vec_s16)vec_splat ((vec_s32)constants[0], 3);               \
                                                                        \
    zero = vec_splat_s16 (0);                                           \
    shift = vec_splat_u16 (4);                                          \
                                                                        \
    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero);    \
    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero);    \
    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero);    \
    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero);    \
    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero);    \
    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero);    \
    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero);    \
    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero);    \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    vx0 = vec_mergeh (vy0, vy4);                                        \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    vy0 = vec_mergeh (vx0, vx4);                                        \
    vy1 = vec_mergel (vx0, vx4);                                        \
    vy2 = vec_mergeh (vx1, vx5);                                        \
    vy3 = vec_mergel (vx1, vx5);                                        \
    vy4 = vec_mergeh (vx2, vx6);                                        \
    vy5 = vec_mergel (vx2, vx6);                                        \
    vy6 = vec_mergeh (vx3, vx7);                                        \
    vy7 = vec_mergel (vx3, vx7);                                        \
                                                                        \
    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias);                       \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    shift = vec_splat_u16 (6);                                          \
    vx0 = vec_sra (vy0, shift);                                         \
    vx1 = vec_sra (vy1, shift);                                         \
    vx2 = vec_sra (vy2, shift);                                         \
    vx3 = vec_sra (vy3, shift);                                         \
    vx4 = vec_sra (vy4, shift);                                         \
    vx5 = vec_sra (vy5, shift);                                         \
    vx6 = vec_sra (vy6, shift);                                         \
    vx7 = vec_sra (vy7, shift);


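/* constants[0] packs the IDCT_HALF multipliers in 1.15 fixed point:
 * c4 = cos(pi/4) = 23170, a0 = tan(pi/8) = 13573, a1 = tan(pi/16) = 6518,
 * a2 = tan(3*pi/16) = 21895, then mc4 = -c4 and ma2 = -a2; the last two
 * halfwords (32, 31) are reinterpreted as one 32-bit value to build the
 * rounding bias. constants[1..4] hold the per-row input scale factors. */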
static const vec_s16 constants[5] = {
    {23170, 13573, 6518, 21895, -23170, -21895, 32, 31},
    {16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725},
    {22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521},
    {21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692},
    {19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722}
};

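/* Compute the IDCT of the 8x8 coefficient block at blk and write the
 * result, clamped to 0..255, into dest with "stride" bytes per row. */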
void idct_put_altivec(uint8_t* dest, int stride, int16_t *blk)
{
POWERPC_PERF_DECLARE(altivec_idct_put_num, 1);
    vec_s16 *block = (vec_s16*)blk;
    vec_u8 tmp;

#if CONFIG_POWERPC_PERF
POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);
#endif
    IDCT

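/* COPY saturates one row of results to unsigned bytes; vec_packsu
 * duplicates the row in both halves of tmp, so the two 32-bit element
 * stores write the correct eight bytes for any 8-byte-aligned dest. */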
#define COPY(dest,src)                                      \
    tmp = vec_packsu (src, src);                            \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);        \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)

POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
}

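/* Compute the IDCT of the 8x8 coefficient block at blk and add the
 * result to the pixels already in dest, clamping to 0..255. */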
void idct_add_altivec(uint8_t* dest, int stride, int16_t *blk)
{
POWERPC_PERF_DECLARE(altivec_idct_add_num, 1);
    vec_s16 *block = (vec_s16*)blk;
    vec_u8 tmp;
    vec_s16 tmp2, tmp3;
    vec_u8 perm0;
    vec_u8 perm1;
    vec_u8 p0, p1, p;

#if CONFIG_POWERPC_PERF
POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1);
#endif

    IDCT

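    /* Build permute vectors that zero-extend the eight dest bytes of a
     * row to 16-bit values: vec_lvsl gives the alignment shift for each
     * of the two possible row offsets (even and odd rows may differ mod
     * 16 depending on stride), and interleaving with 0xFF bytes makes
     * every other permute index select a byte from the zero vector. */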
    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);

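/* ADD loads the 16 aligned bytes covering one dest row, zero-extends the
 * eight pixels to 16 bits with the permute built above, adds the IDCT
 * output with saturation, and stores the packed result as in COPY. */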
#define ADD(dest,src,perm)                                  \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */            \
    tmp = vec_ld (0, dest);                                 \
    tmp2 = (vec_s16)vec_perm (tmp, (vec_u8)zero, perm);     \
    tmp3 = vec_adds (tmp2, src);                            \
    tmp = vec_packsu (tmp3, tmp3);                          \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);        \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    ADD (dest, vx0, perm0)      dest += stride;
    ADD (dest, vx1, perm1)      dest += stride;
    ADD (dest, vx2, perm0)      dest += stride;
    ADD (dest, vx3, perm1)      dest += stride;
    ADD (dest, vx4, perm0)      dest += stride;
    ADD (dest, vx5, perm1)      dest += stride;
    ADD (dest, vx6, perm0)      dest += stride;
    ADD (dest, vx7, perm1)

POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1);
}