altivec gcc-3 fixes by (Magnus Damm <damm at opensource dot se>)
[libav.git] / libavcodec / ppc / fft_altivec.c
1 /*
2 * FFT/IFFT transforms
3 * AltiVec-enabled
4 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
5 * Based on code Copyright (c) 2002 Fabrice Bellard.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21 #include "../dsputil.h"
22
23 #include "gcc_fixes.h"
24
25 #include "dsputil_altivec.h"
26
27 /*
28 those three macros are from libavcodec/fft.c
29 and are required for the reference C code
30 */
/* Butterfly op:  (pre,pim) = b + a ; (qre,qim) = b - a,
 * where b = (pre1,pim1) and a = (qre1,qim1).
 * All inputs are copied into temporaries first, so the output lvalues may
 * alias the inputs (callers pass p[0].re both as pre and pre1).
 * Wrapped in do { } while (0) so the expansion is a single statement and
 * stays safe inside an unbraced if/else; every argument is parenthesized
 * because callers pass expressions such as -p[3].im. */
#define BF(pre, pim, qre, qim, pre1, pim1, qre1, qim1) \
do {\
    FFTSample ax, ay, bx, by;\
    bx = (pre1);\
    by = (pim1);\
    ax = (qre1);\
    ay = (qim1);\
    (pre) = (bx + ax);\
    (pim) = (by + ay);\
    (qre) = (bx - ax);\
    (qim) = (by - ay);\
} while (0)
/* Scalar multiply; kept as a macro for parity with the fixed-point variants
 * of this code in libavcodec. */
#define MUL16(a,b) ((a) * (b))
/* Complex multiply: (pre + i*pim) = (are + i*aim) * (bre + i*bim).
 * Wrapped in do { } while (0) so the expansion is a single statement.
 * NOTE: pre is written before pim is computed, so the output lvalues must
 * not alias any input (all call sites write into separate temporaries). */
#define CMUL(pre, pim, are, aim, bre, bim) \
do {\
    (pre) = (MUL16(are, bre) - MUL16(aim, bim));\
    (pim) = (MUL16(are, bim) + MUL16(bre, aim));\
} while (0)
50
51
52 /**
53 * Do a complex FFT with the parameters defined in fft_init(). The
54 * input data must be permuted before with s->revtab table. No
55 * 1.0/sqrt(n) normalization is done.
56 * AltiVec-enabled
57 * This code assumes that the 'z' pointer is 16 bytes-aligned
58 * It also assumes all FFTComplex are 8 bytes-aligned pair of float
59 * The code is exactly the same as the SSE version, except
60 * that successive MUL + ADD/SUB have been merged into
61 * fused multiply-add ('vec_madd' in altivec)
62 */
void fft_calc_altivec(FFTContext *s, FFTComplex *z)
{
    POWERPC_TBL_DECLARE(altivec_fft_num, s->nbits >= 6);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
    /* Scalar reference implementation (the macros above mirror
       libavcodec/fft.c), kept for debugging/benchmarking against the
       AltiVec path below. */
    int ln = s->nbits;
    int j, np, np2;
    int nblocks, nloops;
    register FFTComplex *p, *q;
    FFTComplex *exptab = s->exptab;
    int l;
    FFTSample tmp_re, tmp_im;

    POWERPC_TBL_START_COUNT(altivec_fft_num, s->nbits >= 6);

    np = 1 << ln;   /* transform length */

    /* pass 0: length-2 butterflies on adjacent elements, no twiddle
       factors needed */

    p=&z[0];
    j=(np >> 1);
    do {
        BF(p[0].re, p[0].im, p[1].re, p[1].im,
           p[0].re, p[0].im, p[1].re, p[1].im);
        p+=2;
    } while (--j != 0);

    /* pass 1: length-4 butterflies; the second element of the upper pair
       is pre-multiplied by +i (inverse) or -i (forward) — expressed by
       swapping re/im and negating one of them in the BF arguments */


    p=&z[0];
    j=np >> 2;
    if (s->inverse) {
        do {
            BF(p[0].re, p[0].im, p[2].re, p[2].im,
               p[0].re, p[0].im, p[2].re, p[2].im);
            BF(p[1].re, p[1].im, p[3].re, p[3].im,
               p[1].re, p[1].im, -p[3].im, p[3].re);
            p+=4;
        } while (--j != 0);
    } else {
        do {
            BF(p[0].re, p[0].im, p[2].re, p[2].im,
               p[0].re, p[0].im, p[2].re, p[2].im);
            BF(p[1].re, p[1].im, p[3].re, p[3].im,
               p[1].re, p[1].im, p[3].im, -p[3].re);
            p+=4;
        } while (--j != 0);
    }
    /* pass 2 .. ln-1: general radix-2 passes with twiddles from s->exptab;
       nblocks halves and the butterfly span (nloops) doubles each pass */

    nblocks = np >> 3;
    nloops = 1 << 2;
    np2 = np >> 1;
    do {
        p = z;
        q = z + nloops;
        for (j = 0; j < nblocks; ++j) {
            /* first butterfly of each block uses twiddle 1+0i, so no CMUL */
            BF(p->re, p->im, q->re, q->im,
               p->re, p->im, q->re, q->im);

            p++;
            q++;
            for(l = nblocks; l < np2; l += nblocks) {
                CMUL(tmp_re, tmp_im, exptab[l].re, exptab[l].im, q->re, q->im);
                BF(p->re, p->im, q->re, q->im,
                   p->re, p->im, tmp_re, tmp_im);
                p++;
                q++;
            }

            p += nloops;
            q += nloops;
        }
        nblocks = nblocks >> 1;
        nloops = nloops << 1;
    } while (nblocks != 0);

    POWERPC_TBL_STOP_COUNT(altivec_fft_num, s->nbits >= 6);

#else /* ALTIVEC_USE_REFERENCE_C_CODE */
#ifdef CONFIG_DARWIN
    /* Apple gcc only accepts the Motorola-syntax vector literal */
    register const vector float vczero = (const vector float)(0.);
#else
    register const vector float vczero = (const vector float){0.,0.,0.,0.};
#endif

    int ln = s->nbits;
    int j, np, np2;
    int nblocks, nloops;
    register FFTComplex *p, *q;
    FFTComplex *cptr, *cptr1;
    int k;

    POWERPC_TBL_START_COUNT(altivec_fft_num, s->nbits >= 6);

    np = 1 << ln;

    /* passes 0 and 1 fused: each vector float holds two FFTComplex, so one
       loop iteration processes 4 complex elements (two loads, two stores).
       NOTE(review): vcii (presumably builds a vector of +/-1.0f lanes from
       p/n selectors) and vcprmle (presumably a vec_perm selector macro)
       look like compatibility helpers from gcc_fixes.h — confirm their
       exact semantics there before modifying the sign/permute patterns. */
    {
        vector float *r, a, b, a1, c1, c2;

        r = (vector float *)&z[0];

        c1 = vcii(p,p,n,n);   /* presumably {+1,+1,-1,-1}: pass-0 add/sub */

        if (s->inverse)
        {
            c2 = vcii(p,p,n,p);   /* sign pattern for the inverse transform */
        }
        else
        {
            c2 = vcii(p,p,p,n);   /* sign pattern for the forward transform */
        }

        j = (np >> 2);
        do {
            a = vec_ld(0, r);
            a1 = vec_ld(sizeof(vector float), r);

            b = vec_perm(a,a,vcprmle(1,0,3,2));
            a = vec_madd(a,c1,b);
            /* do the pass 0 butterfly */

            b = vec_perm(a1,a1,vcprmle(1,0,3,2));
            b = vec_madd(a1,c1,b);
            /* do the pass 0 butterfly */

            /* multiply third by -i */
            b = vec_perm(b,b,vcprmle(2,3,1,0));

            /* do the pass 1 butterfly */
            vec_st(vec_madd(b,c2,a), 0, r);
            vec_st(vec_nmsub(b,c2,a), sizeof(vector float), r);

            r += 2;
        } while (--j != 0);
    }
    /* pass 2 .. ln-1: same pass structure as the scalar version, but two
       complex butterflies per inner iteration.
       NOTE(review): s->exptab1 is assumed to hold the twiddle factors
       duplicated/interleaved as vector pairs matching the
       vcprmle(2,2,0,0)/(3,3,1,1) splats below — verify against the table
       setup in fft_init(). */

    nblocks = np >> 3;
    nloops = 1 << 2;
    np2 = np >> 1;

    cptr1 = s->exptab1;
    do {
        p = z;
        q = z + nloops;
        j = nblocks;
        do {
            cptr = cptr1;
            k = nloops >> 1;
            do {
                vector float a,b,c,t1;

                a = vec_ld(0, (float*)p);
                b = vec_ld(0, (float*)q);

                /* complex mul */
                c = vec_ld(0, (float*)cptr);
                /* cre*re cim*re */
                t1 = vec_madd(c, vec_perm(b,b,vcprmle(2,2,0,0)),vczero);
                c = vec_ld(sizeof(vector float), (float*)cptr);
                /* -cim*im cre*im */
                b = vec_madd(c, vec_perm(b,b,vcprmle(3,3,1,1)),t1);

                /* butterfly */
                vec_st(vec_add(a,b), 0, (float*)p);
                vec_st(vec_sub(a,b), 0, (float*)q);

                p += 2;
                q += 2;
                cptr += 4;   /* two vectors' worth of twiddles consumed */
            } while (--k);

            p += nloops;
            q += nloops;
        } while (--j);
        cptr1 += nloops * 2;
        nblocks = nblocks >> 1;
        nloops = nloops << 1;
    } while (nblocks != 0);

    POWERPC_TBL_STOP_COUNT(altivec_fft_num, s->nbits >= 6);

#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}