/*
 * ff_fft_calc_3dn/3dn2/sse: convert intrinsics to inline asm.
 * (libav.git: libavcodec/i386/fft_3dn2.c)
 */
/*
 * FFT/MDCT transform with Extended 3DNow! optimizations
 * Copyright (c) 2006 Zuxy MENG Jie, Loren Merritt
 * Based on fft_sse.c copyright (c) 2002 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "../dsputil.h"

/* 64-bit sign-flip masks for packed single-precision 3DNow! math.
 * Loaded into mm7 and applied with pxor to negate one float of a pair:
 * p1m1 carries the sign bit in the high element, m1p1 in the low one.
 * 1U << 31 avoids the undefined behavior of left-shifting a signed 1
 * into the sign bit; the cast keeps the declared element type (and the
 * resulting bit pattern, INT_MIN) exactly as before. */
static const int p1m1[2] __attribute__((aligned(8))) =
    { 0, (int)(1U << 31) };

static const int m1p1[2] __attribute__((aligned(8))) =
    { (int)(1U << 31), 0 };
/**
 * In-place complex FFT of 1 << s->nbits points, Extended 3DNow!
 * (pswapd/pfpnacc) implementation.
 *
 * Direction is selected by s->inverse: mm7 is preloaded with one of the
 * two sign masks (m1p1/p1m1) and applied with pxor where a component
 * must be negated, so forward and inverse transforms share all code.
 *
 * Structure: one fused loop performs the first two butterfly passes,
 * then the generic nested loop runs passes 2 .. ln-1 with twiddle
 * factors read from s->exptab1 (see the "pass 2 .. ln-1" marker below).
 *
 * NOTE(review): the asm statements clobber mm0-mm7 and the condition
 * codes without declaring clobbers, and the loop asm reads/writes z[]
 * without a "memory" clobber; presumably safe with the compilers this
 * was written for because the body is bracketed by femms and z is only
 * accessed through the asm — confirm before reusing the pattern.
 */
void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z)
{
    int ln = s->nbits;          /* transform has 1 << ln complex points */
    long i, j;
    long nblocks, nloops;
    FFTComplex *p, *cptr;

    /* Enter MMX state and load the direction-dependent sign mask into
     * mm7; it stays live across the first loop below. */
    asm volatile(
        /* FEMMS is not a must here but recommended by AMD */
        "femms \n\t"
        "movq %0, %%mm7 \n\t"
        ::"m"(*(s->inverse ? m1p1 : p1m1))
    );

    /* Passes 0 and 1 fused: butterflies over each group of 4 complex
     * values (32 bytes), walking the buffer from the end downwards.
     * i counts bytes: 8 << ln == (1 << ln) * sizeof(FFTComplex). */
    i = 8 << ln;
    asm volatile(
        "1: \n\t"
        "sub $32, %0 \n\t"
        "movq (%0,%1), %%mm0 \n\t"
        "movq 16(%0,%1), %%mm1 \n\t"
        "movq 8(%0,%1), %%mm2 \n\t"
        "movq 24(%0,%1), %%mm3 \n\t"
        "movq %%mm0, %%mm4 \n\t"
        "movq %%mm1, %%mm5 \n\t"
        "pfadd %%mm2, %%mm0 \n\t"
        "pfadd %%mm3, %%mm1 \n\t"
        "pfsub %%mm2, %%mm4 \n\t"
        "pfsub %%mm3, %%mm5 \n\t"
        "movq %%mm0, %%mm2 \n\t"
        "pswapd %%mm5, %%mm5 \n\t"
        "movq %%mm4, %%mm3 \n\t"
        "pxor %%mm7, %%mm5 \n\t"
        "pfadd %%mm1, %%mm0 \n\t"
        "pfadd %%mm5, %%mm4 \n\t"
        "pfsub %%mm1, %%mm2 \n\t"
        "pfsub %%mm5, %%mm3 \n\t"
        "movq %%mm0, (%0,%1) \n\t"
        "movq %%mm4, 8(%0,%1) \n\t"
        "movq %%mm2, 16(%0,%1) \n\t"
        "movq %%mm3, 24(%0,%1) \n\t"
        /* jg tests the flags set by "sub" at the loop top; the
         * intervening MMX/3DNow! instructions do not touch EFLAGS. */
        "jg 1b \n\t"
        :"+r"(i)
        :"r"(z)
    );
    /* pass 2 .. ln-1 */

    nblocks = 1 << (ln-3);
    nloops = 1 << 2;
    cptr = s->exptab1;
    do {
        p = z;
        j = nblocks;
        do {
            i = nloops*8;   /* byte count over one half-block */
            /* One butterfly pass: combine block p[] with p[nloops..]
             * using complex twiddles from cptr; the (%3,%0,2) scaled
             * addressing reflects exptab1's interleaved layout (the
             * redundant cptr[2]/cptr[3] entries are recreated with
             * pswapd, per the inline comment).  Note mm7 — the sign
             * mask from the prologue — is dead here and is reused as a
             * scratch register. */
            asm volatile(
                "1: \n\t"
                "sub $16, %0 \n\t"
                "movq (%1,%0), %%mm0 \n\t"
                "movq 8(%1,%0), %%mm1 \n\t"
                "movq (%2,%0), %%mm2 \n\t"
                "movq 8(%2,%0), %%mm3 \n\t"
                "movq (%3,%0,2), %%mm4 \n\t"
                "movq 8(%3,%0,2), %%mm5 \n\t"
                "pswapd %%mm4, %%mm6 \n\t" // no need for cptr[2] & cptr[3]
                "pswapd %%mm5, %%mm7 \n\t"
                "pfmul %%mm2, %%mm4 \n\t" // cre*re cim*im
                "pfmul %%mm3, %%mm5 \n\t"
                "pfmul %%mm2, %%mm6 \n\t" // cim*re cre*im
                "pfmul %%mm3, %%mm7 \n\t"
                "pfpnacc %%mm6, %%mm4 \n\t" // cre*re-cim*im cim*re+cre*im
                "pfpnacc %%mm7, %%mm5 \n\t"
                "movq %%mm0, %%mm2 \n\t"
                "movq %%mm1, %%mm3 \n\t"
                "pfadd %%mm4, %%mm0 \n\t"
                "pfadd %%mm5, %%mm1 \n\t"
                "pfsub %%mm4, %%mm2 \n\t"
                "pfsub %%mm5, %%mm3 \n\t"
                "movq %%mm0, (%1,%0) \n\t"
                "movq %%mm1, 8(%1,%0) \n\t"
                "movq %%mm2, (%2,%0) \n\t"
                "movq %%mm3, 8(%2,%0) \n\t"
                "jg 1b \n\t"
                :"+r"(i)
                :"r"(p), "r"(p + nloops), "r"(cptr)
            );
            p += nloops*2;
        } while (--j);
        cptr += nloops*2;
        nblocks >>= 1;   /* half as many blocks ... */
        nloops <<= 1;    /* ... each twice as long, per pass */
    } while (nblocks != 0);
    /* Leave MMX state so subsequent x87 FPU code works. */
    asm volatile("femms");
}

/**
 * Inverse MDCT, Extended 3DNow! version.
 *
 * @param s       MDCT context; n = 1 << s->nbits output samples
 * @param output  n samples, written in four sign-flipped quarters
 * @param input   n/2 coefficients (read via in1 forwards / in2 backwards)
 * @param tmp     scratch for n/4 complex values, aliased as FFTComplex *z
 *
 * Standard 3-step IMDCT: pre-rotate the input by the tcos/tsin twiddles
 * into revtab order, run an n/4-point complex FFT, then post-rotate and
 * reorder/sign-flip into the output buffer.
 *
 * NOTE(review): several asm statements here deliberately carry values
 * in mm registers *across* statement boundaries (see the FIXME) and
 * omit mm/flags clobbers; this depends on the compiler emitting no
 * intervening FP/MMX code — fragile, confirm before touching.
 */
void ff_imdct_calc_3dn2(MDCTContext *s, FFTSample *output,
                        const FFTSample *input, FFTSample *tmp)
{
    long k, n8, n4, n2, n;
    const uint16_t *revtab = s->fft.revtab;
    const FFTSample *tcos = s->tcos;
    const FFTSample *tsin = s->tsin;
    const FFTSample *in1, *in2;
    FFTComplex *z = (FFTComplex *)tmp;

    n = 1 << s->nbits;
    n2 = n >> 1;
    n4 = n >> 2;
    n8 = n >> 3;

    /* pre rotation */
    in1 = input;
    in2 = input + n2 - 1;
    for(k = 0; k < n4; k++) {
        // FIXME a single block is faster, but gcc 2.95 and 3.4.x on 32bit can't compile it
        /* Build {in2[-2k], in1[2k]} in mm0, the twiddle {tcos[k],
         * tsin[k]} in mm1, and complex-multiply them (mul, swapped mul,
         * pfpnacc). The result stays live in mm0 into the next asm
         * statement — that is the split the FIXME refers to. */
        asm volatile(
            "movd %0, %%mm0 \n\t"
            "movd %2, %%mm1 \n\t"
            "punpckldq %1, %%mm0 \n\t"
            "punpckldq %3, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "pfmul %%mm1, %%mm0 \n\t"
            "pswapd %%mm1, %%mm1 \n\t"
            "pfmul %%mm1, %%mm2 \n\t"
            "pfpnacc %%mm2, %%mm0 \n\t"
            ::"m"(in2[-2*k]), "m"(in1[2*k]),
              "m"(tcos[k]), "m"(tsin[k])
        );
        /* Store the rotated value at its FFT-permuted slot (revtab). */
        asm volatile(
            "movq %%mm0, %0 \n\t"
            :"=m"(z[revtab[k]])
        );
    }

    ff_fft_calc(&s->fft, z);

    /* post rotation + reordering */
    for(k = 0; k < n4; k++) {
        /* In-place complex multiply z[k] *= {tcos[k], tsin[k]}, same
         * mul/pswapd/pfpnacc pattern as the pre-rotation above. */
        asm volatile(
            "movq %0, %%mm0 \n\t"
            "movd %1, %%mm1 \n\t"
            "punpckldq %2, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "pfmul %%mm1, %%mm0 \n\t"
            "pswapd %%mm1, %%mm1 \n\t"
            "pfmul %%mm1, %%mm2 \n\t"
            "pfpnacc %%mm2, %%mm0 \n\t"
            "movq %%mm0, %0 \n\t"
            :"+m"(z[k])
            :"m"(tcos[k]), "m"(tsin[k])
        );
    }

    /* Fan the rotated values out into the four quarters of output[];
     * z now points at the middle of the scratch buffer, so z[k] and
     * z[-1-k] walk symmetrically outwards from the center. */
    z += n8;
    /* mm7 = mask with only the sign bit of the low float set (movd
     * zero-extends), used by the pxors below to negate one component.
     * NOTE(review): 1<<31 left-shifts into the sign bit of a signed int
     * — undefined behavior in ISO C; 1U<<31 would be the safe spelling. */
    asm volatile("movd %0, %%mm7" ::"r"(1<<31));
    for(k = 0; k < n8; k++) {
        /* Load the symmetric pair; mm0/mm1 stay live into the next asm
         * statement (same split-statement workaround as above). */
        asm volatile(
            "movq %0, %%mm0 \n\t"
            "pswapd %1, %%mm1 \n\t"
            ::"m"(z[k]), "m"(z[-1-k])
        );
        /* Shuffle and sign-flip into the four output positions; the
         * per-store comments give the value layout of each movq. */
        asm volatile(
            "movq %%mm0, %%mm2 \n\t"
            "pxor %%mm7, %%mm2 \n\t"
            "punpckldq %%mm1, %%mm2 \n\t"
            "pswapd %%mm2, %%mm3 \n\t"
            "punpckhdq %%mm1, %%mm0 \n\t"
            "pswapd %%mm0, %%mm4 \n\t"
            "pxor %%mm7, %%mm0 \n\t"
            "pxor %%mm7, %%mm4 \n\t"
            "movq %%mm0, %0 \n\t" // { -z[n8+k].im, z[n8-1-k].re }
            "movq %%mm4, %1 \n\t" // { -z[n8-1-k].re, z[n8+k].im }
            "movq %%mm2, %2 \n\t" // { -z[n8+k].re, z[n8-1-k].im }
            "movq %%mm3, %3 \n\t" // { z[n8-1-k].im, -z[n8+k].re }
            :"=m"(output[2*k]), "=m"(output[n2-2-2*k]),
             "=m"(output[n2+2*k]), "=m"(output[n-2-2*k])
            ::"memory"
        );
    }
    /* Leave MMX state for subsequent x87 code. */
    asm volatile("femms");
}
