[libav.git] / libavcodec / i386 / fft_sse.c

/*
 * FFT/MDCT transform with SSE optimizations
 * Copyright (c) 2002 Fabrice Bellard.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"

static const int p1m1p1m1[4] __attribute__((aligned(16))) =
    { 0, 1 << 31, 0, 1 << 31 };

static const int m1m1m1m1[4] __attribute__((aligned(16))) =
    { 1 << 31, 1 << 31, 1 << 31, 1 << 31 };
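
/* The two masks above hold the IEEE-754 single-precision sign bit (1 << 31):
 * xorps against p1m1p1m1 negates lanes 1 and 3 of a packed-float register
 * (the imaginary halves of two interleaved complex values), while xorps
 * against m1m1m1m1 negates all four lanes.  The sign flips in the complex
 * arithmetic below are done this way instead of with a floating-point
 * subtraction. */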

void ff_fft_dispatch_sse(FFTComplex *z, int nbits);
void ff_fft_dispatch_interleave_sse(FFTComplex *z, int nbits);
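
/* In-place FFT.  The dispatch routine declared above performs the transform
 * and, for sizes above 16, also interleaves re/im on the way out.  The
 * smallest kernels appear to leave each group of four complex values as four
 * reals followed by four imaginaries, so for n <= 16 the unpcklps/unpckhps
 * loop below folds those split blocks back into FFTComplex order. */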
void ff_fft_calc_sse(FFTContext *s, FFTComplex *z)
{
    int n = 1 << s->nbits;

    ff_fft_dispatch_interleave_sse(z, s->nbits);

    if(n <= 16) {
        x86_reg i = -8*n;
        asm volatile(
            "1: \n"
            "movaps     (%0,%1), %%xmm0 \n"
            "movaps      %%xmm0, %%xmm1 \n"
            "unpcklps 16(%0,%1), %%xmm0 \n"
            "unpckhps 16(%0,%1), %%xmm1 \n"
            "movaps      %%xmm0,   (%0,%1) \n"
            "movaps      %%xmm1, 16(%0,%1) \n"
            "add $32, %0 \n"
            "jl 1b \n"
            :"+r"(i)
            :"r"(z+n)
            :"memory"
        );
    }
}
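
/* Apply the FFT input permutation: read two complex values of z with one
 * aligned load, scatter them through s->revtab into s->tmp_buf, then copy
 * the permuted buffer back over z. */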
void ff_fft_permute_sse(FFTContext *s, FFTComplex *z)
{
    int n = 1 << s->nbits;
    int i;
    for(i=0; i<n; i+=2) {
        asm volatile(
            "movaps %2, %%xmm0 \n"
            "movlps %%xmm0, %0 \n"
            "movhps %%xmm0, %1 \n"
            :"=m"(s->tmp_buf[s->revtab[i]]),
             "=m"(s->tmp_buf[s->revtab[i+1]])
            :"m"(z[i])
        );
    }
    memcpy(z, s->tmp_buf, n*sizeof(FFTComplex));
}
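
/* Shared IMDCT core: pre-rotate the input by the (tcos, tsin) twiddles while
 * scattering through revtab (so the FFT needs no separate permute pass), run
 * the in-place complex FFT, then post-rotate the result in tmp.  The two
 * public entry points below reorder tmp into their final output layouts. */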
static void imdct_sse(MDCTContext *s, const FFTSample *input, FFTSample *tmp)
{
    x86_reg k;
    long n4, n2, n;
    const uint16_t *revtab = s->fft.revtab;
    const FFTSample *tcos = s->tcos;
    const FFTSample *tsin = s->tsin;
    const FFTSample *in1, *in2;
    FFTComplex *z = (FFTComplex *)tmp;

    n = 1 << s->nbits;
    n2 = n >> 1;
    n4 = n >> 2;

#ifdef ARCH_X86_64
    asm volatile ("movaps %0, %%xmm8\n\t"::"m"(*p1m1p1m1));
#define P1M1P1M1 "%%xmm8"
#else
#define P1M1P1M1 "%4"
#endif

    /* pre rotation */
    in1 = input;
    in2 = input + n2 - 4;
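
    /* Each iteration produces four pre-rotated complex values: two values are
     * formed from in2 (walking down) and in1 (walking up), multiplied by the
     * twiddles (tcos[k], tsin[k]) using mulps plus the xorps sign-mask trick
     * in place of an explicit subtraction, and the products are scattered to
     * z[revtab[k]] .. z[revtab[k+3]]. */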
    /* Complex multiplication */
    for (k = 0; k < n4; k += 4) {
        asm volatile (
            "movaps        %0, %%xmm0   \n\t"   // xmm0 = r0 X  r1 X : in2
            "movaps        %1, %%xmm3   \n\t"   // xmm3 = X  i1 X  i0: in1
            "movaps  -16+1*%0, %%xmm4   \n\t"   // xmm4 = r0 X  r1 X : in2
            "movaps   16+1*%1, %%xmm7   \n\t"   // xmm7 = X  i1 X  i0: in1
            "movlps        %2, %%xmm1   \n\t"   // xmm1 = X  X  R1 R0: tcos
            "movlps        %3, %%xmm2   \n\t"   // xmm2 = X  X  I1 I0: tsin
            "movlps    8+1*%2, %%xmm5   \n\t"   // xmm5 = X  X  R1 R0: tcos
            "movlps    8+1*%3, %%xmm6   \n\t"   // xmm6 = X  X  I1 I0: tsin
            "shufps $95, %%xmm0, %%xmm0 \n\t"   // xmm0 = r1 r1 r0 r0
            "shufps $160,%%xmm3, %%xmm3 \n\t"   // xmm3 = i1 i1 i0 i0
            "shufps $95, %%xmm4, %%xmm4 \n\t"   // xmm4 = r1 r1 r0 r0
            "shufps $160,%%xmm7, %%xmm7 \n\t"   // xmm7 = i1 i1 i0 i0
            "unpcklps %%xmm2, %%xmm1    \n\t"   // xmm1 = I1 R1 I0 R0
            "unpcklps %%xmm6, %%xmm5    \n\t"   // xmm5 = I1 R1 I0 R0
            "movaps   %%xmm1, %%xmm2    \n\t"   // xmm2 = I1 R1 I0 R0
            "movaps   %%xmm5, %%xmm6    \n\t"   // xmm6 = I1 R1 I0 R0
            "xorps "P1M1P1M1", %%xmm2   \n\t"   // xmm2 = -I1 R1 -I0 R0
            "xorps "P1M1P1M1", %%xmm6   \n\t"   // xmm6 = -I1 R1 -I0 R0
            "mulps    %%xmm1, %%xmm0    \n\t"   // xmm0 = rI rR rI rR
            "mulps    %%xmm5, %%xmm4    \n\t"   // xmm4 = rI rR rI rR
            "shufps $177,%%xmm2, %%xmm2 \n\t"   // xmm2 = R1 -I1 R0 -I0
            "shufps $177,%%xmm6, %%xmm6 \n\t"   // xmm6 = R1 -I1 R0 -I0
            "mulps    %%xmm2, %%xmm3    \n\t"   // xmm3 = Ri -Ii Ri -Ii
            "mulps    %%xmm6, %%xmm7    \n\t"   // xmm7 = Ri -Ii Ri -Ii
            "addps    %%xmm3, %%xmm0    \n\t"   // xmm0 = result
            "addps    %%xmm7, %%xmm4    \n\t"   // xmm4 = result
            ::"m"(in2[-2*k]), "m"(in1[2*k]),
              "m"(tcos[k]), "m"(tsin[k])
#ifndef ARCH_X86_64
            ,"m"(*p1m1p1m1)
#endif
        );
        /* Should be in the same block, hack for gcc2.95 & gcc3 */
        asm (
            "movlps %%xmm0, %0 \n\t"
            "movhps %%xmm0, %1 \n\t"
            "movlps %%xmm4, %2 \n\t"
            "movhps %%xmm4, %3 \n\t"
            :"=m"(z[revtab[k]]), "=m"(z[revtab[k + 1]]),
             "=m"(z[revtab[k + 2]]), "=m"(z[revtab[k + 3]])
        );
    }

    ff_fft_calc_sse(&s->fft, z);

#ifndef ARCH_X86_64
#undef P1M1P1M1
#define P1M1P1M1 "%3"
#endif

    /* post rotation + reordering */
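    /* Same complex multiply by (tcos[k], tsin[k]) as in the pre rotation,
     * applied in place to the FFT output; on 32-bit builds the sign mask
     * moves to operand %3 because this asm block takes one operand fewer. */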
    for (k = 0; k < n4; k += 4) {
        asm (
            "movaps        %0, %%xmm0   \n\t"   // xmm0 = i1 r1 i0 r0: z
            "movaps   16+1*%0, %%xmm4   \n\t"   // xmm4 = i1 r1 i0 r0: z
            "movlps        %1, %%xmm1   \n\t"   // xmm1 = X  X  R1 R0: tcos
            "movlps    8+1*%1, %%xmm5   \n\t"   // xmm5 = X  X  R1 R0: tcos
            "movaps   %%xmm0, %%xmm3    \n\t"   // xmm3 = i1 r1 i0 r0
            "movaps   %%xmm4, %%xmm7    \n\t"   // xmm7 = i1 r1 i0 r0
            "movlps        %2, %%xmm2   \n\t"   // xmm2 = X  X  I1 I0: tsin
            "movlps    8+1*%2, %%xmm6   \n\t"   // xmm6 = X  X  I1 I0: tsin
            "shufps $160,%%xmm0, %%xmm0 \n\t"   // xmm0 = r1 r1 r0 r0
            "shufps $245,%%xmm3, %%xmm3 \n\t"   // xmm3 = i1 i1 i0 i0
            "shufps $160,%%xmm4, %%xmm4 \n\t"   // xmm4 = r1 r1 r0 r0
            "shufps $245,%%xmm7, %%xmm7 \n\t"   // xmm7 = i1 i1 i0 i0
            "unpcklps %%xmm2, %%xmm1    \n\t"   // xmm1 = I1 R1 I0 R0
            "unpcklps %%xmm6, %%xmm5    \n\t"   // xmm5 = I1 R1 I0 R0
            "movaps   %%xmm1, %%xmm2    \n\t"   // xmm2 = I1 R1 I0 R0
            "movaps   %%xmm5, %%xmm6    \n\t"   // xmm6 = I1 R1 I0 R0
            "xorps "P1M1P1M1", %%xmm2   \n\t"   // xmm2 = -I1 R1 -I0 R0
            "mulps    %%xmm1, %%xmm0    \n\t"   // xmm0 = rI rR rI rR
            "xorps "P1M1P1M1", %%xmm6   \n\t"   // xmm6 = -I1 R1 -I0 R0
            "mulps    %%xmm5, %%xmm4    \n\t"   // xmm4 = rI rR rI rR
            "shufps $177,%%xmm2, %%xmm2 \n\t"   // xmm2 = R1 -I1 R0 -I0
            "shufps $177,%%xmm6, %%xmm6 \n\t"   // xmm6 = R1 -I1 R0 -I0
            "mulps    %%xmm2, %%xmm3    \n\t"   // xmm3 = Ri -Ii Ri -Ii
            "mulps    %%xmm6, %%xmm7    \n\t"   // xmm7 = Ri -Ii Ri -Ii
            "addps    %%xmm3, %%xmm0    \n\t"   // xmm0 = result
            "addps    %%xmm7, %%xmm4    \n\t"   // xmm4 = result
            "movaps   %%xmm0, %0        \n\t"
            "movaps   %%xmm4, 16+1*%0   \n\t"
            :"+m"(z[k])
            :"m"(tcos[k]), "m"(tsin[k])
#ifndef ARCH_X86_64
            ,"m"(*p1m1p1m1)
#endif
        );
    }
}
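
/* Full IMDCT: imdct_sse() leaves the n/4 post-rotated complex values in tmp;
 * the loop below shuffles and sign-flips them (xorps against m1m1m1m1) into
 * all n real output samples, 16 per iteration, at the four destinations
 * noted in the per-store comments. */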
void ff_imdct_calc_sse(MDCTContext *s, FFTSample *output,
                       const FFTSample *input, FFTSample *tmp)
{
    x86_reg k;
    long n8, n2, n;
    FFTComplex *z = (FFTComplex *)tmp;

    n = 1 << s->nbits;
    n2 = n >> 1;
    n8 = n >> 3;

    imdct_sse(s, input, tmp);

    /*
       Mnemonics:
       0 = z[k].re
       1 = z[k].im
       2 = z[k + 1].re
       3 = z[k + 1].im
       4 = z[-k - 2].re
       5 = z[-k - 2].im
       6 = z[-k - 1].re
       7 = z[-k - 1].im
    */
    k = 16-n;
    asm volatile("movaps %0, %%xmm7 \n\t"::"m"(*m1m1m1m1));
    asm volatile(
        "1:                          \n\t"
        "movaps  -16(%4,%0), %%xmm1  \n\t"  // xmm1 = 4 5 6 7 = z[-2-k]
        "neg %0                      \n\t"
        "movaps     (%4,%0), %%xmm0  \n\t"  // xmm0 = 0 1 2 3 = z[k]
        "xorps    %%xmm7, %%xmm0     \n\t"  // xmm0 = -0 -1 -2 -3
        "movaps   %%xmm0, %%xmm2     \n\t"  // xmm2 = -0 -1 -2 -3
        "shufps $141,%%xmm1, %%xmm0  \n\t"  // xmm0 = -1 -3 4 6
        "shufps $216,%%xmm1, %%xmm2  \n\t"  // xmm2 = -0 -2 5 7
        "shufps $156,%%xmm0, %%xmm0  \n\t"  // xmm0 = -1 6 -3 4 !
        "shufps $156,%%xmm2, %%xmm2  \n\t"  // xmm2 = -0 7 -2 5 !
        "movaps   %%xmm0, (%1,%0)    \n\t"  // output[2*k]
        "movaps   %%xmm2, (%2,%0)    \n\t"  // output[n2+2*k]
        "neg %0                      \n\t"
        "shufps $27, %%xmm0, %%xmm0  \n\t"  // xmm0 = 4 -3 6 -1
        "xorps    %%xmm7, %%xmm0     \n\t"  // xmm0 = -4 3 -6 1 !
        "shufps $27, %%xmm2, %%xmm2  \n\t"  // xmm2 = 5 -2 7 -0 !
        "movaps   %%xmm0, -16(%2,%0) \n\t"  // output[n2-4-2*k]
        "movaps   %%xmm2, -16(%3,%0) \n\t"  // output[n-4-2*k]
        "add $16, %0                 \n\t"
        "jle 1b                      \n\t"
        :"+r"(k)
        :"r"(output), "r"(output+n2), "r"(output+n), "r"(z+n8)
        :"memory"
    );
}
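
/* Half-size IMDCT: same transform core, but only n/2 output samples are
 * written, mirrored around the midpoint output + n4 in a single pass
 * (j walks up from the start while k walks down from the end). */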
void ff_imdct_half_sse(MDCTContext *s, FFTSample *output,
                       const FFTSample *input, FFTSample *tmp)
{
    x86_reg j, k;
    long n8, n4, n;
    FFTComplex *z = (FFTComplex *)tmp;

    n = 1 << s->nbits;
    n4 = n >> 2;
    n8 = n >> 3;

    imdct_sse(s, input, tmp);

    j = -n;
    k = n-16;
    asm volatile("movaps %0, %%xmm7 \n\t"::"m"(*m1m1m1m1));
    asm volatile(
        "1:                          \n\t"
        "movaps     (%3,%1), %%xmm0  \n\t"
        "movaps     (%3,%0), %%xmm1  \n\t"
        "xorps       %%xmm7, %%xmm0  \n\t"
        "movaps      %%xmm0, %%xmm2  \n\t"
        "shufps $141,%%xmm1, %%xmm0  \n\t"
        "shufps $216,%%xmm1, %%xmm2  \n\t"
        "shufps $54, %%xmm0, %%xmm0  \n\t"
        "shufps $156,%%xmm2, %%xmm2  \n\t"
        "xorps       %%xmm7, %%xmm0  \n\t"
        "movaps      %%xmm2, (%2,%1) \n\t"
        "movaps      %%xmm0, (%2,%0) \n\t"
        "sub $16, %1                 \n\t"
        "add $16, %0                 \n\t"
        "jl 1b                       \n\t"
        :"+r"(j), "+r"(k)
        :"r"(output+n4), "r"(z+n8)
        :"memory"
    );
}