x86: check for AV_CPU_FLAG_AVXSLOW where useful
[libav.git] / libavutil / x86 / float_dsp_init.c
CommitLineData
d5a7229b
JR
1/*
2 * This file is part of Libav.
3 *
4 * Libav is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
8 *
9 * Libav is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with Libav; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19#include "config.h"
20
1fda184a 21#include "libavutil/attributes.h"
d5a7229b
JR
22#include "libavutil/cpu.h"
23#include "libavutil/float_dsp.h"
e0c6cce4 24#include "cpu.h"
e034cc6c 25#include "asm.h"
d5a7229b 26
b6649ab5
DB
27void ff_vector_fmul_sse(float *dst, const float *src0, const float *src1,
28 int len);
29void ff_vector_fmul_avx(float *dst, const float *src0, const float *src1,
30 int len);
31
32void ff_vector_fmac_scalar_sse(float *dst, const float *src, float mul,
d5a7229b 33 int len);
b6649ab5 34void ff_vector_fmac_scalar_avx(float *dst, const float *src, float mul,
d5a7229b
JR
35 int len);
36
b6649ab5
DB
37void ff_vector_fmul_scalar_sse(float *dst, const float *src, float mul,
38 int len);
947f9336 39
b6649ab5
DB
40void ff_vector_dmul_scalar_sse2(double *dst, const double *src,
41 double mul, int len);
42void ff_vector_dmul_scalar_avx(double *dst, const double *src,
43 double mul, int len);
ac7eb4cb 44
55aa03b9
RB
45void ff_vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
46 const float *src2, int len);
47void ff_vector_fmul_add_avx(float *dst, const float *src0, const float *src1,
48 const float *src2, int len);
49
42d32469
RB
50void ff_vector_fmul_reverse_sse(float *dst, const float *src0,
51 const float *src1, int len);
52void ff_vector_fmul_reverse_avx(float *dst, const float *src0,
53 const float *src1, int len);
54
d56668bd
RB
55float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
56
566b7a20
CG
57void ff_butterflies_float_sse(float *src0, float *src1, int len);
58
973b4d44 59#if HAVE_6REGS && HAVE_INLINE_ASM
e034cc6c
JR
/**
 * Overlap-add windowing of two float vectors, 3DNowExt (MMX) version.
 *
 * Walks the data from both ends simultaneously: byte offset i advances
 * forward while byte offset j walks backward, two packed floats (one MMX
 * register) per side per iteration.  The per-instruction comments below
 * give the products formed on each step; the results are combined with
 * pfadd/pfsub and stored to the two halves of dst.
 *
 * NOTE(review): each iteration consumes 8 bytes (2 floats) per side, so
 * len is presumably a multiple of 2 — confirm against callers.
 */
static void vector_fmul_window_3dnowext(float *dst, const float *src0,
                                        const float *src1, const float *win,
                                        int len)
{
    x86_reg i = -len * 4; /* byte offset into first half: -4*len .. 0   */
    x86_reg j = len * 4 - 8; /* byte offset into second half: 4*len-8 .. 0 */
    __asm__ volatile (
        "1: \n"
        /* pswapd reverses the two packed floats, giving backward access
         * to win[] and src1[] through the descending offset j. */
        "pswapd (%5, %1), %%mm1 \n"
        "movq (%5, %0), %%mm0 \n"
        "pswapd (%4, %1), %%mm5 \n"
        "movq (%3, %0), %%mm4 \n"
        "movq %%mm0, %%mm2 \n"
        "movq %%mm1, %%mm3 \n"
        "pfmul %%mm4, %%mm2 \n" // src0[len + i] * win[len + i]
        "pfmul %%mm5, %%mm3 \n" // src1[j] * win[len + j]
        "pfmul %%mm4, %%mm1 \n" // src0[len + i] * win[len + j]
        "pfmul %%mm5, %%mm0 \n" // src1[j] * win[len + i]
        "pfadd %%mm3, %%mm2 \n"
        "pfsub %%mm0, %%mm1 \n"
        "pswapd %%mm2, %%mm2 \n"
        "movq %%mm1, (%2, %0) \n"
        "movq %%mm2, (%2, %1) \n"
        "sub $8, %1 \n"
        "add $8, %0 \n"
        "jl 1b \n"
        /* femms clears the MMX state so x87 FPU code can run afterwards. */
        "femms \n"
        : "+r"(i), "+r"(j)
        : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
    );
}
91
/**
 * Overlap-add windowing of two float vectors, SSE version.
 *
 * Same two-ended scheme as the 3DNowExt variant but with four packed
 * floats (one XMM register) per side per iteration.  shufps with
 * immediate 0x1b reverses the four elements of a register, which is how
 * the backward-walking loads through offset j are put in forward order.
 *
 * NOTE(review): movaps requires 16-byte-aligned pointers, and each
 * iteration consumes 16 bytes (4 floats) per side — so dst/src0/src1/win
 * are presumably 16-byte aligned and len a multiple of 4; confirm
 * against callers.
 */
static void vector_fmul_window_sse(float *dst, const float *src0,
                                   const float *src1, const float *win, int len)
{
    x86_reg i = -len * 4; /* byte offset into first half: -4*len .. 0    */
    x86_reg j = len * 4 - 16; /* byte offset into second half: 4*len-16 .. 0 */
    __asm__ volatile (
        "1: \n"
        "movaps (%5, %1), %%xmm1 \n"
        "movaps (%5, %0), %%xmm0 \n"
        "movaps (%4, %1), %%xmm5 \n"
        "movaps (%3, %0), %%xmm4 \n"
        /* reverse element order of the backward-loaded vectors */
        "shufps $0x1b, %%xmm1, %%xmm1 \n"
        "shufps $0x1b, %%xmm5, %%xmm5 \n"
        "movaps %%xmm0, %%xmm2 \n"
        "movaps %%xmm1, %%xmm3 \n"
        "mulps %%xmm4, %%xmm2 \n" // src0[len + i] * win[len + i]
        "mulps %%xmm5, %%xmm3 \n" // src1[j] * win[len + j]
        "mulps %%xmm4, %%xmm1 \n" // src0[len + i] * win[len + j]
        "mulps %%xmm5, %%xmm0 \n" // src1[j] * win[len + i]
        "addps %%xmm3, %%xmm2 \n"
        "subps %%xmm0, %%xmm1 \n"
        "shufps $0x1b, %%xmm2, %%xmm2 \n"
        "movaps %%xmm1, (%2, %0) \n"
        "movaps %%xmm2, (%2, %1) \n"
        "sub $16, %1 \n"
        "add $16, %0 \n"
        "jl 1b \n"
        : "+r"(i), "+r"(j)
        : "r"(dst + len), "r"(src0 + len), "r"(src1), "r"(win + len)
    );
}
973b4d44 123#endif /* HAVE_6REGS && HAVE_INLINE_ASM */
e034cc6c 124
1fda184a 125av_cold void ff_float_dsp_init_x86(AVFloatDSPContext *fdsp)
d5a7229b 126{
3ac7fa81 127 int cpu_flags = av_get_cpu_flags();
d5a7229b 128
973b4d44 129#if HAVE_6REGS && HAVE_INLINE_ASM
3ac7fa81 130 if (INLINE_AMD3DNOWEXT(cpu_flags)) {
e034cc6c
JR
131 fdsp->vector_fmul_window = vector_fmul_window_3dnowext;
132 }
3ac7fa81 133 if (INLINE_SSE(cpu_flags)) {
e034cc6c
JR
134 fdsp->vector_fmul_window = vector_fmul_window_sse;
135 }
136#endif
3ac7fa81 137 if (EXTERNAL_SSE(cpu_flags)) {
d5a7229b 138 fdsp->vector_fmul = ff_vector_fmul_sse;
82b2df97 139 fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_sse;
947f9336 140 fdsp->vector_fmul_scalar = ff_vector_fmul_scalar_sse;
55aa03b9 141 fdsp->vector_fmul_add = ff_vector_fmul_add_sse;
42d32469 142 fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_sse;
d56668bd 143 fdsp->scalarproduct_float = ff_scalarproduct_float_sse;
566b7a20 144 fdsp->butterflies_float = ff_butterflies_float_sse;
d5a7229b 145 }
3ac7fa81 146 if (EXTERNAL_SSE2(cpu_flags)) {
ac7eb4cb
JR
147 fdsp->vector_dmul_scalar = ff_vector_dmul_scalar_sse2;
148 }
d68c0538 149 if (EXTERNAL_AVX_FAST(cpu_flags)) {
d5a7229b 150 fdsp->vector_fmul = ff_vector_fmul_avx;
82b2df97 151 fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_avx;
ac7eb4cb 152 fdsp->vector_dmul_scalar = ff_vector_dmul_scalar_avx;
55aa03b9 153 fdsp->vector_fmul_add = ff_vector_fmul_add_avx;
42d32469 154 fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_avx;
d5a7229b 155 }
d5a7229b 156}