cpu: split flag checks per arch in av_cpu_max_align()
libavutil/x86/cpu.c
/*
 * CPU detection code, extracted from mmx.h
 * (c)1997-99 by H. Dietz and R. Fisher
 * Converted to C and improved by Fabrice Bellard.
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>

#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavutil/cpu.h"
#include "libavutil/cpu_internal.h"

#if HAVE_X86ASM

#define cpuid(index, eax, ebx, ecx, edx)        \
    ff_cpu_cpuid(index, &eax, &ebx, &ecx, &edx)

#define xgetbv(index, eax, edx)                 \
    ff_cpu_xgetbv(index, &eax, &edx)

#elif HAVE_INLINE_ASM

/* ebx saving is necessary for PIC. gcc seems unable to see it alone */
#define cpuid(index, eax, ebx, ecx, edx)                        \
    __asm__ volatile (                                          \
        "mov    %%"FF_REG_b", %%"FF_REG_S" \n\t"                \
        "cpuid                             \n\t"                \
        "xchg   %%"FF_REG_b", %%"FF_REG_S                       \
        : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx)        \
        : "0" (index))

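/* xgetbv is emitted as its raw encoding (0x0f 0x01 0xd0) rather than by
 * mnemonic, so the macro also assembles with toolchains whose assembler
 * predates the instruction. */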
#define xgetbv(index, eax, edx)                                 \
    __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))

#define get_eflags(x)                           \
    __asm__ volatile ("pushfl    \n"            \
                      "pop    %0 \n"            \
                      : "=r"(x))

#define set_eflags(x)                           \
    __asm__ volatile ("push    %0 \n"           \
                      "popfl     \n"            \
                      :: "r"(x))

#endif /* HAVE_INLINE_ASM */

#if ARCH_X86_64

#define cpuid_test() 1

#elif HAVE_X86ASM

#define cpuid_test ff_cpu_cpuid_test

#elif HAVE_INLINE_ASM

static int cpuid_test(void)
{
    x86_reg a, c;

    /* Check if CPUID is supported by attempting to toggle the ID bit in
     * the EFLAGS register. */
    get_eflags(a);
    set_eflags(a ^ 0x200000);
    get_eflags(c);

    return a != c;
}
#endif

/* Function to test if multimedia instructions are supported... */
int ff_get_cpu_flags_x86(void)
{
    int rval = 0;

#ifdef cpuid

    int eax, ebx, ecx, edx;
    int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
    int family = 0, model = 0;
    union { int i[3]; char c[12]; } vendor;

    if (!cpuid_test())
        return 0; /* CPUID not supported */

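    /* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order; passing
     * vendor.i[0], vendor.i[2], vendor.i[1] as the EBX/ECX/EDX outputs lays
     * the twelve characters out contiguously (e.g. "GenuineIntel"). */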
    cpuid(0, max_std_level, vendor.i[0], vendor.i[2], vendor.i[1]);

    if (max_std_level >= 1) {
        cpuid(1, eax, ebx, ecx, std_caps);
        family = ((eax >>  8) & 0xf) + ((eax >> 20) & 0xff);
        model  = ((eax >>  4) & 0xf) + ((eax >> 12) & 0xf0);
        if (std_caps & (1 << 15))
            rval |= AV_CPU_FLAG_CMOV;
        if (std_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_MMXEXT;
#if HAVE_SSE
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_SSE;
        if (std_caps & (1 << 26))
            rval |= AV_CPU_FLAG_SSE2;
        if (ecx & 1)
            rval |= AV_CPU_FLAG_SSE3;
        if (ecx & 0x00000200)
            rval |= AV_CPU_FLAG_SSSE3;
        if (ecx & 0x00080000)
            rval |= AV_CPU_FLAG_SSE4;
        if (ecx & 0x00100000)
            rval |= AV_CPU_FLAG_SSE42;
#if HAVE_AVX
        /* Check OSXSAVE and AVX bits */
        if ((ecx & 0x18000000) == 0x18000000) {
            /* Check for OS support: XGETBV(0) reads XCR0, where bits 1 (SSE)
             * and 2 (AVX) indicate which register state the OS preserves. */
            xgetbv(0, eax, edx);
            if ((eax & 0x6) == 0x6) {
                rval |= AV_CPU_FLAG_AVX;
                if (ecx & 0x00001000)
                    rval |= AV_CPU_FLAG_FMA3;
            }
        }
#endif /* HAVE_AVX */
#endif /* HAVE_SSE */
    }
    if (max_std_level >= 7) {
        cpuid(7, eax, ebx, ecx, edx);
#if HAVE_AVX2
        if (ebx & 0x00000020)
            rval |= AV_CPU_FLAG_AVX2;
#endif /* HAVE_AVX2 */
        /* BMI1/2 don't need OS support */
        if (ebx & 0x00000008) {
            rval |= AV_CPU_FLAG_BMI1;
            if (ebx & 0x00000100)
                rval |= AV_CPU_FLAG_BMI2;
        }
    }

    cpuid(0x80000000, max_ext_level, ebx, ecx, edx);

    if (max_ext_level >= 0x80000001) {
        cpuid(0x80000001, eax, ebx, ecx, ext_caps);
        if (ext_caps & (1U << 31))
            rval |= AV_CPU_FLAG_3DNOW;
        if (ext_caps & (1 << 30))
            rval |= AV_CPU_FLAG_3DNOWEXT;
        if (ext_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (ext_caps & (1 << 22))
            rval |= AV_CPU_FLAG_MMXEXT;

        if (!strncmp(vendor.c, "AuthenticAMD", 12)) {
            /* Allow for selectively disabling SSE2 functions on AMD processors
               with SSE2 support but not SSE4a. This includes Athlon64, some
               Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are
               faster than SSE2 often enough to utilize this special-case flag.
               AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this
               case so that SSE2 is used unless explicitly disabled by checking
               AV_CPU_FLAG_SSE2SLOW. */
            if (rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040))
                rval |= AV_CPU_FLAG_SSE2SLOW;

            /* Similar to the above but for AVX functions on AMD processors.
               This is necessary only for functions using YMM registers on
               Bulldozer-based CPUs as they lack 256-bit execution units;
               SSE/AVX functions using XMM registers are always faster on them.
               AV_CPU_FLAG_AVX and AV_CPU_FLAG_AVXSLOW are both set so that AVX
               is used unless explicitly disabled by checking
               AV_CPU_FLAG_AVXSLOW.
               TODO: Confirm whether Excavator is affected by this once it is
               released, and update the check if necessary. Same for btver2. */
            if (family == 0x15 && (rval & AV_CPU_FLAG_AVX))
                rval |= AV_CPU_FLAG_AVXSLOW;
        }

        /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
         * used unless the OS has AVX support. */
        if (rval & AV_CPU_FLAG_AVX) {
            if (ecx & 0x00000800)
                rval |= AV_CPU_FLAG_XOP;
            if (ecx & 0x00010000)
                rval |= AV_CPU_FLAG_FMA4;
        }
    }

    if (!strncmp(vendor.c, "GenuineIntel", 12)) {
        if (family == 6 && (model == 9 || model == 13 || model == 14)) {
            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
             * 6/14 (core1 "yonah") theoretically support sse2, but it's
             * usually slower than mmx, so let's just pretend they don't.
             * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
             * enabled so that SSE2 is not used unless explicitly enabled
             * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
             * applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
            if (rval & AV_CPU_FLAG_SSE2)
                rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
            if (rval & AV_CPU_FLAG_SSE3)
                rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
        }
        /* The Atom processor has SSSE3 support, which is useful in many cases,
         * but the SSSE3 version of a function is sometimes slower than its
         * SSE2 equivalent on the Atom, while generally being faster on other
         * processors supporting SSSE3. This flag allows for selectively
         * disabling certain SSSE3 functions on the Atom. */
        if (family == 6 && model == 28)
            rval |= AV_CPU_FLAG_ATOM;

        /* Conroe has a slow shuffle unit. Check the model number to ensure not
         * to include crippled low-end Penryns and Nehalems that lack SSE4. */
        if ((rval & AV_CPU_FLAG_SSSE3) && !(rval & AV_CPU_FLAG_SSE4) &&
            family == 6 && model < 23)
            rval |= AV_CPU_FLAG_SSSE3SLOW;
    }

#endif /* cpuid */

    return rval;
}
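
/* Usage sketch, not part of the original file: callers reach these flags
 * through av_get_cpu_flags() and gate their SIMD code paths on them. The
 * *SLOW flags are hints rather than capability bits, so a function known to
 * be slow on the flagged CPUs is skipped only when both bits are checked.
 * dsp and func_sse2 below are hypothetical names used purely for
 * illustration.
 *
 *     int cpu_flags = av_get_cpu_flags();
 *
 *     if ((cpu_flags & AV_CPU_FLAG_SSE2) && !(cpu_flags & AV_CPU_FLAG_SSE2SLOW))
 *         dsp->func = func_sse2;
 */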

size_t ff_get_cpu_max_align_x86(void)
{
    int flags = av_get_cpu_flags();

    if (flags & AV_CPU_FLAG_AVX)
        return 32;
    if (flags & AV_CPU_FLAG_SSE)
        return 16;
    return 8;
}
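
/* Usage sketch, not part of the original file: this per-arch helper is
 * reached through the public av_cpu_max_align() wrapper declared in
 * libavutil/cpu.h, and the returned value is typically used to round buffer
 * sizes or strides up to the strictest alignment the detected SIMD
 * extensions want. width below is a hypothetical variable.
 *
 *     size_t align  = av_cpu_max_align();     // 32 with AVX, 16 with SSE, else 8
 *     size_t stride = FFALIGN(width, align);  // FFALIGN() from libavutil/macros.h
 */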