cpu: split flag checks per arch in av_cpu_max_align()
[libav.git] / libavutil / cpu.c
1 /*
2 * This file is part of Libav.
3 *
4 * Libav is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
8 *
9 * Libav is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with Libav; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include <stddef.h>
20 #include <stdint.h>
21 #include <stdatomic.h>
22
23 #include "cpu.h"
24 #include "cpu_internal.h"
25 #include "config.h"
26 #include "opt.h"
27 #include "common.h"
28
29 #if HAVE_SCHED_GETAFFINITY
30 #define _GNU_SOURCE
31 #include <sched.h>
32 #endif
33 #if HAVE_GETPROCESSAFFINITYMASK
34 #include <windows.h>
35 #endif
36 #if HAVE_SYSCTL
37 #if HAVE_SYS_PARAM_H
38 #include <sys/param.h>
39 #endif
40 #include <sys/types.h>
41 #include <sys/sysctl.h>
42 #endif
43 #if HAVE_SYSCONF
44 #include <unistd.h>
45 #endif
46
/* Cached result of get_cpu_flags(); -1 means "not detected yet".
 * Relaxed atomics are sufficient: every thread that races on the first
 * detection computes and stores the same value. */
static atomic_int cpu_flags = ATOMIC_VAR_INIT(-1);
48
49 static int get_cpu_flags(void)
50 {
51 if (ARCH_AARCH64)
52 return ff_get_cpu_flags_aarch64();
53 if (ARCH_ARM)
54 return ff_get_cpu_flags_arm();
55 if (ARCH_PPC)
56 return ff_get_cpu_flags_ppc();
57 if (ARCH_X86)
58 return ff_get_cpu_flags_x86();
59 return 0;
60 }
61
62 int av_get_cpu_flags(void)
63 {
64 int flags = atomic_load_explicit(&cpu_flags, memory_order_relaxed);
65 if (flags == -1) {
66 flags = get_cpu_flags();
67 atomic_store_explicit(&cpu_flags, flags, memory_order_relaxed);
68 }
69 return flags;
70 }
71
72 void av_set_cpu_flags_mask(int mask)
73 {
74 atomic_store_explicit(&cpu_flags, get_cpu_flags() & mask,
75 memory_order_relaxed);
76 }
77
/**
 * Parse a CPU feature flag string into an AV_CPU_FLAG_* bitmask.
 *
 * The string is evaluated with av_opt_eval_flags() against an option table
 * that only contains the names valid for the architecture this library was
 * compiled for.
 *
 * @param s flag string, e.g. "sse2" or "+avx-sse3" (av_opt flag syntax)
 * @return non-negative AV_CPU_FLAG_* combination on success, a negative
 *         error code from av_opt_eval_flags() on parse failure
 */
int av_parse_cpu_flags(const char *s)
{
/* Convenience masks: each x86 extension implies every extension it builds
 * on, so e.g. requesting "sse4.2" also enables SSE4/SSSE3/.../MMX. */
#define CPUFLAG_MMXEXT   (AV_CPU_FLAG_MMX      | AV_CPU_FLAG_MMXEXT | AV_CPU_FLAG_CMOV)
#define CPUFLAG_3DNOW    (AV_CPU_FLAG_3DNOW    | AV_CPU_FLAG_MMX)
#define CPUFLAG_3DNOWEXT (AV_CPU_FLAG_3DNOWEXT | CPUFLAG_3DNOW)
#define CPUFLAG_SSE      (AV_CPU_FLAG_SSE      | CPUFLAG_MMXEXT)
#define CPUFLAG_SSE2     (AV_CPU_FLAG_SSE2     | CPUFLAG_SSE)
#define CPUFLAG_SSE2SLOW (AV_CPU_FLAG_SSE2SLOW | CPUFLAG_SSE2)
#define CPUFLAG_SSE3     (AV_CPU_FLAG_SSE3     | CPUFLAG_SSE2)
#define CPUFLAG_SSE3SLOW (AV_CPU_FLAG_SSE3SLOW | CPUFLAG_SSE3)
#define CPUFLAG_SSSE3    (AV_CPU_FLAG_SSSE3    | CPUFLAG_SSE3)
#define CPUFLAG_SSE4     (AV_CPU_FLAG_SSE4     | CPUFLAG_SSSE3)
#define CPUFLAG_SSE42    (AV_CPU_FLAG_SSE42    | CPUFLAG_SSE4)
#define CPUFLAG_AVX      (AV_CPU_FLAG_AVX      | CPUFLAG_SSE42)
#define CPUFLAG_AVXSLOW  (AV_CPU_FLAG_AVXSLOW  | CPUFLAG_AVX)
#define CPUFLAG_XOP      (AV_CPU_FLAG_XOP      | CPUFLAG_AVX)
#define CPUFLAG_FMA3     (AV_CPU_FLAG_FMA3     | CPUFLAG_AVX)
#define CPUFLAG_FMA4     (AV_CPU_FLAG_FMA4     | CPUFLAG_AVX)
#define CPUFLAG_AVX2     (AV_CPU_FLAG_AVX2     | CPUFLAG_AVX)
#define CPUFLAG_BMI2     (AV_CPU_FLAG_BMI2     | AV_CPU_FLAG_BMI1)
    /* Per-arch name table; only the names of the compiled-in architecture
     * are accepted. */
    static const AVOption cpuflags_opts[] = {
        { "flags"   , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
#if   ARCH_PPC
        { "altivec" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ALTIVEC  },    .unit = "flags" },
#elif ARCH_X86
        { "mmx"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX      },    .unit = "flags" },
        { "mmxext"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_MMXEXT       },    .unit = "flags" },
        { "sse"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE          },    .unit = "flags" },
        { "sse2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE2         },    .unit = "flags" },
        { "sse2slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE2SLOW     },    .unit = "flags" },
        { "sse3"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE3         },    .unit = "flags" },
        { "sse3slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE3SLOW     },    .unit = "flags" },
        { "ssse3"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSSE3        },    .unit = "flags" },
        { "atom"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ATOM     },    .unit = "flags" },
        { "sse4.1"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE4         },    .unit = "flags" },
        { "sse4.2"  , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE42        },    .unit = "flags" },
        { "avx"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVX          },    .unit = "flags" },
        { "avxslow" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVXSLOW      },    .unit = "flags" },
        { "xop"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_XOP          },    .unit = "flags" },
        { "fma3"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_FMA3         },    .unit = "flags" },
        { "fma4"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_FMA4         },    .unit = "flags" },
        { "avx2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVX2         },    .unit = "flags" },
        { "bmi1"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI1     },    .unit = "flags" },
        { "bmi2"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_BMI2         },    .unit = "flags" },
        { "3dnow"   , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_3DNOW        },    .unit = "flags" },
        { "3dnowext", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_3DNOWEXT     },    .unit = "flags" },
        { "cmov",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_CMOV     },    .unit = "flags" },
#elif ARCH_ARM
        { "armv5te",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV5TE  },    .unit = "flags" },
        { "armv6",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6    },    .unit = "flags" },
        { "armv6t2",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6T2  },    .unit = "flags" },
        { "vfp",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP      },    .unit = "flags" },
        { "vfp_vm",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP_VM   },    .unit = "flags" },
        { "vfpv3",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFPV3    },    .unit = "flags" },
        { "neon",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON     },    .unit = "flags" },
#elif ARCH_AARCH64
        { "armv8",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV8    },    .unit = "flags" },
        { "neon",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON     },    .unit = "flags" },
        { "vfp",      NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP      },    .unit = "flags" },
#endif
        { NULL },
    };
    /* Minimal AVClass so av_opt_eval_flags() can log with a proper context. */
    static const AVClass class = {
        .class_name = "cpuflags",
        .item_name  = av_default_item_name,
        .option     = cpuflags_opts,
        .version    = LIBAVUTIL_VERSION_INT,
    };

    int flags = 0, ret;
    const AVClass *pclass = &class;

    if ((ret = av_opt_eval_flags(&pclass, &cpuflags_opts[0], s, &flags)) < 0)
        return ret;

    /* The flag values are stored as i64 in the table; mask to the valid
     * non-negative int range before returning. */
    return flags & INT_MAX;
}
155
/**
 * Return the number of logical CPUs available to the process.
 *
 * Uses the first mechanism available at build time, in order:
 * sched_getaffinity() (Linux — counts only CPUs in the process affinity
 * mask), GetProcessAffinityMask() (Windows), sysctl(HW_NCPU) (BSDs/macOS),
 * then sysconf(). Falls back to 1 when none is available.
 *
 * @return logical CPU count; NOTE(review): the sysctl path sets 0 on
 *         failure and the sysconf paths can propagate -1, so callers
 *         should treat values < 1 as "unknown"
 */
int av_cpu_count(void)
{
    int nb_cpus = 1;
#if HAVE_SCHED_GETAFFINITY && defined(CPU_COUNT)
    cpu_set_t cpuset;

    CPU_ZERO(&cpuset);

    /* Respect the scheduler affinity mask rather than the machine total. */
    if (!sched_getaffinity(0, sizeof(cpuset), &cpuset))
        nb_cpus = CPU_COUNT(&cpuset);
#elif HAVE_GETPROCESSAFFINITYMASK
    DWORD_PTR proc_aff, sys_aff;
    /* Each set bit in the process affinity mask is one usable CPU. */
    if (GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff))
        nb_cpus = av_popcount64(proc_aff);
#elif HAVE_SYSCTL && defined(HW_NCPU)
    int mib[2] = { CTL_HW, HW_NCPU };
    size_t len = sizeof(nb_cpus);

    if (sysctl(mib, 2, &nb_cpus, &len, NULL, 0) == -1)
        nb_cpus = 0;
#elif HAVE_SYSCONF && defined(_SC_NPROC_ONLN)
    nb_cpus = sysconf(_SC_NPROC_ONLN);
#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
    nb_cpus = sysconf(_SC_NPROCESSORS_ONLN);
#endif

    return nb_cpus;
}
184
185 size_t av_cpu_max_align(void)
186 {
187 if (ARCH_AARCH64)
188 return ff_get_cpu_max_align_aarch64();
189 if (ARCH_ARM)
190 return ff_get_cpu_max_align_arm();
191 if (ARCH_PPC)
192 return ff_get_cpu_max_align_ppc();
193 if (ARCH_X86)
194 return ff_get_cpu_max_align_x86();
195 return 8;
196 }