/* Declare dependencies between CPUIDs */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/cpufeature.h>

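/*
 * One dependency edge: "feature" is only usable when "depends" is also
 * available, so clearing "depends" must clear "feature" as well.
 */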
struct cpuid_dep {
	unsigned int feature;
	unsigned int depends;
};

/*
 * Table of CPUID features that depend on others.
 *
 * This only includes dependencies that can be usefully disabled, not
 * features that are part of the base set (like FPU).
 *
 * Note that none of this is __init / __initdata because it can be
 * called from cpu hotplug. It shouldn't do anything in this case,
 * but it's difficult to tell that to the init reference checker.
 */
static const struct cpuid_dep cpuid_deps[] = {
	{ X86_FEATURE_FXSR, X86_FEATURE_FPU },
	{ X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVE },
	{ X86_FEATURE_XSAVEC, X86_FEATURE_XSAVE },
	{ X86_FEATURE_XSAVES, X86_FEATURE_XSAVE },
	{ X86_FEATURE_AVX, X86_FEATURE_XSAVE },
	{ X86_FEATURE_PKU, X86_FEATURE_XSAVE },
	{ X86_FEATURE_MPX, X86_FEATURE_XSAVE },
	{ X86_FEATURE_XGETBV1, X86_FEATURE_XSAVE },
	{ X86_FEATURE_CMOV, X86_FEATURE_FXSR },
	{ X86_FEATURE_MMX, X86_FEATURE_FXSR },
	{ X86_FEATURE_MMXEXT, X86_FEATURE_MMX },
	{ X86_FEATURE_FXSR_OPT, X86_FEATURE_FXSR },
	{ X86_FEATURE_XSAVE, X86_FEATURE_FXSR },
	{ X86_FEATURE_XMM, X86_FEATURE_FXSR },
	{ X86_FEATURE_XMM2, X86_FEATURE_XMM },
	{ X86_FEATURE_XMM3, X86_FEATURE_XMM2 },
	{ X86_FEATURE_XMM4_1, X86_FEATURE_XMM2 },
	{ X86_FEATURE_XMM4_2, X86_FEATURE_XMM2 },
	{ X86_FEATURE_PCLMULQDQ, X86_FEATURE_XMM2 },
	{ X86_FEATURE_SSSE3, X86_FEATURE_XMM2 },
	{ X86_FEATURE_F16C, X86_FEATURE_XMM2 },
	{ X86_FEATURE_AES, X86_FEATURE_XMM2 },
	{ X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
	{ X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
	{ X86_FEATURE_AVX_VNNI, X86_FEATURE_AVX },
	{ X86_FEATURE_FMA, X86_FEATURE_AVX },
	{ X86_FEATURE_VAES, X86_FEATURE_AVX },
	{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
	{ X86_FEATURE_AVX2, X86_FEATURE_AVX },
	{ X86_FEATURE_AVX512F, X86_FEATURE_AVX },
	{ X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
	{ X86_FEATURE_AVX512PF, X86_FEATURE_AVX512F },
	{ X86_FEATURE_AVX512ER, X86_FEATURE_AVX512F },
	{ X86_FEATURE_AVX512CD, X86_FEATURE_AVX512F },
	{ X86_FEATURE_AVX512DQ, X86_FEATURE_AVX512F },
	{ X86_FEATURE_AVX512BW, X86_FEATURE_AVX512F },
	{ X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
	{ X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
	{ X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
	{ X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
	{ X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
	{ X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
	{ X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F },
	{ X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F },
	{ X86_FEATURE_AVX512_VP2INTERSECT, X86_FEATURE_AVX512VL },
	{ X86_FEATURE_CQM_OCCUP_LLC, X86_FEATURE_CQM_LLC },
	{ X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC },
	{ X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC },
	{ X86_FEATURE_BMEC, X86_FEATURE_CQM_MBM_TOTAL },
	{ X86_FEATURE_BMEC, X86_FEATURE_CQM_MBM_LOCAL },
	{ X86_FEATURE_AVX512_BF16, X86_FEATURE_AVX512VL },
	{ X86_FEATURE_AVX512_FP16, X86_FEATURE_AVX512BW },
	{ X86_FEATURE_ENQCMD, X86_FEATURE_XSAVES },
	{ X86_FEATURE_PER_THREAD_MBA, X86_FEATURE_MBA },
	{ X86_FEATURE_SGX_LC, X86_FEATURE_SGX },
	{ X86_FEATURE_SGX1, X86_FEATURE_SGX },
	{ X86_FEATURE_SGX2, X86_FEATURE_SGX1 },
	{ X86_FEATURE_SGX_EDECCSSA, X86_FEATURE_SGX1 },
	{ X86_FEATURE_XFD, X86_FEATURE_XSAVES },
	{ X86_FEATURE_XFD, X86_FEATURE_XGETBV1 },
	{ X86_FEATURE_AMX_TILE, X86_FEATURE_XFD },
	{ X86_FEATURE_SHSTK, X86_FEATURE_XSAVES },
	{ X86_FEATURE_FRED, X86_FEATURE_LKGS },
	{}
};
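/*
 * Example of how the table cascades (derived from the entries above):
 * clearing X86_FEATURE_XSAVE drops its direct dependents (XSAVEOPT,
 * XSAVEC, XSAVES, AVX, PKU, MPX, XGETBV1), and further passes then drop
 * everything depending on those, e.g. AVX2 and the AVX512* features via
 * AVX, and ENQCMD, XFD, AMX_TILE and SHSTK via XSAVES/XGETBV1.
 */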

static inline void clear_feature(struct cpuinfo_x86 *c, unsigned int feature)
{
	/*
	 * Note: This could use the non-atomic __*_bit() variants, but the
	 * rest of the cpufeature code uses atomics as well, so keep it for
	 * consistency. Clean up all of it separately.
	 */
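	/*
	 * A NULL @c means the boot CPU: clear the bit in boot_cpu_data and
	 * record it in cpu_caps_cleared so that CPUs brought up later drop
	 * it as well.
	 */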
	if (!c) {
		clear_cpu_cap(&boot_cpu_data, feature);
		set_bit(feature, (unsigned long *)cpu_caps_cleared);
	} else {
		clear_bit(feature, (unsigned long *)c->x86_capability);
	}
}

/* Take the capabilities and the BUG bits into account */
#define MAX_FEATURE_BITS ((NCAPINTS + NBUGINTS) * sizeof(u32) * 8)
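/*
 * Feature numbers are word * 32 + bit; the bug "features" live in the
 * NBUGINTS words following the NCAPINTS capability words, so the scratch
 * bitmap below has to be large enough to cover both ranges.
 */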

static void do_clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature)
{
	DECLARE_BITMAP(disable, MAX_FEATURE_BITS);
	const struct cpuid_dep *d;
	bool changed;

	if (WARN_ON(feature >= MAX_FEATURE_BITS))
		return;

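	/*
	 * Clearing a feature the boot CPU has after alternatives have been
	 * patched is too late: code paths have already been rewritten on
	 * the assumption that the feature is present, hence the warning.
	 */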
	if (boot_cpu_has(feature))
		WARN_ON(alternatives_patched);

	clear_feature(c, feature);

	/* Collect all features to disable, handling dependencies */
	memset(disable, 0, sizeof(disable));
	__set_bit(feature, disable);

	/* Loop until we get a stable state. */
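	/*
	 * Each pass clears the direct dependents of everything already in
	 * @disable; iterating to a fixed point also catches transitive
	 * dependencies (e.g. XSAVE -> AVX -> AVX512F -> AVX512BW).
	 */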
	do {
		changed = false;
		for (d = cpuid_deps; d->feature; d++) {
			if (!test_bit(d->depends, disable))
				continue;
			if (__test_and_set_bit(d->feature, disable))
				continue;

			changed = true;
			clear_feature(c, d->feature);
		}
	} while (changed);
}

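/*
 * Clear @feature, and every feature that (transitively) depends on it,
 * in the capability bits of CPU @c.
 */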
void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature)
{
	do_clear_cpu_cap(c, feature);
}

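/*
 * Boot-time variant: clear @feature and its dependents for the boot CPU
 * and remember the decision for CPUs brought up later. This is what,
 * for example, the "clearcpuid=" command line handling ends up calling
 * (an illustration of one caller, not an exhaustive list).
 */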
void setup_clear_cpu_cap(unsigned int feature)
{
	do_clear_cpu_cap(NULL, feature);
}

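/*
 * Illustrative usage (not part of this file): a caller that wants to hide
 * XSAVES and everything built on top of it early in boot would do
 *
 *	setup_clear_cpu_cap(X86_FEATURE_XSAVES);
 *
 * which, via the dependency table above, also clears ENQCMD, XFD, SHSTK
 * and AMX_TILE.
 */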