// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007 rPath, Inc. - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * Check for obligatory CPU features and abort if the features are not
 * present. This code should be compilable as 16-, 32- or 64-bit
 * code, so be very careful with types and inline assembly.
 *
 * This code should not contain any messages; that requires an
 * additional wrapper.
 *
 * As written, this code is not safe for inclusion into the kernel
 * proper (after FPU initialization, in particular).
 */

#ifdef _SETUP
# include "boot.h"
#endif
#include <linux/types.h>
#include <asm/intel-family.h>
#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
#include "string.h"
#include "msr.h"

static u32 err_flags[NCAPINTS];

static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;

static const u32 req_flags[NCAPINTS] =
{
	REQUIRED_MASK0,
	REQUIRED_MASK1,
	0, /* REQUIRED_MASK2 not implemented in this file */
	0, /* REQUIRED_MASK3 not implemented in this file */
	REQUIRED_MASK4,
	0, /* REQUIRED_MASK5 not implemented in this file */
	REQUIRED_MASK6,
	0, /* REQUIRED_MASK7 not implemented in this file */
	0, /* REQUIRED_MASK8 not implemented in this file */
	0, /* REQUIRED_MASK9 not implemented in this file */
	0, /* REQUIRED_MASK10 not implemented in this file */
	0, /* REQUIRED_MASK11 not implemented in this file */
	0, /* REQUIRED_MASK12 not implemented in this file */
	0, /* REQUIRED_MASK13 not implemented in this file */
	0, /* REQUIRED_MASK14 not implemented in this file */
	0, /* REQUIRED_MASK15 not implemented in this file */
	REQUIRED_MASK16,
};

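/*
 * Pack four ASCII characters into a little-endian 32-bit word. This matches
 * the layout of cpu_vendor[], which holds the raw CPUID leaf 0 vendor string
 * registers, so vendor IDs can be compared one word at a time below.
 */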
#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))

static int is_amd(void)
{
	return cpu_vendor[0] == A32('A', 'u', 't', 'h') &&
	       cpu_vendor[1] == A32('e', 'n', 't', 'i') &&
	       cpu_vendor[2] == A32('c', 'A', 'M', 'D');
}

static int is_centaur(void)
{
	return cpu_vendor[0] == A32('C', 'e', 'n', 't') &&
	       cpu_vendor[1] == A32('a', 'u', 'r', 'H') &&
	       cpu_vendor[2] == A32('a', 'u', 'l', 's');
}

static int is_transmeta(void)
{
	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
	       cpu_vendor[1] == A32('i', 'n', 'e', 'T') &&
	       cpu_vendor[2] == A32('M', 'x', '8', '6');
}

static int is_intel(void)
{
	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
	       cpu_vendor[1] == A32('i', 'n', 'e', 'I') &&
	       cpu_vendor[2] == A32('n', 't', 'e', 'l');
}

/* Returns a bitmask of which words we have error bits in */
static int check_cpuflags(void)
{
	u32 err;
	int i;

	err = 0;
	for (i = 0; i < NCAPINTS; i++) {
		err_flags[i] = req_flags[i] & ~cpu.flags[i];
		if (err_flags[i])
			err |= 1 << i;
	}

	return err;
}
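
/*
 * Note: a return value of 0x01 means that only capability word 0 has missing
 * required features; the vendor-specific workarounds in check_cpu() below
 * key off exactly that case.
 */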

/*
 * Returns -1 on error.
 *
 * *cpu_level is set to the current CPU level; *req_level to the required
 * level. x86-64 is considered level 64 for this purpose.
 *
 * *err_flags_ptr is set to the flags error array if there are flags missing.
 */
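/*
 * A minimal usage sketch (the actual message-printing wrapper lives in the
 * boot code outside this file):
 *
 *	int cpu_level, req_level;
 *	u32 *err_flags;
 *
 *	if (check_cpu(&cpu_level, &req_level, &err_flags))
 *		... report cpu_level vs. req_level and, if err_flags is
 *		    non-NULL, the per-word bitmap of missing features.
 */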
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
{
	int err;

	memset(&cpu.flags, 0, sizeof(cpu.flags));
	cpu.level = 3;

	if (has_eflag(X86_EFLAGS_AC))
		cpu.level = 4;

	get_cpuflags();
	err = check_cpuflags();

	if (test_bit(X86_FEATURE_LM, cpu.flags))
		cpu.level = 64;

	if (err == 0x01 &&
	    !(err_flags[0] &
	      ~((1 << X86_FEATURE_XMM)|(1 << X86_FEATURE_XMM2))) &&
	    is_amd()) {
		/* If this is an AMD and we're only missing SSE+SSE2, try to
		   turn them on */

		struct msr m;

		boot_rdmsr(MSR_K7_HWCR, &m);
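		/* Bit 15 of MSR_K7_HWCR is the SSE disable bit on these
		   AMD parts; clearing it should expose SSE/SSE2 again. */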
		m.l &= ~(1 << 15);
		boot_wrmsr(MSR_K7_HWCR, &m);

		get_cpuflags();	/* Make sure it really did something */
		err = check_cpuflags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
		   is_centaur() && cpu.model >= 6) {
		/* If this is a VIA C3, we might have to enable CX8
		   explicitly */

		struct msr m;

		boot_rdmsr(MSR_VIA_FCR, &m);
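		/* FCR bit 1 (ECX8) enables CMPXCHG8B reporting; bit 7 is set
		   as part of the same enable sequence. */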
		m.l |= (1 << 1) | (1 << 7);
		boot_wrmsr(MSR_VIA_FCR, &m);

		set_bit(X86_FEATURE_CX8, cpu.flags);
		err = check_cpuflags();
	} else if (err == 0x01 && is_transmeta()) {
		/* Transmeta might have masked feature bits in word 0 */

		struct msr m, m_tmp;
		u32 level = 1;

		/* MSR 0x80860004 masks the CPUID capability bits; lift the
		   mask temporarily, then restore it below. */
		boot_rdmsr(0x80860004, &m);
		m_tmp = m;
		m_tmp.l = ~0;
		boot_wrmsr(0x80860004, &m_tmp);
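		/* Re-read the leaf 1 feature flags: EAX=1 in, EDX out. */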
		asm("cpuid"
		    : "+a" (level), "=d" (cpu.flags[0])
		    : : "ecx", "ebx");
		boot_wrmsr(0x80860004, &m);

		err = check_cpuflags();
	} else if (err == 0x01 &&
		   !(err_flags[0] & ~(1 << X86_FEATURE_PAE)) &&
		   is_intel() && cpu.level == 6 &&
		   (cpu.model == 9 || cpu.model == 13)) {
		/* PAE is disabled on this Pentium M but can be forced */
		if (cmdline_find_option_bool("forcepae")) {
			puts("WARNING: Forcing PAE in CPU flags\n");
			set_bit(X86_FEATURE_PAE, cpu.flags);
			err = check_cpuflags();
		}
		else {
			puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
		}
	}
	if (!err)
		err = check_knl_erratum();

	if (err_flags_ptr)
		*err_flags_ptr = err ? err_flags : NULL;
	if (cpu_level_ptr)
		*cpu_level_ptr = cpu.level;
	if (req_level_ptr)
		*req_level_ptr = req_level;

	return (cpu.level < req_level || err) ? -1 : 0;
}

int check_knl_erratum(void)
{
	/*
	 * First check for the affected model/family:
	 */
	if (!is_intel() ||
	    cpu.family != 6 ||
	    cpu.model != 0x57 /*INTEL_XEON_PHI_KNL*/)
		return 0;

	/*
	 * This erratum affects the Accessed/Dirty bits, and can
	 * cause stray bits to be set in !Present PTEs. We have
	 * enough bits in our 64-bit PTEs (which we have on real
	 * 64-bit mode or PAE) to avoid using these troublesome
	 * bits. But, we do not have enough space in our 32-bit
	 * PTEs. So, refuse to run on 32-bit non-PAE kernels.
	 */
	if (IS_ENABLED(CONFIG_X86_64) || IS_ENABLED(CONFIG_X86_PAE))
		return 0;

	puts("This 32-bit kernel can not run on this Xeon Phi x200\n"
	     "processor due to a processor erratum. Use a 64-bit\n"
	     "kernel, or enable PAE in this 32-bit kernel.\n\n");

	return -1;
}