xref: /linux/arch/x86/kernel/cpu/bugs.c (revision 140eb5227767c6754742020a16d2691222b9c19b)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>

#include <asm/nospec-branch.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);

void __init check_bugs(void)
{
	identify_boot_cpu();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for an i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed-size MTRRs in there, and overlapping
	 * MTRRs onto large pages causes slowdowns.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * to be very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};
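
/*
 * The commands above map to the spectre_v2= kernel parameter handled by
 * spectre_v2_parse_cmdline() below: "off" selects NONE, "on" selects FORCE,
 * "auto" selects AUTO, and "retpoline", "retpoline,generic" and
 * "retpoline,amd" select the corresponding RETPOLINE commands. The separate
 * "nospectre_v2" parameter also selects NONE.
 */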

static const char *spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 mitigation: " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

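/*
 * Print the reason for the chosen command-line selection only when it is
 * relevant: _if_insecure() prints only when the CPU is affected by
 * Spectre v2, _if_secure() only when it is not.
 */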
static void __init spec2_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s\n", reason);
}

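/*
 * True when the kernel was built with a retpoline-capable compiler, i.e.
 * when the RETPOLINE macro is defined by the build; otherwise only the
 * minimal ASM retpoline thunks are available.
 */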
static inline bool retp_compiler(void)
{
	return __is_defined(RETPOLINE);
}

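/*
 * Compare a command-line argument of length arglen (not necessarily
 * NUL-terminated) against the option string opt.
 */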
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

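/*
 * Map the spectre_v2= and nospectre_v2 boot parameters to a mitigation
 * command, defaulting to AUTO when neither is given.
 */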
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	char arg[20];
	int ret;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
				  sizeof(arg));
	if (ret > 0) {
		if (match_option(arg, ret, "off")) {
			goto disable;
		} else if (match_option(arg, ret, "on")) {
			spec2_print_if_secure("force enabled on command line.");
			return SPECTRE_V2_CMD_FORCE;
		} else if (match_option(arg, ret, "retpoline")) {
			spec2_print_if_insecure("retpoline selected on command line.");
			return SPECTRE_V2_CMD_RETPOLINE;
		} else if (match_option(arg, ret, "retpoline,amd")) {
			if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
				pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
				return SPECTRE_V2_CMD_AUTO;
			}
			spec2_print_if_insecure("AMD retpoline selected on command line.");
			return SPECTRE_V2_CMD_RETPOLINE_AMD;
		} else if (match_option(arg, ret, "retpoline,generic")) {
			spec2_print_if_insecure("generic retpoline selected on command line.");
			return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
		} else if (match_option(arg, ret, "auto")) {
			return SPECTRE_V2_CMD_AUTO;
		}
	}

	if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
		return SPECTRE_V2_CMD_AUTO;
disable:
	spec2_print_if_insecure("disabled on command line.");
	return SPECTRE_V2_CMD_NONE;
}


/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6) {
		switch (boot_cpu_data.x86_model) {
		case INTEL_FAM6_SKYLAKE_MOBILE:
		case INTEL_FAM6_SKYLAKE_DESKTOP:
		case INTEL_FAM6_SKYLAKE_X:
		case INTEL_FAM6_KABYLAKE_MOBILE:
		case INTEL_FAM6_KABYLAKE_DESKTOP:
			return true;
		}
	}
	return false;
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or
	 * AUTO, there is nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
		/* FALLTHRU */
	case SPECTRE_V2_CMD_AUTO:
		goto retpoline_auto;

	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("kernel not compiled with retpoline; no mitigation available!\n");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("LFENCE not serializing. Switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
					 SPECTRE_V2_RETPOLINE_MINIMAL;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If neither SMEP nor KPTI is available, there is a risk of
	 * hitting userspace addresses in the RSB after a context switch
	 * from a shallow call stack to a deeper one. To prevent this, fill
	 * the entire RSB, even when using IBRS.
	 *
	 * Skylake-era CPUs have a separate issue with *underflow* of the
	 * RSB, where they will predict 'ret' targets from the generic BTB.
	 * The proper mitigation for this is IBRS. If IBRS is not supported
	 * or is deactivated in favour of retpolines, the RSB fill on context
	 * switch is required.
	 */
	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
		pr_info("Filling RSB on context switch\n");
	}
}


#undef pr_fmt

#ifdef CONFIG_SYSFS
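/*
 * These handlers back the files under
 * /sys/devices/system/cpu/vulnerabilities/ (meltdown, spectre_v1 and
 * spectre_v2); the attributes themselves are registered in
 * drivers/base/cpu.c.
 */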
ssize_t cpu_show_meltdown(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return sprintf(buf, "Not affected\n");
	if (boot_cpu_has(X86_FEATURE_PTI))
		return sprintf(buf, "Mitigation: PTI\n");
	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spectre_v1(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
		return sprintf(buf, "Not affected\n");
	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
}
#endif