xref: /linux/arch/x86/kernel/cpu/bugs.c (revision 547c5775a742d9c83891b629b75d1d4c8e88d8c0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 1994  Linus Torvalds
4  *
5  *  Cyrix stuff, June 1998 by:
6  *	- Rafael R. Reilova (moved everything from head.S),
7  *        <rreilova@ececs.uc.edu>
8  *	- Channing Corn (tests & fixes),
9  *	- Andrew D. Balsa (code cleanup).
10  */
11 #include <linux/init.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/nospec.h>
15 #include <linux/prctl.h>
16 #include <linux/sched/smt.h>
17 #include <linux/pgtable.h>
18 #include <linux/bpf.h>
19 
20 #include <asm/spec-ctrl.h>
21 #include <asm/cmdline.h>
22 #include <asm/bugs.h>
23 #include <asm/processor.h>
24 #include <asm/processor-flags.h>
25 #include <asm/fpu/api.h>
26 #include <asm/msr.h>
27 #include <asm/vmx.h>
28 #include <asm/paravirt.h>
29 #include <asm/cpu_device_id.h>
30 #include <asm/e820/api.h>
31 #include <asm/hypervisor.h>
32 #include <asm/tlbflush.h>
33 #include <asm/cpu.h>
34 
35 #include "cpu.h"
36 
37 /*
38  * Speculation Vulnerability Handling
39  *
40  * Each vulnerability is handled with the following functions:
41  *   <vuln>_select_mitigation() -- Selects a mitigation to use.  This should
42  *				   take into account all relevant command line
43  *				   options.
44  *   <vuln>_update_mitigation() -- This is called after all vulnerabilities have
45  *				   selected a mitigation, in case the selection
46  *				   may want to change based on other choices
47  *				   made.  This function is optional.
48  *   <vuln>_apply_mitigation() -- Enable the selected mitigation.
49  *
50  * The compile-time mitigation in all cases should be AUTO.  An explicit
51  * command-line option can override AUTO.  If no such option is
52  * provided, <vuln>_select_mitigation() will override AUTO to the best
53  * mitigation option.
54  */
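/*
 * Illustrative sketch (not part of bugs.c): how a hypothetical vulnerability
 * "foo" would plug into the select/update/apply scheme described above.  All
 * FOO_* names, X86_BUG_FOO and X86_FEATURE_CLEAR_FOO are made up for
 * illustration; the shape follows the real handlers below (e.g. the MDS
 * ones).  The three functions would then be called from
 * cpu_select_mitigations() in the same phases as the existing handlers.
 *
 *	static enum foo_mitigations foo_mitigation __ro_after_init =
 *		IS_ENABLED(CONFIG_MITIGATION_FOO) ? FOO_MITIGATION_AUTO :
 *						    FOO_MITIGATION_OFF;
 *
 *	static void __init foo_select_mitigation(void)
 *	{
 *		if (!boot_cpu_has_bug(X86_BUG_FOO) || cpu_mitigations_off()) {
 *			foo_mitigation = FOO_MITIGATION_OFF;
 *			return;
 *		}
 *
 *		// No explicit command line choice: resolve AUTO to the best option.
 *		if (foo_mitigation == FOO_MITIGATION_AUTO)
 *			foo_mitigation = FOO_MITIGATION_FULL;
 *	}
 *
 *	static void __init foo_update_mitigation(void)
 *	{
 *		// Optional: revisit the choice once other vulnerabilities have
 *		// selected theirs (see mds_update_mitigation() for a real case).
 *	}
 *
 *	static void __init foo_apply_mitigation(void)
 *	{
 *		if (foo_mitigation == FOO_MITIGATION_FULL)
 *			setup_force_cpu_cap(X86_FEATURE_CLEAR_FOO);
 *	}
 */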
55 
56 static void __init spectre_v1_select_mitigation(void);
57 static void __init spectre_v1_apply_mitigation(void);
58 static void __init spectre_v2_select_mitigation(void);
59 static void __init spectre_v2_update_mitigation(void);
60 static void __init spectre_v2_apply_mitigation(void);
61 static void __init retbleed_select_mitigation(void);
62 static void __init retbleed_update_mitigation(void);
63 static void __init retbleed_apply_mitigation(void);
64 static void __init spectre_v2_user_select_mitigation(void);
65 static void __init spectre_v2_user_update_mitigation(void);
66 static void __init spectre_v2_user_apply_mitigation(void);
67 static void __init ssb_select_mitigation(void);
68 static void __init ssb_apply_mitigation(void);
69 static void __init l1tf_select_mitigation(void);
70 static void __init l1tf_apply_mitigation(void);
71 static void __init mds_select_mitigation(void);
72 static void __init mds_update_mitigation(void);
73 static void __init mds_apply_mitigation(void);
74 static void __init taa_select_mitigation(void);
75 static void __init taa_update_mitigation(void);
76 static void __init taa_apply_mitigation(void);
77 static void __init mmio_select_mitigation(void);
78 static void __init mmio_update_mitigation(void);
79 static void __init mmio_apply_mitigation(void);
80 static void __init rfds_select_mitigation(void);
81 static void __init rfds_update_mitigation(void);
82 static void __init rfds_apply_mitigation(void);
83 static void __init srbds_select_mitigation(void);
84 static void __init srbds_apply_mitigation(void);
85 static void __init l1d_flush_select_mitigation(void);
86 static void __init srso_select_mitigation(void);
87 static void __init srso_update_mitigation(void);
88 static void __init srso_apply_mitigation(void);
89 static void __init gds_select_mitigation(void);
90 static void __init gds_apply_mitigation(void);
91 static void __init bhi_select_mitigation(void);
92 static void __init bhi_update_mitigation(void);
93 static void __init bhi_apply_mitigation(void);
94 static void __init its_select_mitigation(void);
95 static void __init its_update_mitigation(void);
96 static void __init its_apply_mitigation(void);
97 
98 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
99 u64 x86_spec_ctrl_base;
100 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
101 
102 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
103 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
104 EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
105 
106 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
107 
108 static u64 __ro_after_init x86_arch_cap_msr;
109 
110 static DEFINE_MUTEX(spec_ctrl_mutex);
111 
112 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
113 
114 static void __init set_return_thunk(void *thunk)
115 {
116 	if (x86_return_thunk != __x86_return_thunk)
117 		pr_warn("x86/bugs: return thunk changed\n");
118 
119 	x86_return_thunk = thunk;
120 }
121 
122 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
123 static void update_spec_ctrl(u64 val)
124 {
125 	this_cpu_write(x86_spec_ctrl_current, val);
126 	wrmsrq(MSR_IA32_SPEC_CTRL, val);
127 }
128 
129 /*
130  * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
131  * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
132  */
133 void update_spec_ctrl_cond(u64 val)
134 {
135 	if (this_cpu_read(x86_spec_ctrl_current) == val)
136 		return;
137 
138 	this_cpu_write(x86_spec_ctrl_current, val);
139 
140 	/*
141 	 * When KERNEL_IBRS is enabled, this MSR is written on return-to-user;
142 	 * unless forced, the update can be delayed until that time.
143 	 */
144 	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
145 		wrmsrq(MSR_IA32_SPEC_CTRL, val);
146 }
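/*
 * Simplified sketch of the caller the comment above refers to: the real
 * logic lives in __speculation_ctrl_update() in arch/x86/kernel/process.c
 * and also handles the AMD LS_CFG/VIRT_SSBD paths.  Shown only to illustrate
 * how the per-task value is built from x86_spec_ctrl_base plus TIF bits
 * ("tifn" being the next task's thread flags):
 *
 *	u64 msr = x86_spec_ctrl_base;
 *
 *	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
 *		msr |= ssbd_tif_to_spec_ctrl(tifn);	// SPEC_CTRL_SSBD for TIF_SSBD
 *		msr |= stibp_tif_to_spec_ctrl(tifn);	// SPEC_CTRL_STIBP for TIF_SPEC_IB
 *		update_spec_ctrl_cond(msr);		// MSR write deferred under KERNEL_IBRS
 *	}
 */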
147 
148 noinstr u64 spec_ctrl_current(void)
149 {
150 	return this_cpu_read(x86_spec_ctrl_current);
151 }
152 EXPORT_SYMBOL_GPL(spec_ctrl_current);
153 
154 /*
155  * AMD specific MSR info for Speculative Store Bypass control.
156  * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
157  */
158 u64 __ro_after_init x86_amd_ls_cfg_base;
159 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
160 
161 /* Control conditional STIBP in switch_to() */
162 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
163 /* Control conditional IBPB in switch_mm() */
164 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
165 /* Control unconditional IBPB in switch_mm() */
166 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
167 
168 /* Control IBPB on vCPU load */
169 DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
170 EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
171 
172 /* Control MDS CPU buffer clear before idling (halt, mwait) */
173 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
174 EXPORT_SYMBOL_GPL(mds_idle_clear);
175 
176 /*
177  * Controls whether L1D flush based mitigations are enabled,
178  * based on HW features and the admin setting via the boot parameter;
179  * defaults to false.
180  */
181 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
182 
183 /*
184  * Controls CPU Fill buffer clear before VMenter. This is a subset of
185  * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
186  * mitigation is required.
187  */
188 DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
189 EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
190 
191 void __init cpu_select_mitigations(void)
192 {
193 	/*
194 	 * Read the SPEC_CTRL MSR to account for reserved bits which may
195 	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
196 	 * init code as it is not enumerated and depends on the family.
197 	 */
198 	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
199 		rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
200 
201 		/*
202 		 * A previously running kernel (kexec) may have some controls
203 		 * turned ON. Clear them and let the mitigations setup below
204 		 * rediscover them based on configuration.
205 		 */
206 		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
207 	}
208 
209 	x86_arch_cap_msr = x86_read_arch_cap_msr();
210 
211 	/* Select the proper CPU mitigations before patching alternatives: */
212 	spectre_v1_select_mitigation();
213 	spectre_v2_select_mitigation();
214 	retbleed_select_mitigation();
215 	spectre_v2_user_select_mitigation();
216 	ssb_select_mitigation();
217 	l1tf_select_mitigation();
218 	mds_select_mitigation();
219 	taa_select_mitigation();
220 	mmio_select_mitigation();
221 	rfds_select_mitigation();
222 	srbds_select_mitigation();
223 	l1d_flush_select_mitigation();
224 	srso_select_mitigation();
225 	gds_select_mitigation();
226 	its_select_mitigation();
227 	bhi_select_mitigation();
228 
229 	/*
230 	 * After mitigations are selected, some may need to update their
231 	 * choices.
232 	 */
233 	spectre_v2_update_mitigation();
234 	/*
235 	 * retbleed_update_mitigation() relies on the state set by
236 	 * spectre_v2_update_mitigation(); specifically it wants to know about
237 	 * spectre_v2=ibrs.
238 	 */
239 	retbleed_update_mitigation();
240 	/*
241 	 * its_update_mitigation() depends on spectre_v2_update_mitigation()
242 	 * and retbleed_update_mitigation().
243 	 */
244 	its_update_mitigation();
245 
246 	/*
247 	 * spectre_v2_user_update_mitigation() depends on
248 	 * retbleed_update_mitigation(), specifically the STIBP
249 	 * selection is forced for UNRET or IBPB.
250 	 */
251 	spectre_v2_user_update_mitigation();
252 	mds_update_mitigation();
253 	taa_update_mitigation();
254 	mmio_update_mitigation();
255 	rfds_update_mitigation();
256 	bhi_update_mitigation();
257 	/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
258 	srso_update_mitigation();
259 
260 	spectre_v1_apply_mitigation();
261 	spectre_v2_apply_mitigation();
262 	retbleed_apply_mitigation();
263 	spectre_v2_user_apply_mitigation();
264 	ssb_apply_mitigation();
265 	l1tf_apply_mitigation();
266 	mds_apply_mitigation();
267 	taa_apply_mitigation();
268 	mmio_apply_mitigation();
269 	rfds_apply_mitigation();
270 	srbds_apply_mitigation();
271 	srso_apply_mitigation();
272 	gds_apply_mitigation();
273 	its_apply_mitigation();
274 	bhi_apply_mitigation();
275 }
276 
277 /*
278  * NOTE: This function is *only* called for SVM, since Intel uses
279  * MSR_IA32_SPEC_CTRL for SSBD.
280  */
281 void
282 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
283 {
284 	u64 guestval, hostval;
285 	struct thread_info *ti = current_thread_info();
286 
287 	/*
288 	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
289 	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
290 	 */
291 	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
292 	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
293 		return;
294 
295 	/*
296 	 * If the host has SSBD mitigation enabled, force it in the host's
297 	 * virtual MSR value. If it's not permanently enabled, evaluate
298 	 * current's TIF_SSBD thread flag.
299 	 */
300 	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
301 		hostval = SPEC_CTRL_SSBD;
302 	else
303 		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
304 
305 	/* Sanitize the guest value */
306 	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
307 
308 	if (hostval != guestval) {
309 		unsigned long tif;
310 
311 		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
312 				 ssbd_spec_ctrl_to_tif(hostval);
313 
314 		speculation_ctrl_update(tif);
315 	}
316 }
317 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
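/*
 * Usage sketch (simplified, field name illustrative): on the SVM side the
 * guest's virtual SPEC_CTRL value is applied before VM-entry and the host
 * policy is restored after VM-exit:
 *
 *	x86_virt_spec_ctrl(svm->virt_spec_ctrl, true);	// switch to guest SSBD
 *	// ... run the guest ...
 *	x86_virt_spec_ctrl(svm->virt_spec_ctrl, false);	// back to host SSBD
 */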
318 
319 static void x86_amd_ssb_disable(void)
320 {
321 	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
322 
323 	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
324 		wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
325 	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
326 		wrmsrq(MSR_AMD64_LS_CFG, msrval);
327 }
328 
329 #undef pr_fmt
330 #define pr_fmt(fmt)	"MDS: " fmt
331 
332 /* Default mitigation for MDS-affected CPUs */
333 static enum mds_mitigations mds_mitigation __ro_after_init =
334 	IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
335 static bool mds_nosmt __ro_after_init = false;
336 
337 static const char * const mds_strings[] = {
338 	[MDS_MITIGATION_OFF]	= "Vulnerable",
339 	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
340 	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
341 };
342 
343 enum taa_mitigations {
344 	TAA_MITIGATION_OFF,
345 	TAA_MITIGATION_AUTO,
346 	TAA_MITIGATION_UCODE_NEEDED,
347 	TAA_MITIGATION_VERW,
348 	TAA_MITIGATION_TSX_DISABLED,
349 };
350 
351 /* Default mitigation for TAA-affected CPUs */
352 static enum taa_mitigations taa_mitigation __ro_after_init =
353 	IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;
354 
355 enum mmio_mitigations {
356 	MMIO_MITIGATION_OFF,
357 	MMIO_MITIGATION_AUTO,
358 	MMIO_MITIGATION_UCODE_NEEDED,
359 	MMIO_MITIGATION_VERW,
360 };
361 
362 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
363 static enum mmio_mitigations mmio_mitigation __ro_after_init =
364 	IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ?	MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;
365 
366 enum rfds_mitigations {
367 	RFDS_MITIGATION_OFF,
368 	RFDS_MITIGATION_AUTO,
369 	RFDS_MITIGATION_VERW,
370 	RFDS_MITIGATION_UCODE_NEEDED,
371 };
372 
373 /* Default mitigation for Register File Data Sampling */
374 static enum rfds_mitigations rfds_mitigation __ro_after_init =
375 	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
376 
377 /*
378  * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
379  * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
380  */
381 static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
382 
383 static void __init mds_select_mitigation(void)
384 {
385 	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
386 		mds_mitigation = MDS_MITIGATION_OFF;
387 		return;
388 	}
389 
390 	if (mds_mitigation == MDS_MITIGATION_AUTO)
391 		mds_mitigation = MDS_MITIGATION_FULL;
392 
393 	if (mds_mitigation == MDS_MITIGATION_OFF)
394 		return;
395 
396 	verw_clear_cpu_buf_mitigation_selected = true;
397 }
398 
399 static void __init mds_update_mitigation(void)
400 {
401 	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
402 		return;
403 
404 	/* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
405 	if (verw_clear_cpu_buf_mitigation_selected)
406 		mds_mitigation = MDS_MITIGATION_FULL;
407 
408 	if (mds_mitigation == MDS_MITIGATION_FULL) {
409 		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
410 			mds_mitigation = MDS_MITIGATION_VMWERV;
411 	}
412 
413 	pr_info("%s\n", mds_strings[mds_mitigation]);
414 }
415 
416 static void __init mds_apply_mitigation(void)
417 {
418 	if (mds_mitigation == MDS_MITIGATION_FULL ||
419 	    mds_mitigation == MDS_MITIGATION_VMWERV) {
420 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
421 		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
422 		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
423 			cpu_smt_disable(false);
424 	}
425 }
426 
427 static int __init mds_cmdline(char *str)
428 {
429 	if (!boot_cpu_has_bug(X86_BUG_MDS))
430 		return 0;
431 
432 	if (!str)
433 		return -EINVAL;
434 
435 	if (!strcmp(str, "off"))
436 		mds_mitigation = MDS_MITIGATION_OFF;
437 	else if (!strcmp(str, "full"))
438 		mds_mitigation = MDS_MITIGATION_FULL;
439 	else if (!strcmp(str, "full,nosmt")) {
440 		mds_mitigation = MDS_MITIGATION_FULL;
441 		mds_nosmt = true;
442 	}
443 
444 	return 0;
445 }
446 early_param("mds", mds_cmdline);
447 
448 #undef pr_fmt
449 #define pr_fmt(fmt)	"TAA: " fmt
450 
451 static bool taa_nosmt __ro_after_init;
452 
453 static const char * const taa_strings[] = {
454 	[TAA_MITIGATION_OFF]		= "Vulnerable",
455 	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
456 	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
457 	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
458 };
459 
460 static bool __init taa_vulnerable(void)
461 {
462 	return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
463 }
464 
465 static void __init taa_select_mitigation(void)
466 {
467 	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
468 		taa_mitigation = TAA_MITIGATION_OFF;
469 		return;
470 	}
471 
472 	/* TSX previously disabled by tsx=off */
473 	if (!boot_cpu_has(X86_FEATURE_RTM)) {
474 		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
475 		return;
476 	}
477 
478 	if (cpu_mitigations_off())
479 		taa_mitigation = TAA_MITIGATION_OFF;
480 
481 	/* Microcode will be checked in taa_update_mitigation(). */
482 	if (taa_mitigation == TAA_MITIGATION_AUTO)
483 		taa_mitigation = TAA_MITIGATION_VERW;
484 
485 	if (taa_mitigation != TAA_MITIGATION_OFF)
486 		verw_clear_cpu_buf_mitigation_selected = true;
487 }
488 
489 static void __init taa_update_mitigation(void)
490 {
491 	if (!taa_vulnerable() || cpu_mitigations_off())
492 		return;
493 
494 	if (verw_clear_cpu_buf_mitigation_selected)
495 		taa_mitigation = TAA_MITIGATION_VERW;
496 
497 	if (taa_mitigation == TAA_MITIGATION_VERW) {
498 		/* Check if the requisite ucode is available. */
499 		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
500 			taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
501 
502 		/*
503 		 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
504 		 * A microcode update fixes this behavior to clear CPU buffers. It also
505 		 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
506 		 * ARCH_CAP_TSX_CTRL_MSR bit.
507 		 *
508 		 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
509 		 * update is required.
510 		 */
511 		if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
512 		   !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
513 			taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
514 	}
515 
516 	pr_info("%s\n", taa_strings[taa_mitigation]);
517 }
518 
519 static void __init taa_apply_mitigation(void)
520 {
521 	if (taa_mitigation == TAA_MITIGATION_VERW ||
522 	    taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
523 		/*
524 		 * TSX is enabled, select alternate mitigation for TAA which is
525 		 * the same as MDS. Enable MDS static branch to clear CPU buffers.
526 		 *
527 		 * For guests that can't determine whether the correct microcode is
528 		 * present on host, enable the mitigation for UCODE_NEEDED as well.
529 		 */
530 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
531 
532 		if (taa_nosmt || cpu_mitigations_auto_nosmt())
533 			cpu_smt_disable(false);
534 	}
535 }
536 
537 static int __init tsx_async_abort_parse_cmdline(char *str)
538 {
539 	if (!boot_cpu_has_bug(X86_BUG_TAA))
540 		return 0;
541 
542 	if (!str)
543 		return -EINVAL;
544 
545 	if (!strcmp(str, "off")) {
546 		taa_mitigation = TAA_MITIGATION_OFF;
547 	} else if (!strcmp(str, "full")) {
548 		taa_mitigation = TAA_MITIGATION_VERW;
549 	} else if (!strcmp(str, "full,nosmt")) {
550 		taa_mitigation = TAA_MITIGATION_VERW;
551 		taa_nosmt = true;
552 	}
553 
554 	return 0;
555 }
556 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
557 
558 #undef pr_fmt
559 #define pr_fmt(fmt)	"MMIO Stale Data: " fmt
560 
561 static bool mmio_nosmt __ro_after_init = false;
562 
563 static const char * const mmio_strings[] = {
564 	[MMIO_MITIGATION_OFF]		= "Vulnerable",
565 	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
566 	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
567 };
568 
569 static void __init mmio_select_mitigation(void)
570 {
571 	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
572 	     cpu_mitigations_off()) {
573 		mmio_mitigation = MMIO_MITIGATION_OFF;
574 		return;
575 	}
576 
577 	/* Microcode will be checked in mmio_update_mitigation(). */
578 	if (mmio_mitigation == MMIO_MITIGATION_AUTO)
579 		mmio_mitigation = MMIO_MITIGATION_VERW;
580 
581 	if (mmio_mitigation == MMIO_MITIGATION_OFF)
582 		return;
583 
584 	/*
585 	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
586 	 * by MDS or TAA.
587 	 */
588 	if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
589 		verw_clear_cpu_buf_mitigation_selected = true;
590 }
591 
592 static void __init mmio_update_mitigation(void)
593 {
594 	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || cpu_mitigations_off())
595 		return;
596 
597 	if (verw_clear_cpu_buf_mitigation_selected)
598 		mmio_mitigation = MMIO_MITIGATION_VERW;
599 
600 	if (mmio_mitigation == MMIO_MITIGATION_VERW) {
601 		/*
602 		 * Check if the system has the right microcode.
603 		 *
604 		 * CPU Fill buffer clear mitigation is enumerated by either an explicit
605 		 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
606 		 * affected systems.
607 		 */
608 		if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
609 		      (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
610 		       boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
611 		     !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
612 			mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
613 	}
614 
615 	pr_info("%s\n", mmio_strings[mmio_mitigation]);
616 }
617 
618 static void __init mmio_apply_mitigation(void)
619 {
620 	if (mmio_mitigation == MMIO_MITIGATION_OFF)
621 		return;
622 
623 	/*
624 	 * Only enable the VMM mitigation if the CPU buffer clear mitigation is
625 	 * not being used.
626 	 */
627 	if (verw_clear_cpu_buf_mitigation_selected) {
628 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
629 		static_branch_disable(&cpu_buf_vm_clear);
630 	} else {
631 		static_branch_enable(&cpu_buf_vm_clear);
632 	}
633 
634 	/*
635 	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
636 	 * be propagated to uncore buffers, clearing the Fill buffers on idle
637 	 * is required irrespective of SMT state.
638 	 */
639 	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
640 		static_branch_enable(&mds_idle_clear);
641 
642 	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
643 		cpu_smt_disable(false);
644 }
645 
646 static int __init mmio_stale_data_parse_cmdline(char *str)
647 {
648 	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
649 		return 0;
650 
651 	if (!str)
652 		return -EINVAL;
653 
654 	if (!strcmp(str, "off")) {
655 		mmio_mitigation = MMIO_MITIGATION_OFF;
656 	} else if (!strcmp(str, "full")) {
657 		mmio_mitigation = MMIO_MITIGATION_VERW;
658 	} else if (!strcmp(str, "full,nosmt")) {
659 		mmio_mitigation = MMIO_MITIGATION_VERW;
660 		mmio_nosmt = true;
661 	}
662 
663 	return 0;
664 }
665 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
666 
667 #undef pr_fmt
668 #define pr_fmt(fmt)	"Register File Data Sampling: " fmt
669 
670 static const char * const rfds_strings[] = {
671 	[RFDS_MITIGATION_OFF]			= "Vulnerable",
672 	[RFDS_MITIGATION_VERW]			= "Mitigation: Clear Register File",
673 	[RFDS_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
674 };
675 
676 static inline bool __init verw_clears_cpu_reg_file(void)
677 {
678 	return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
679 }
680 
681 static void __init rfds_select_mitigation(void)
682 {
683 	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
684 		rfds_mitigation = RFDS_MITIGATION_OFF;
685 		return;
686 	}
687 
688 	if (rfds_mitigation == RFDS_MITIGATION_AUTO)
689 		rfds_mitigation = RFDS_MITIGATION_VERW;
690 
691 	if (rfds_mitigation == RFDS_MITIGATION_OFF)
692 		return;
693 
694 	if (verw_clears_cpu_reg_file())
695 		verw_clear_cpu_buf_mitigation_selected = true;
696 }
697 
698 static void __init rfds_update_mitigation(void)
699 {
700 	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off())
701 		return;
702 
703 	if (verw_clear_cpu_buf_mitigation_selected)
704 		rfds_mitigation = RFDS_MITIGATION_VERW;
705 
706 	if (rfds_mitigation == RFDS_MITIGATION_VERW) {
707 		if (!verw_clears_cpu_reg_file())
708 			rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
709 	}
710 
711 	pr_info("%s\n", rfds_strings[rfds_mitigation]);
712 }
713 
714 static void __init rfds_apply_mitigation(void)
715 {
716 	if (rfds_mitigation == RFDS_MITIGATION_VERW)
717 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
718 }
719 
720 static __init int rfds_parse_cmdline(char *str)
721 {
722 	if (!str)
723 		return -EINVAL;
724 
725 	if (!boot_cpu_has_bug(X86_BUG_RFDS))
726 		return 0;
727 
728 	if (!strcmp(str, "off"))
729 		rfds_mitigation = RFDS_MITIGATION_OFF;
730 	else if (!strcmp(str, "on"))
731 		rfds_mitigation = RFDS_MITIGATION_VERW;
732 
733 	return 0;
734 }
735 early_param("reg_file_data_sampling", rfds_parse_cmdline);
736 
737 #undef pr_fmt
738 #define pr_fmt(fmt)	"SRBDS: " fmt
739 
740 enum srbds_mitigations {
741 	SRBDS_MITIGATION_OFF,
742 	SRBDS_MITIGATION_AUTO,
743 	SRBDS_MITIGATION_UCODE_NEEDED,
744 	SRBDS_MITIGATION_FULL,
745 	SRBDS_MITIGATION_TSX_OFF,
746 	SRBDS_MITIGATION_HYPERVISOR,
747 };
748 
749 static enum srbds_mitigations srbds_mitigation __ro_after_init =
750 	IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;
751 
752 static const char * const srbds_strings[] = {
753 	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
754 	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
755 	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
756 	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
757 	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
758 };
759 
760 static bool srbds_off;
761 
762 void update_srbds_msr(void)
763 {
764 	u64 mcu_ctrl;
765 
766 	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
767 		return;
768 
769 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
770 		return;
771 
772 	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
773 		return;
774 
775 	/*
776 	 * An MDS_NO CPU for which the SRBDS mitigation is not needed due to
777 	 * TSX being disabled may not have received the SRBDS MSR microcode.
778 	 */
779 	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
780 		return;
781 
782 	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
783 
784 	switch (srbds_mitigation) {
785 	case SRBDS_MITIGATION_OFF:
786 	case SRBDS_MITIGATION_TSX_OFF:
787 		mcu_ctrl |= RNGDS_MITG_DIS;
788 		break;
789 	case SRBDS_MITIGATION_FULL:
790 		mcu_ctrl &= ~RNGDS_MITG_DIS;
791 		break;
792 	default:
793 		break;
794 	}
795 
796 	wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
797 }
798 
799 static void __init srbds_select_mitigation(void)
800 {
801 	if (!boot_cpu_has_bug(X86_BUG_SRBDS) || cpu_mitigations_off()) {
802 		srbds_mitigation = SRBDS_MITIGATION_OFF;
803 		return;
804 	}
805 
806 	if (srbds_mitigation == SRBDS_MITIGATION_AUTO)
807 		srbds_mitigation = SRBDS_MITIGATION_FULL;
808 
809 	/*
810 	 * Check to see if this is one of the MDS_NO systems supporting TSX that
811 	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
812 	 * by Processor MMIO Stale Data vulnerability.
813 	 */
814 	if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
815 	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
816 		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
817 	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
818 		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
819 	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
820 		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
821 	else if (srbds_off)
822 		srbds_mitigation = SRBDS_MITIGATION_OFF;
823 
824 	pr_info("%s\n", srbds_strings[srbds_mitigation]);
825 }
826 
827 static void __init srbds_apply_mitigation(void)
828 {
829 	update_srbds_msr();
830 }
831 
832 static int __init srbds_parse_cmdline(char *str)
833 {
834 	if (!str)
835 		return -EINVAL;
836 
837 	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
838 		return 0;
839 
840 	srbds_off = !strcmp(str, "off");
841 	return 0;
842 }
843 early_param("srbds", srbds_parse_cmdline);
844 
845 #undef pr_fmt
846 #define pr_fmt(fmt)     "L1D Flush : " fmt
847 
848 enum l1d_flush_mitigations {
849 	L1D_FLUSH_OFF = 0,
850 	L1D_FLUSH_ON,
851 };
852 
853 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
854 
855 static void __init l1d_flush_select_mitigation(void)
856 {
857 	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
858 		return;
859 
860 	static_branch_enable(&switch_mm_cond_l1d_flush);
861 	pr_info("Conditional flush on switch_mm() enabled\n");
862 }
863 
864 static int __init l1d_flush_parse_cmdline(char *str)
865 {
866 	if (!strcmp(str, "on"))
867 		l1d_flush_mitigation = L1D_FLUSH_ON;
868 
869 	return 0;
870 }
871 early_param("l1d_flush", l1d_flush_parse_cmdline);
872 
873 #undef pr_fmt
874 #define pr_fmt(fmt)	"GDS: " fmt
875 
876 enum gds_mitigations {
877 	GDS_MITIGATION_OFF,
878 	GDS_MITIGATION_AUTO,
879 	GDS_MITIGATION_UCODE_NEEDED,
880 	GDS_MITIGATION_FORCE,
881 	GDS_MITIGATION_FULL,
882 	GDS_MITIGATION_FULL_LOCKED,
883 	GDS_MITIGATION_HYPERVISOR,
884 };
885 
886 static enum gds_mitigations gds_mitigation __ro_after_init =
887 	IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;
888 
889 static const char * const gds_strings[] = {
890 	[GDS_MITIGATION_OFF]		= "Vulnerable",
891 	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
892 	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
893 	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
894 	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
895 	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
896 };
897 
898 bool gds_ucode_mitigated(void)
899 {
900 	return (gds_mitigation == GDS_MITIGATION_FULL ||
901 		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
902 }
903 EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
904 
905 void update_gds_msr(void)
906 {
907 	u64 mcu_ctrl_after;
908 	u64 mcu_ctrl;
909 
910 	switch (gds_mitigation) {
911 	case GDS_MITIGATION_OFF:
912 		rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
913 		mcu_ctrl |= GDS_MITG_DIS;
914 		break;
915 	case GDS_MITIGATION_FULL_LOCKED:
916 		/*
917 		 * The LOCKED state comes from the boot CPU. APs might not have
918 		 * the same state. Make sure the mitigation is enabled on all
919 		 * CPUs.
920 		 */
921 	case GDS_MITIGATION_FULL:
922 		rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
923 		mcu_ctrl &= ~GDS_MITG_DIS;
924 		break;
925 	case GDS_MITIGATION_FORCE:
926 	case GDS_MITIGATION_UCODE_NEEDED:
927 	case GDS_MITIGATION_HYPERVISOR:
928 	case GDS_MITIGATION_AUTO:
929 		return;
930 	}
931 
932 	wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
933 
934 	/*
935 	 * Check to make sure that the WRMSR value was not ignored. Writes to
936 	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
937 	 * processor was not.
938 	 */
939 	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
940 	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
941 }
942 
943 static void __init gds_select_mitigation(void)
944 {
945 	u64 mcu_ctrl;
946 
947 	if (!boot_cpu_has_bug(X86_BUG_GDS))
948 		return;
949 
950 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
951 		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
952 		return;
953 	}
954 
955 	if (cpu_mitigations_off())
956 		gds_mitigation = GDS_MITIGATION_OFF;
957 	/* Will verify below that mitigation _can_ be disabled */
958 
959 	if (gds_mitigation == GDS_MITIGATION_AUTO)
960 		gds_mitigation = GDS_MITIGATION_FULL;
961 
962 	/* No microcode */
963 	if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
964 		if (gds_mitigation != GDS_MITIGATION_FORCE)
965 			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
966 		return;
967 	}
968 
969 	/* Microcode has mitigation, use it */
970 	if (gds_mitigation == GDS_MITIGATION_FORCE)
971 		gds_mitigation = GDS_MITIGATION_FULL;
972 
973 	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
974 	if (mcu_ctrl & GDS_MITG_LOCKED) {
975 		if (gds_mitigation == GDS_MITIGATION_OFF)
976 			pr_warn("Mitigation locked. Disable failed.\n");
977 
978 		/*
979 		 * The mitigation is selected from the boot CPU. All other CPUs
980 		 * _should_ have the same state. If the boot CPU isn't locked
981 		 * but others are then update_gds_msr() will WARN() of the state
982 		 * mismatch. If the boot CPU is locked update_gds_msr() will
983 		 * ensure the other CPUs have the mitigation enabled.
984 		 */
985 		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
986 	}
987 }
988 
989 static void __init gds_apply_mitigation(void)
990 {
991 	if (!boot_cpu_has_bug(X86_BUG_GDS))
992 		return;
993 
994 	/* Microcode is present */
995 	if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
996 		update_gds_msr();
997 	else if (gds_mitigation == GDS_MITIGATION_FORCE) {
998 		/*
999 		 * This only needs to be done on the boot CPU so do it
1000 		 * here rather than in update_gds_msr()
1001 		 */
1002 		setup_clear_cpu_cap(X86_FEATURE_AVX);
1003 		pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
1004 	}
1005 
1006 	pr_info("%s\n", gds_strings[gds_mitigation]);
1007 }
1008 
1009 static int __init gds_parse_cmdline(char *str)
1010 {
1011 	if (!str)
1012 		return -EINVAL;
1013 
1014 	if (!boot_cpu_has_bug(X86_BUG_GDS))
1015 		return 0;
1016 
1017 	if (!strcmp(str, "off"))
1018 		gds_mitigation = GDS_MITIGATION_OFF;
1019 	else if (!strcmp(str, "force"))
1020 		gds_mitigation = GDS_MITIGATION_FORCE;
1021 
1022 	return 0;
1023 }
1024 early_param("gather_data_sampling", gds_parse_cmdline);
1025 
1026 #undef pr_fmt
1027 #define pr_fmt(fmt)     "Spectre V1 : " fmt
1028 
1029 enum spectre_v1_mitigation {
1030 	SPECTRE_V1_MITIGATION_NONE,
1031 	SPECTRE_V1_MITIGATION_AUTO,
1032 };
1033 
1034 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
1035 	IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
1036 		SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;
1037 
1038 static const char * const spectre_v1_strings[] = {
1039 	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
1040 	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
1041 };
1042 
1043 /*
1044  * Does SMAP provide full mitigation against speculative kernel access to
1045  * userspace?
1046  */
1047 static bool smap_works_speculatively(void)
1048 {
1049 	if (!boot_cpu_has(X86_FEATURE_SMAP))
1050 		return false;
1051 
1052 	/*
1053 	 * On CPUs which are vulnerable to Meltdown, SMAP does not
1054 	 * prevent speculative access to user data in the L1 cache.
1055 	 * Consider SMAP to be non-functional as a mitigation on these
1056 	 * CPUs.
1057 	 */
1058 	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
1059 		return false;
1060 
1061 	return true;
1062 }
1063 
1064 static void __init spectre_v1_select_mitigation(void)
1065 {
1066 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
1067 		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1068 }
1069 
1070 static void __init spectre_v1_apply_mitigation(void)
1071 {
1072 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
1073 		return;
1074 
1075 	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
1076 		/*
1077 		 * With Spectre v1, a user can speculatively control either
1078 		 * path of a conditional swapgs with a user-controlled GS
1079 		 * value.  The mitigation is to add lfences to both code paths.
1080 		 *
1081 		 * If FSGSBASE is enabled, the user can put a kernel address in
1082 		 * GS, in which case SMAP provides no protection.
1083 		 *
1084 		 * If FSGSBASE is disabled, the user can only put a user space
1085 		 * address in GS.  That makes an attack harder, but still
1086 		 * possible if there's no SMAP protection.
1087 		 */
1088 		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
1089 		    !smap_works_speculatively()) {
1090 			/*
1091 			 * Mitigation can be provided from SWAPGS itself or
1092 			 * PTI as the CR3 write in the Meltdown mitigation
1093 			 * is serializing.
1094 			 *
1095 			 * If neither is there, mitigate with an LFENCE to
1096 			 * stop speculation through swapgs.
1097 			 */
1098 			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
1099 			    !boot_cpu_has(X86_FEATURE_PTI))
1100 				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
1101 
1102 			/*
1103 			 * Enable lfences in the kernel entry (non-swapgs)
1104 			 * paths, to prevent user entry from speculatively
1105 			 * skipping swapgs.
1106 			 */
1107 			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
1108 		}
1109 	}
1110 
1111 	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1112 }
1113 
1114 static int __init nospectre_v1_cmdline(char *str)
1115 {
1116 	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1117 	return 0;
1118 }
1119 early_param("nospectre_v1", nospectre_v1_cmdline);
1120 
1121 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
1122 
1123 #undef pr_fmt
1124 #define pr_fmt(fmt)     "RETBleed: " fmt
1125 
1126 enum its_mitigation {
1127 	ITS_MITIGATION_OFF,
1128 	ITS_MITIGATION_AUTO,
1129 	ITS_MITIGATION_VMEXIT_ONLY,
1130 	ITS_MITIGATION_ALIGNED_THUNKS,
1131 	ITS_MITIGATION_RETPOLINE_STUFF,
1132 };
1133 
1134 static enum its_mitigation its_mitigation __ro_after_init =
1135 	IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF;
1136 
1137 enum retbleed_mitigation {
1138 	RETBLEED_MITIGATION_NONE,
1139 	RETBLEED_MITIGATION_AUTO,
1140 	RETBLEED_MITIGATION_UNRET,
1141 	RETBLEED_MITIGATION_IBPB,
1142 	RETBLEED_MITIGATION_IBRS,
1143 	RETBLEED_MITIGATION_EIBRS,
1144 	RETBLEED_MITIGATION_STUFF,
1145 };
1146 
1147 static const char * const retbleed_strings[] = {
1148 	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
1149 	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
1150 	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
1151 	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
1152 	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
1153 	[RETBLEED_MITIGATION_STUFF]	= "Mitigation: Stuffing",
1154 };
1155 
1156 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
1157 	IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;
1158 
1159 static int __ro_after_init retbleed_nosmt = false;
1160 
1161 static int __init retbleed_parse_cmdline(char *str)
1162 {
1163 	if (!str)
1164 		return -EINVAL;
1165 
1166 	while (str) {
1167 		char *next = strchr(str, ',');
1168 		if (next) {
1169 			*next = 0;
1170 			next++;
1171 		}
1172 
1173 		if (!strcmp(str, "off")) {
1174 			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1175 		} else if (!strcmp(str, "auto")) {
1176 			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1177 		} else if (!strcmp(str, "unret")) {
1178 			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1179 		} else if (!strcmp(str, "ibpb")) {
1180 			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1181 		} else if (!strcmp(str, "stuff")) {
1182 			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1183 		} else if (!strcmp(str, "nosmt")) {
1184 			retbleed_nosmt = true;
1185 		} else if (!strcmp(str, "force")) {
1186 			setup_force_cpu_bug(X86_BUG_RETBLEED);
1187 		} else {
1188 			pr_err("Ignoring unknown retbleed option (%s).", str);
1189 		}
1190 
1191 		str = next;
1192 	}
1193 
1194 	return 0;
1195 }
1196 early_param("retbleed", retbleed_parse_cmdline);
1197 
1198 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
1199 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
1200 
1201 static void __init retbleed_select_mitigation(void)
1202 {
1203 	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) {
1204 		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1205 		return;
1206 	}
1207 
1208 	switch (retbleed_mitigation) {
1209 	case RETBLEED_MITIGATION_UNRET:
1210 		if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
1211 			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1212 			pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
1213 		}
1214 		break;
1215 	case RETBLEED_MITIGATION_IBPB:
1216 		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
1217 			pr_err("WARNING: CPU does not support IBPB.\n");
1218 			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1219 		} else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
1220 			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
1221 			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1222 		}
1223 		break;
1224 	case RETBLEED_MITIGATION_STUFF:
1225 		if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1226 			pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
1227 			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1228 		} else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1229 			pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
1230 			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1231 		}
1232 		break;
1233 	default:
1234 		break;
1235 	}
1236 
1237 	if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
1238 		return;
1239 
1240 	/* Intel mitigation selected in retbleed_update_mitigation() */
1241 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1242 	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
1243 		if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
1244 			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1245 		else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
1246 			 boot_cpu_has(X86_FEATURE_IBPB))
1247 			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1248 		else
1249 			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1250 	}
1251 }
1252 
1253 static void __init retbleed_update_mitigation(void)
1254 {
1255 	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
1256 		return;
1257 
1258 	if (retbleed_mitigation == RETBLEED_MITIGATION_NONE)
1259 		goto out;
1260 
1261 	/*
1262 	 * retbleed=stuff is only allowed on Intel.  If stuffing can't be used
1263 	 * then a different mitigation will be selected below.
1264 	 *
1265 	 * its=stuff will also attempt to enable stuffing.
1266 	 */
1267 	if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF ||
1268 	    its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF) {
1269 		if (spectre_v2_enabled != SPECTRE_V2_RETPOLINE) {
1270 			pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
1271 			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1272 		} else {
1273 			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
1274 				pr_info("Retbleed mitigation updated to stuffing\n");
1275 
1276 			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1277 		}
1278 	}
1279 	/*
1280 	 * Let IBRS trump all on Intel without affecting the effects of the
1281 	 * retbleed= cmdline option except for call depth based stuffing
1282 	 */
1283 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1284 		switch (spectre_v2_enabled) {
1285 		case SPECTRE_V2_IBRS:
1286 			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1287 			break;
1288 		case SPECTRE_V2_EIBRS:
1289 		case SPECTRE_V2_EIBRS_RETPOLINE:
1290 		case SPECTRE_V2_EIBRS_LFENCE:
1291 			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1292 			break;
1293 		default:
1294 			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
1295 				pr_err(RETBLEED_INTEL_MSG);
1296 		}
1297 		/* If nothing has set the mitigation yet, default to NONE. */
1298 		if (retbleed_mitigation == RETBLEED_MITIGATION_AUTO)
1299 			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1300 	}
1301 out:
1302 	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
1303 }
1304 
1305 
1306 static void __init retbleed_apply_mitigation(void)
1307 {
1308 	bool mitigate_smt = false;
1309 
1310 	switch (retbleed_mitigation) {
1311 	case RETBLEED_MITIGATION_NONE:
1312 		return;
1313 
1314 	case RETBLEED_MITIGATION_UNRET:
1315 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1316 		setup_force_cpu_cap(X86_FEATURE_UNRET);
1317 
1318 		set_return_thunk(retbleed_return_thunk);
1319 
1320 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
1321 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
1322 			pr_err(RETBLEED_UNTRAIN_MSG);
1323 
1324 		mitigate_smt = true;
1325 		break;
1326 
1327 	case RETBLEED_MITIGATION_IBPB:
1328 		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1329 		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
1330 		mitigate_smt = true;
1331 
1332 		/*
1333 		 * IBPB on entry already obviates the need for
1334 		 * software-based untraining so clear those in case some
1335 		 * other mitigation like SRSO has selected them.
1336 		 */
1337 		setup_clear_cpu_cap(X86_FEATURE_UNRET);
1338 		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
1339 
1340 		/*
1341 		 * There is no need for RSB filling: write_ibpb() ensures
1342 		 * all predictions, including the RSB, are invalidated,
1343 		 * regardless of IBPB implementation.
1344 		 */
1345 		setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1346 
1347 		break;
1348 
1349 	case RETBLEED_MITIGATION_STUFF:
1350 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1351 		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1352 
1353 		set_return_thunk(call_depth_return_thunk);
1354 		break;
1355 
1356 	default:
1357 		break;
1358 	}
1359 
1360 	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
1361 	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
1362 		cpu_smt_disable(false);
1363 }
1364 
1365 #undef pr_fmt
1366 #define pr_fmt(fmt)     "ITS: " fmt
1367 
1368 static const char * const its_strings[] = {
1369 	[ITS_MITIGATION_OFF]			= "Vulnerable",
1370 	[ITS_MITIGATION_VMEXIT_ONLY]		= "Mitigation: Vulnerable, KVM: Not affected",
1371 	[ITS_MITIGATION_ALIGNED_THUNKS]		= "Mitigation: Aligned branch/return thunks",
1372 	[ITS_MITIGATION_RETPOLINE_STUFF]	= "Mitigation: Retpolines, Stuffing RSB",
1373 };
1374 
1375 static int __init its_parse_cmdline(char *str)
1376 {
1377 	if (!str)
1378 		return -EINVAL;
1379 
1380 	if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
1381 		pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
1382 		return 0;
1383 	}
1384 
1385 	if (!strcmp(str, "off")) {
1386 		its_mitigation = ITS_MITIGATION_OFF;
1387 	} else if (!strcmp(str, "on")) {
1388 		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1389 	} else if (!strcmp(str, "force")) {
1390 		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1391 		setup_force_cpu_bug(X86_BUG_ITS);
1392 	} else if (!strcmp(str, "vmexit")) {
1393 		its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
1394 	} else if (!strcmp(str, "stuff")) {
1395 		its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1396 	} else {
1397 		pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
1398 	}
1399 
1400 	return 0;
1401 }
1402 early_param("indirect_target_selection", its_parse_cmdline);
1403 
1404 static void __init its_select_mitigation(void)
1405 {
1406 	if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
1407 		its_mitigation = ITS_MITIGATION_OFF;
1408 		return;
1409 	}
1410 
1411 	if (its_mitigation == ITS_MITIGATION_AUTO)
1412 		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1413 
1414 	if (its_mitigation == ITS_MITIGATION_OFF)
1415 		return;
1416 
1417 	if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
1418 	    !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
1419 		pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
1420 		its_mitigation = ITS_MITIGATION_OFF;
1421 		return;
1422 	}
1423 
1424 	if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
1425 		pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
1426 		its_mitigation = ITS_MITIGATION_OFF;
1427 		return;
1428 	}
1429 
1430 	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1431 	    !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1432 		pr_err("RSB stuff mitigation not supported, using default\n");
1433 		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1434 	}
1435 
1436 	if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY &&
1437 	    !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
1438 		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1439 }
1440 
1441 static void __init its_update_mitigation(void)
1442 {
1443 	if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off())
1444 		return;
1445 
1446 	switch (spectre_v2_enabled) {
1447 	case SPECTRE_V2_NONE:
1448 		pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
1449 		its_mitigation = ITS_MITIGATION_OFF;
1450 		break;
1451 	case SPECTRE_V2_RETPOLINE:
1452 		/* Retpoline+CDT mitigates ITS */
1453 		if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
1454 			its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1455 		break;
1456 	case SPECTRE_V2_LFENCE:
1457 	case SPECTRE_V2_EIBRS_LFENCE:
1458 		pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
1459 		its_mitigation = ITS_MITIGATION_OFF;
1460 		break;
1461 	default:
1462 		break;
1463 	}
1464 
1465 	/*
1466 	 * retbleed_update_mitigation() will try to do stuffing if its=stuff.
1467 	 * If it can't, such as if spectre_v2!=retpoline, then fall back to
1468 	 * aligned thunks.
1469 	 */
1470 	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1471 	    retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
1472 		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1473 
1474 	pr_info("%s\n", its_strings[its_mitigation]);
1475 }
1476 
1477 static void __init its_apply_mitigation(void)
1478 {
1479 	/* its=stuff forces retbleed stuffing and is enabled there. */
1480 	if (its_mitigation != ITS_MITIGATION_ALIGNED_THUNKS)
1481 		return;
1482 
1483 	if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
1484 		setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
1485 
1486 	setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1487 	set_return_thunk(its_return_thunk);
1488 }
1489 
1490 #undef pr_fmt
1491 #define pr_fmt(fmt)     "Spectre V2 : " fmt
1492 
1493 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
1494 	SPECTRE_V2_USER_NONE;
1495 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
1496 	SPECTRE_V2_USER_NONE;
1497 
1498 #ifdef CONFIG_MITIGATION_RETPOLINE
1499 static bool spectre_v2_bad_module;
1500 
1501 bool retpoline_module_ok(bool has_retpoline)
1502 {
1503 	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1504 		return true;
1505 
1506 	pr_err("System may be vulnerable to spectre v2\n");
1507 	spectre_v2_bad_module = true;
1508 	return false;
1509 }
1510 
1511 static inline const char *spectre_v2_module_string(void)
1512 {
1513 	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1514 }
1515 #else
1516 static inline const char *spectre_v2_module_string(void) { return ""; }
1517 #endif
1518 
1519 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1520 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1521 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1522 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1523 
1524 #ifdef CONFIG_BPF_SYSCALL
1525 void unpriv_ebpf_notify(int new_state)
1526 {
1527 	if (new_state)
1528 		return;
1529 
1530 	/* Unprivileged eBPF is enabled */
1531 
1532 	switch (spectre_v2_enabled) {
1533 	case SPECTRE_V2_EIBRS:
1534 		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1535 		break;
1536 	case SPECTRE_V2_EIBRS_LFENCE:
1537 		if (sched_smt_active())
1538 			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1539 		break;
1540 	default:
1541 		break;
1542 	}
1543 }
1544 #endif
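/*
 * Example trigger (hedged): this callback is invoked from the bpf sysctl
 * handler when an administrator re-enables unprivileged eBPF at runtime,
 * e.g.:
 *
 *	# sysctl kernel.unprivileged_bpf_disabled=0
 *
 * at which point the eIBRS/LFENCE warnings above are emitted.
 */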
1545 
1546 static inline bool match_option(const char *arg, int arglen, const char *opt)
1547 {
1548 	int len = strlen(opt);
1549 
1550 	return len == arglen && !strncmp(arg, opt, len);
1551 }
1552 
1553 /* The kernel command line selection for spectre v2 */
1554 enum spectre_v2_mitigation_cmd {
1555 	SPECTRE_V2_CMD_NONE,
1556 	SPECTRE_V2_CMD_AUTO,
1557 	SPECTRE_V2_CMD_FORCE,
1558 	SPECTRE_V2_CMD_RETPOLINE,
1559 	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1560 	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1561 	SPECTRE_V2_CMD_EIBRS,
1562 	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1563 	SPECTRE_V2_CMD_EIBRS_LFENCE,
1564 	SPECTRE_V2_CMD_IBRS,
1565 };
1566 
1567 static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO;
1568 
1569 enum spectre_v2_user_cmd {
1570 	SPECTRE_V2_USER_CMD_NONE,
1571 	SPECTRE_V2_USER_CMD_AUTO,
1572 	SPECTRE_V2_USER_CMD_FORCE,
1573 	SPECTRE_V2_USER_CMD_PRCTL,
1574 	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1575 	SPECTRE_V2_USER_CMD_SECCOMP,
1576 	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1577 };
1578 
1579 static const char * const spectre_v2_user_strings[] = {
1580 	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
1581 	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
1582 	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
1583 	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
1584 	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
1585 };
1586 
1587 static const struct {
1588 	const char			*option;
1589 	enum spectre_v2_user_cmd	cmd;
1590 	bool				secure;
1591 } v2_user_options[] __initconst = {
1592 	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
1593 	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
1594 	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
1595 	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
1596 	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
1597 	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
1598 	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
1599 };
1600 
1601 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
1602 {
1603 	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1604 		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
1605 }
1606 
1607 static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void)
1608 {
1609 	char arg[20];
1610 	int ret, i;
1611 
1612 	if (cpu_mitigations_off() || !IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2))
1613 		return SPECTRE_V2_USER_CMD_NONE;
1614 
1615 	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
1616 				  arg, sizeof(arg));
1617 	if (ret < 0)
1618 		return SPECTRE_V2_USER_CMD_AUTO;
1619 
1620 	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
1621 		if (match_option(arg, ret, v2_user_options[i].option)) {
1622 			spec_v2_user_print_cond(v2_user_options[i].option,
1623 						v2_user_options[i].secure);
1624 			return v2_user_options[i].cmd;
1625 		}
1626 	}
1627 
1628 	pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
1629 	return SPECTRE_V2_USER_CMD_AUTO;
1630 }
1631 
1632 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1633 {
1634 	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1635 }
1636 
1637 static void __init spectre_v2_user_select_mitigation(void)
1638 {
1639 	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1640 		return;
1641 
1642 	switch (spectre_v2_parse_user_cmdline()) {
1643 	case SPECTRE_V2_USER_CMD_NONE:
1644 		return;
1645 	case SPECTRE_V2_USER_CMD_FORCE:
1646 		spectre_v2_user_ibpb  = SPECTRE_V2_USER_STRICT;
1647 		spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1648 		break;
1649 	case SPECTRE_V2_USER_CMD_AUTO:
1650 	case SPECTRE_V2_USER_CMD_PRCTL:
1651 		spectre_v2_user_ibpb  = SPECTRE_V2_USER_PRCTL;
1652 		spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1653 		break;
1654 	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1655 		spectre_v2_user_ibpb  = SPECTRE_V2_USER_STRICT;
1656 		spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1657 		break;
1658 	case SPECTRE_V2_USER_CMD_SECCOMP:
1659 		if (IS_ENABLED(CONFIG_SECCOMP))
1660 			spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
1661 		else
1662 			spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1663 		spectre_v2_user_stibp = spectre_v2_user_ibpb;
1664 		break;
1665 	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1666 		spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1667 		if (IS_ENABLED(CONFIG_SECCOMP))
1668 			spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
1669 		else
1670 			spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1671 		break;
1672 	}
1673 
1674 	/*
1675 	 * At this point, an STIBP mode other than "off" has been set.
1676 	 * If STIBP support is not being forced, check if STIBP always-on
1677 	 * is preferred.
1678 	 */
1679 	if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1680 	     spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
1681 	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1682 		spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1683 
1684 	if (!boot_cpu_has(X86_FEATURE_IBPB))
1685 		spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1686 
1687 	if (!boot_cpu_has(X86_FEATURE_STIBP))
1688 		spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1689 }
1690 
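/*
 * Re-evaluate the user space protection after all mitigations have made
 * their selection: honor a spectre_v2= override, drop STIBP where it is
 * not needed (no SMT, or eIBRS without AutoIBRS), and prefer always-on
 * STIBP when the retbleed mitigation depends on it.
 */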
1691 static void __init spectre_v2_user_update_mitigation(void)
1692 {
1693 	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1694 		return;
1695 
1696 	/* The spectre_v2 cmd line can override spectre_v2_user options */
1697 	if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
1698 		spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1699 		spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1700 	} else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
1701 		spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1702 		spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1703 	}
1704 
1705 	/*
1706 	 * If STIBP is not supported, Intel enhanced IBRS is enabled, or SMT is
1707 	 * not possible, STIBP is not required.
1708 	 *
1709 	 * Intel's Enhanced IBRS also protects against cross-thread branch target
1710 	 * injection in user-mode as the IBRS bit remains always set which
1711 	 * implicitly enables cross-thread protections.  However, in legacy IBRS
1712 	 * mode, the IBRS bit is set only on kernel entry and cleared on return
1713 	 * to userspace.  AMD Automatic IBRS also does not protect userspace.
1714 	 * These modes therefore disable the implicit cross-thread protection,
1715 	 * so allow for STIBP to be selected in those cases.
1716 	 */
1717 	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1718 	    !cpu_smt_possible() ||
1719 	    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1720 	     !boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
1721 		spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1722 		return;
1723 	}
1724 
1725 	if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE &&
1726 	    (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
1727 	     retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) {
1728 		if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT &&
1729 		    spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED)
1730 			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
1731 		spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1732 	}
1733 	pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]);
1734 }
1735 
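/* Flip the static branches which implement the chosen IBPB policy. */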
1736 static void __init spectre_v2_user_apply_mitigation(void)
1737 {
1738 	/* Initialize Indirect Branch Prediction Barrier */
1739 	if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) {
1740 		static_branch_enable(&switch_vcpu_ibpb);
1741 
1742 		switch (spectre_v2_user_ibpb) {
1743 		case SPECTRE_V2_USER_STRICT:
1744 			static_branch_enable(&switch_mm_always_ibpb);
1745 			break;
1746 		case SPECTRE_V2_USER_PRCTL:
1747 		case SPECTRE_V2_USER_SECCOMP:
1748 			static_branch_enable(&switch_mm_cond_ibpb);
1749 			break;
1750 		default:
1751 			break;
1752 		}
1753 
1754 		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
1755 			static_key_enabled(&switch_mm_always_ibpb) ?
1756 			"always-on" : "conditional");
1757 	}
1758 }
1759 
1760 static const char * const spectre_v2_strings[] = {
1761 	[SPECTRE_V2_NONE]			= "Vulnerable",
1762 	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
1763 	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
1764 	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced / Automatic IBRS",
1765 	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced / Automatic IBRS + LFENCE",
1766 	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced / Automatic IBRS + Retpolines",
1767 	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
1768 };
1769 
1770 static const struct {
1771 	const char *option;
1772 	enum spectre_v2_mitigation_cmd cmd;
1773 	bool secure;
1774 } mitigation_options[] __initconst = {
1775 	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
1776 	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
1777 	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
1778 	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
1779 	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
1780 	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
1781 	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
1782 	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
1783 	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
1784 	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
1785 	{ "ibrs",		SPECTRE_V2_CMD_IBRS,              false },
1786 };
1787 
1788 static void __init spec_v2_print_cond(const char *reason, bool secure)
1789 {
1790 	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1791 		pr_info("%s selected on command line.\n", reason);
1792 }
1793 
1794 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1795 {
1796 	enum spectre_v2_mitigation_cmd cmd;
1797 	char arg[20];
1798 	int ret, i;
1799 
1800 	cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ?  SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE;
1801 	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
1802 	    cpu_mitigations_off())
1803 		return SPECTRE_V2_CMD_NONE;
1804 
1805 	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
1806 	if (ret < 0)
1807 		return cmd;
1808 
1809 	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
1810 		if (!match_option(arg, ret, mitigation_options[i].option))
1811 			continue;
1812 		cmd = mitigation_options[i].cmd;
1813 		break;
1814 	}
1815 
1816 	if (i >= ARRAY_SIZE(mitigation_options)) {
1817 		pr_err("unknown option (%s). Switching to default mode\n", arg);
1818 		return cmd;
1819 	}
1820 
1821 	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
1822 	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1823 	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
1824 	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1825 	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1826 	    !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
1827 		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1828 		       mitigation_options[i].option);
1829 		return SPECTRE_V2_CMD_AUTO;
1830 	}
1831 
1832 	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
1833 	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1834 	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1835 	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1836 		pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
1837 		       mitigation_options[i].option);
1838 		return SPECTRE_V2_CMD_AUTO;
1839 	}
1840 
1841 	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1842 	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
1843 	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
1844 		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
1845 		       mitigation_options[i].option);
1846 		return SPECTRE_V2_CMD_AUTO;
1847 	}
1848 
1849 	if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
1850 		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1851 		       mitigation_options[i].option);
1852 		return SPECTRE_V2_CMD_AUTO;
1853 	}
1854 
1855 	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1856 		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
1857 		       mitigation_options[i].option);
1858 		return SPECTRE_V2_CMD_AUTO;
1859 	}
1860 
1861 	if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
1862 		pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
1863 		       mitigation_options[i].option);
1864 		return SPECTRE_V2_CMD_AUTO;
1865 	}
1866 
1867 	if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
1868 		pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
1869 		       mitigation_options[i].option);
1870 		return SPECTRE_V2_CMD_AUTO;
1871 	}
1872 
1873 	spec_v2_print_cond(mitigation_options[i].option,
1874 			   mitigation_options[i].secure);
1875 	return cmd;
1876 }
1877 
1878 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
1879 {
1880 	if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
1881 		pr_err("Kernel not compiled with retpoline; no mitigation available!");
1882 		return SPECTRE_V2_NONE;
1883 	}
1884 
1885 	return SPECTRE_V2_RETPOLINE;
1886 }
1887 
1888 static bool __ro_after_init rrsba_disabled;
1889 
1890 /* Disable in-kernel use of non-RSB RET predictors */
1891 static void __init spec_ctrl_disable_kernel_rrsba(void)
1892 {
1893 	if (rrsba_disabled)
1894 		return;
1895 
1896 	if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
1897 		rrsba_disabled = true;
1898 		return;
1899 	}
1900 
1901 	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
1902 		return;
1903 
1904 	x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
1905 	update_spec_ctrl(x86_spec_ctrl_base);
1906 	rrsba_disabled = true;
1907 }
1908 
1909 static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
1910 {
1911 	/*
1912 	 * WARNING! There are many subtleties to consider when changing *any*
1913 	 * code related to RSB-related mitigations.  Before doing so, carefully
1914 	 * read the following document, and update if necessary:
1915 	 *
1916 	 *   Documentation/admin-guide/hw-vuln/rsb.rst
1917 	 *
1918 	 * In an overly simplified nutshell:
1919 	 *
1920 	 *   - User->user RSB attacks are conditionally mitigated during
1921 	 *     context switches by cond_mitigation -> write_ibpb().
1922 	 *
1923 	 *   - User->kernel and guest->host attacks are mitigated by eIBRS or
1924 	 *     RSB filling.
1925 	 *
1926 	 *     Though, depending on config, note that other alternative
1927 	 *     mitigations may end up getting used instead, e.g., IBPB on
1928 	 *     entry/vmexit, call depth tracking, or return thunks.
1929 	 */
1930 
1931 	switch (mode) {
1932 	case SPECTRE_V2_NONE:
1933 		break;
1934 
1935 	case SPECTRE_V2_EIBRS:
1936 	case SPECTRE_V2_EIBRS_LFENCE:
1937 	case SPECTRE_V2_EIBRS_RETPOLINE:
1938 		if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
1939 			pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
1940 			setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
1941 		}
1942 		break;
1943 
1944 	case SPECTRE_V2_RETPOLINE:
1945 	case SPECTRE_V2_LFENCE:
1946 	case SPECTRE_V2_IBRS:
1947 		pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
1948 		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
1949 		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1950 		break;
1951 
1952 	default:
1953 		pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
1954 		dump_stack();
1955 		break;
1956 	}
1957 }
1958 
1959 /*
1960  * Set BHI_DIS_S to prevent indirect branches in kernel to be influenced by
1961  * branch history in userspace. Not needed if BHI_NO is set.
1962  */
1963 static bool __init spec_ctrl_bhi_dis(void)
1964 {
1965 	if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
1966 		return false;
1967 
1968 	x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
1969 	update_spec_ctrl(x86_spec_ctrl_base);
1970 	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
1971 
1972 	return true;
1973 }
1974 
1975 enum bhi_mitigations {
1976 	BHI_MITIGATION_OFF,
1977 	BHI_MITIGATION_AUTO,
1978 	BHI_MITIGATION_ON,
1979 	BHI_MITIGATION_VMEXIT_ONLY,
1980 };
1981 
1982 static enum bhi_mitigations bhi_mitigation __ro_after_init =
1983 	IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF;
1984 
1985 static int __init spectre_bhi_parse_cmdline(char *str)
1986 {
1987 	if (!str)
1988 		return -EINVAL;
1989 
1990 	if (!strcmp(str, "off"))
1991 		bhi_mitigation = BHI_MITIGATION_OFF;
1992 	else if (!strcmp(str, "on"))
1993 		bhi_mitigation = BHI_MITIGATION_ON;
1994 	else if (!strcmp(str, "vmexit"))
1995 		bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
1996 	else
1997 		pr_err("Ignoring unknown spectre_bhi option (%s)", str);
1998 
1999 	return 0;
2000 }
2001 early_param("spectre_bhi", spectre_bhi_parse_cmdline);
2002 
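/*
 * Resolve the BHI command line / Kconfig default: off when the CPU is not
 * affected or mitigations are disabled, otherwise AUTO becomes ON.  The
 * hardware vs. software choice is made later in bhi_apply_mitigation().
 */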
2003 static void __init bhi_select_mitigation(void)
2004 {
2005 	if (!boot_cpu_has(X86_BUG_BHI) || cpu_mitigations_off())
2006 		bhi_mitigation = BHI_MITIGATION_OFF;
2007 
2008 	if (bhi_mitigation == BHI_MITIGATION_AUTO)
2009 		bhi_mitigation = BHI_MITIGATION_ON;
2010 }
2011 
2012 static void __init bhi_update_mitigation(void)
2013 {
2014 	if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE)
2015 		bhi_mitigation = BHI_MITIGATION_OFF;
2016 
2017 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
2018 	     spectre_v2_cmd == SPECTRE_V2_CMD_AUTO)
2019 		bhi_mitigation = BHI_MITIGATION_OFF;
2020 }
2021 
2022 static void __init bhi_apply_mitigation(void)
2023 {
2024 	if (bhi_mitigation == BHI_MITIGATION_OFF)
2025 		return;
2026 
2027 	/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
2028 	if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
2029 	    !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
2030 		spec_ctrl_disable_kernel_rrsba();
2031 		if (rrsba_disabled)
2032 			return;
2033 	}
2034 
2035 	if (!IS_ENABLED(CONFIG_X86_64))
2036 		return;
2037 
2038 	/* Mitigate in hardware if supported */
2039 	if (spec_ctrl_bhi_dis())
2040 		return;
2041 
2042 	if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
2043 		pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
2044 		setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2045 		return;
2046 	}
2047 
2048 	pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
2049 	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
2050 	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2051 }
2052 
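/*
 * Pick the base Spectre v2 mitigation from the command line, preferring
 * enhanced/automatic IBRS over retpolines for the AUTO and FORCE cases.
 */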
2053 static void __init spectre_v2_select_mitigation(void)
2054 {
2055 	spectre_v2_cmd = spectre_v2_parse_cmdline();
2056 
2057 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
2058 	    (spectre_v2_cmd == SPECTRE_V2_CMD_NONE || spectre_v2_cmd == SPECTRE_V2_CMD_AUTO))
2059 		return;
2060 
2061 	switch (spectre_v2_cmd) {
2062 	case SPECTRE_V2_CMD_NONE:
2063 		return;
2064 
2065 	case SPECTRE_V2_CMD_FORCE:
2066 	case SPECTRE_V2_CMD_AUTO:
2067 		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2068 			spectre_v2_enabled = SPECTRE_V2_EIBRS;
2069 			break;
2070 		}
2071 
2072 		spectre_v2_enabled = spectre_v2_select_retpoline();
2073 		break;
2074 
2075 	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
2076 		pr_err(SPECTRE_V2_LFENCE_MSG);
2077 		spectre_v2_enabled = SPECTRE_V2_LFENCE;
2078 		break;
2079 
2080 	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
2081 		spectre_v2_enabled = SPECTRE_V2_RETPOLINE;
2082 		break;
2083 
2084 	case SPECTRE_V2_CMD_RETPOLINE:
2085 		spectre_v2_enabled = spectre_v2_select_retpoline();
2086 		break;
2087 
2088 	case SPECTRE_V2_CMD_IBRS:
2089 		spectre_v2_enabled = SPECTRE_V2_IBRS;
2090 		break;
2091 
2092 	case SPECTRE_V2_CMD_EIBRS:
2093 		spectre_v2_enabled = SPECTRE_V2_EIBRS;
2094 		break;
2095 
2096 	case SPECTRE_V2_CMD_EIBRS_LFENCE:
2097 		spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE;
2098 		break;
2099 
2100 	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
2101 		spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE;
2102 		break;
2103 	}
2104 }
2105 
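/*
 * With AUTO selection, a retpoline choice may still be upgraded to IBRS on
 * Intel CPUs which also require a retbleed mitigation, as IBRS covers both.
 */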
2106 static void __init spectre_v2_update_mitigation(void)
2107 {
2108 	if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO &&
2109 	    !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) {
2110 		if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
2111 		    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2112 		    retbleed_mitigation != RETBLEED_MITIGATION_NONE &&
2113 		    retbleed_mitigation != RETBLEED_MITIGATION_STUFF &&
2114 		    boot_cpu_has(X86_FEATURE_IBRS) &&
2115 		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
2116 			spectre_v2_enabled = SPECTRE_V2_IBRS;
2117 		}
2118 	}
2119 
2120 	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && !cpu_mitigations_off())
2121 		pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]);
2122 }
2123 
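/*
 * Program the selected mode: set the IBRS/AutoIBRS bits, force the
 * retpoline/LFENCE feature flags and sort out the RSB and firmware call
 * protections.
 */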
2124 static void __init spectre_v2_apply_mitigation(void)
2125 {
2126 	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2127 		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
2128 
2129 	if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2130 		if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
2131 			msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
2132 		} else {
2133 			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
2134 			update_spec_ctrl(x86_spec_ctrl_base);
2135 		}
2136 	}
2137 
2138 	switch (spectre_v2_enabled) {
2139 	case SPECTRE_V2_NONE:
2140 		return;
2141 
2142 	case SPECTRE_V2_EIBRS:
2143 		break;
2144 
2145 	case SPECTRE_V2_IBRS:
2146 		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
2147 		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
2148 			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
2149 		break;
2150 
2151 	case SPECTRE_V2_LFENCE:
2152 	case SPECTRE_V2_EIBRS_LFENCE:
2153 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
2154 		fallthrough;
2155 
2156 	case SPECTRE_V2_RETPOLINE:
2157 	case SPECTRE_V2_EIBRS_RETPOLINE:
2158 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
2159 		break;
2160 	}
2161 
2162 	/*
2163 	 * Disable alternate RSB predictions in the kernel when indirect CALLs
2164 	 * and JMPs get protection against BHI and Intramode-BTI, but RET
2165 	 * prediction from a non-RSB predictor is still a risk.
2166 	 */
2167 	if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE ||
2168 	    spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE ||
2169 	    spectre_v2_enabled == SPECTRE_V2_RETPOLINE)
2170 		spec_ctrl_disable_kernel_rrsba();
2171 
2172 	spectre_v2_select_rsb_mitigation(spectre_v2_enabled);
2173 
2174 	/*
2175 	 * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
2176 	 * and Enhanced IBRS protect firmware too, so enable IBRS around
2177 	 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
2178 	 * otherwise enabled.
2179 	 *
2180 	 * Use "spectre_v2_enabled" to check Enhanced IBRS instead of
2181 	 * boot_cpu_has(), because the user might select retpoline on the kernel
2182 	 * command line and, if the CPU supports Enhanced IBRS, the kernel might
2183 	 * unintentionally not enable IBRS around firmware calls.
2184 	 */
2185 	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2186 	    boot_cpu_has(X86_FEATURE_IBPB) &&
2187 	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2188 	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
2189 
2190 		if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) {
2191 			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
2192 			pr_info("Enabling Speculation Barrier for firmware calls\n");
2193 		}
2194 
2195 	} else if (boot_cpu_has(X86_FEATURE_IBRS) &&
2196 		   !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2197 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
2198 		pr_info("Enabling Restricted Speculation for firmware calls\n");
2199 	}
2200 }
2201 
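/* Propagate the STIBP bit from x86_spec_ctrl_base into this CPU's SPEC_CTRL MSR. */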
2202 static void update_stibp_msr(void * __unused)
2203 {
2204 	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
2205 	update_spec_ctrl(val);
2206 }
2207 
2208 /* Update x86_spec_ctrl_base in case SMT state changed. */
2209 static void update_stibp_strict(void)
2210 {
2211 	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
2212 
2213 	if (sched_smt_active())
2214 		mask |= SPEC_CTRL_STIBP;
2215 
2216 	if (mask == x86_spec_ctrl_base)
2217 		return;
2218 
2219 	pr_info("Update user space SMT mitigation: STIBP %s\n",
2220 		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
2221 	x86_spec_ctrl_base = mask;
2222 	on_each_cpu(update_stibp_msr, NULL, 1);
2223 }
2224 
2225 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
2226 static void update_indir_branch_cond(void)
2227 {
2228 	if (sched_smt_active())
2229 		static_branch_enable(&switch_to_cond_stibp);
2230 	else
2231 		static_branch_disable(&switch_to_cond_stibp);
2232 }
2233 
2234 #undef pr_fmt
2235 #define pr_fmt(fmt) fmt
2236 
2237 /* Update the static key controlling the MDS CPU buffer clear in idle */
2238 static void update_mds_branch_idle(void)
2239 {
2240 	/*
2241 	 * Enable the idle clearing if SMT is active on CPUs which are
2242 	 * affected only by MSBDS and not any other MDS variant.
2243 	 *
2244 	 * The other variants cannot be mitigated when SMT is enabled, so
2245 	 * clearing the buffers on idle just to prevent the Store Buffer
2246 	 * repartitioning leak would be a window dressing exercise.
2247 	 */
2248 	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
2249 		return;
2250 
2251 	if (sched_smt_active()) {
2252 		static_branch_enable(&mds_idle_clear);
2253 	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
2254 		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
2255 		static_branch_disable(&mds_idle_clear);
2256 	}
2257 }
2258 
2259 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
2260 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
2261 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
2262 
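/*
 * Re-evaluate SMT dependent mitigation state when siblings come or go:
 * strict/conditional STIBP and the MDS/TAA/MMIO "SMT on" warnings.
 */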
2263 void cpu_bugs_smt_update(void)
2264 {
2265 	mutex_lock(&spec_ctrl_mutex);
2266 
2267 	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
2268 	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
2269 		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
2270 
2271 	switch (spectre_v2_user_stibp) {
2272 	case SPECTRE_V2_USER_NONE:
2273 		break;
2274 	case SPECTRE_V2_USER_STRICT:
2275 	case SPECTRE_V2_USER_STRICT_PREFERRED:
2276 		update_stibp_strict();
2277 		break;
2278 	case SPECTRE_V2_USER_PRCTL:
2279 	case SPECTRE_V2_USER_SECCOMP:
2280 		update_indir_branch_cond();
2281 		break;
2282 	}
2283 
2284 	switch (mds_mitigation) {
2285 	case MDS_MITIGATION_FULL:
2286 	case MDS_MITIGATION_AUTO:
2287 	case MDS_MITIGATION_VMWERV:
2288 		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
2289 			pr_warn_once(MDS_MSG_SMT);
2290 		update_mds_branch_idle();
2291 		break;
2292 	case MDS_MITIGATION_OFF:
2293 		break;
2294 	}
2295 
2296 	switch (taa_mitigation) {
2297 	case TAA_MITIGATION_VERW:
2298 	case TAA_MITIGATION_AUTO:
2299 	case TAA_MITIGATION_UCODE_NEEDED:
2300 		if (sched_smt_active())
2301 			pr_warn_once(TAA_MSG_SMT);
2302 		break;
2303 	case TAA_MITIGATION_TSX_DISABLED:
2304 	case TAA_MITIGATION_OFF:
2305 		break;
2306 	}
2307 
2308 	switch (mmio_mitigation) {
2309 	case MMIO_MITIGATION_VERW:
2310 	case MMIO_MITIGATION_AUTO:
2311 	case MMIO_MITIGATION_UCODE_NEEDED:
2312 		if (sched_smt_active())
2313 			pr_warn_once(MMIO_MSG_SMT);
2314 		break;
2315 	case MMIO_MITIGATION_OFF:
2316 		break;
2317 	}
2318 
2319 	mutex_unlock(&spec_ctrl_mutex);
2320 }
2321 
2322 #undef pr_fmt
2323 #define pr_fmt(fmt)	"Speculative Store Bypass: " fmt
2324 
2325 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
2326 
2327 /* The kernel command line selection */
2328 enum ssb_mitigation_cmd {
2329 	SPEC_STORE_BYPASS_CMD_NONE,
2330 	SPEC_STORE_BYPASS_CMD_AUTO,
2331 	SPEC_STORE_BYPASS_CMD_ON,
2332 	SPEC_STORE_BYPASS_CMD_PRCTL,
2333 	SPEC_STORE_BYPASS_CMD_SECCOMP,
2334 };
2335 
2336 static const char * const ssb_strings[] = {
2337 	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
2338 	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
2339 	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
2340 	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
2341 };
2342 
2343 static const struct {
2344 	const char *option;
2345 	enum ssb_mitigation_cmd cmd;
2346 } ssb_mitigation_options[]  __initconst = {
2347 	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
2348 	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
2349 	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
2350 	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
2351 	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
2352 };
2353 
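/*
 * Parse "spec_store_bypass_disable=" (e.g. "prctl" or "seccomp").  The
 * "nospec_store_bypass_disable" switch and mitigations=off force NONE.
 */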
2354 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
2355 {
2356 	enum ssb_mitigation_cmd cmd;
2357 	char arg[20];
2358 	int ret, i;
2359 
2360 	cmd = IS_ENABLED(CONFIG_MITIGATION_SSB) ?
2361 		SPEC_STORE_BYPASS_CMD_AUTO : SPEC_STORE_BYPASS_CMD_NONE;
2362 	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
2363 	    cpu_mitigations_off()) {
2364 		return SPEC_STORE_BYPASS_CMD_NONE;
2365 	} else {
2366 		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
2367 					  arg, sizeof(arg));
2368 		if (ret < 0)
2369 			return cmd;
2370 
2371 		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
2372 			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
2373 				continue;
2374 
2375 			cmd = ssb_mitigation_options[i].cmd;
2376 			break;
2377 		}
2378 
2379 		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
2380 			pr_err("unknown option (%s). Switching to default mode\n", arg);
2381 			return cmd;
2382 		}
2383 	}
2384 
2385 	return cmd;
2386 }
2387 
2388 static void __init ssb_select_mitigation(void)
2389 {
2390 	enum ssb_mitigation_cmd cmd;
2391 
2392 	if (!boot_cpu_has(X86_FEATURE_SSBD))
2393 		goto out;
2394 
2395 	cmd = ssb_parse_cmdline();
2396 	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
2397 	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
2398 	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
2399 		return;
2400 
2401 	switch (cmd) {
2402 	case SPEC_STORE_BYPASS_CMD_SECCOMP:
2403 		/*
2404 		 * Choose prctl+seccomp as the default mode if seccomp is
2405 		 * enabled.
2406 		 */
2407 		if (IS_ENABLED(CONFIG_SECCOMP))
2408 			ssb_mode = SPEC_STORE_BYPASS_SECCOMP;
2409 		else
2410 			ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2411 		break;
2412 	case SPEC_STORE_BYPASS_CMD_ON:
2413 		ssb_mode = SPEC_STORE_BYPASS_DISABLE;
2414 		break;
2415 	case SPEC_STORE_BYPASS_CMD_AUTO:
2416 	case SPEC_STORE_BYPASS_CMD_PRCTL:
2417 		ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2418 		break;
2419 	case SPEC_STORE_BYPASS_CMD_NONE:
2420 		break;
2421 	}
2422 
2423 out:
2424 	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2425 		pr_info("%s\n", ssb_strings[ssb_mode]);
2426 }
2427 
2428 static void __init ssb_apply_mitigation(void)
2429 {
2430 	/*
2431 	 * We have three CPU feature flags that are in play here:
2432 	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
2433 	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
2434 	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
2435 	 */
2436 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
2437 		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
2438 		/*
2439 		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
2440 		 * use a completely different MSR and bit dependent on family.
2441 		 */
2442 		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
2443 		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
2444 			x86_amd_ssb_disable();
2445 		} else {
2446 			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
2447 			update_spec_ctrl(x86_spec_ctrl_base);
2448 		}
2449 	}
2450 }
2451 
2452 #undef pr_fmt
2453 #define pr_fmt(fmt)     "Speculation prctl: " fmt
2454 
2455 static void task_update_spec_tif(struct task_struct *tsk)
2456 {
2457 	/* Force the update of the real TIF bits */
2458 	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
2459 
2460 	/*
2461 	 * Immediately update the speculation control MSRs for the current
2462 	 * task, but for a non-current task delay setting the CPU
2463 	 * mitigation until it is scheduled next.
2464 	 *
2465 	 * This can only happen for SECCOMP mitigation. For PRCTL it's
2466 	 * always the current task.
2467 	 */
2468 	if (tsk == current)
2469 		speculation_ctrl_update_current();
2470 }
2471 
2472 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
2473 {
2474 
2475 	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2476 		return -EPERM;
2477 
2478 	switch (ctrl) {
2479 	case PR_SPEC_ENABLE:
2480 		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2481 		return 0;
2482 	case PR_SPEC_DISABLE:
2483 		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2484 		return 0;
2485 	default:
2486 		return -ERANGE;
2487 	}
2488 }
2489 
2490 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
2491 {
2492 	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
2493 	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
2494 		return -ENXIO;
2495 
2496 	switch (ctrl) {
2497 	case PR_SPEC_ENABLE:
2498 		/* If speculation is force disabled, enable is not allowed */
2499 		if (task_spec_ssb_force_disable(task))
2500 			return -EPERM;
2501 		task_clear_spec_ssb_disable(task);
2502 		task_clear_spec_ssb_noexec(task);
2503 		task_update_spec_tif(task);
2504 		break;
2505 	case PR_SPEC_DISABLE:
2506 		task_set_spec_ssb_disable(task);
2507 		task_clear_spec_ssb_noexec(task);
2508 		task_update_spec_tif(task);
2509 		break;
2510 	case PR_SPEC_FORCE_DISABLE:
2511 		task_set_spec_ssb_disable(task);
2512 		task_set_spec_ssb_force_disable(task);
2513 		task_clear_spec_ssb_noexec(task);
2514 		task_update_spec_tif(task);
2515 		break;
2516 	case PR_SPEC_DISABLE_NOEXEC:
2517 		if (task_spec_ssb_force_disable(task))
2518 			return -EPERM;
2519 		task_set_spec_ssb_disable(task);
2520 		task_set_spec_ssb_noexec(task);
2521 		task_update_spec_tif(task);
2522 		break;
2523 	default:
2524 		return -ERANGE;
2525 	}
2526 	return 0;
2527 }
2528 
2529 static bool is_spec_ib_user_controlled(void)
2530 {
2531 	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2532 		spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2533 		spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2534 		spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2535 }
2536 
2537 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2538 {
2539 	switch (ctrl) {
2540 	case PR_SPEC_ENABLE:
2541 		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2542 		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2543 			return 0;
2544 
2545 		/*
2546 		 * With strict mode for both IBPB and STIBP, the instruction
2547 		 * code paths avoid checking this task flag and instead,
2548 		 * unconditionally run the instruction. However, STIBP and IBPB
2549 		 * are independent and either can be set to conditionally
2550 		 * enabled regardless of the mode of the other.
2551 		 *
2552 		 * If either is set to conditional, allow the task flag to be
2553 		 * updated, unless it was force-disabled by a previous prctl
2554 		 * call. Currently, this is possible on an AMD CPU which has the
2555 		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2556 		 * kernel is booted with 'spectre_v2_user=seccomp', then
2557 		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2558 		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2559 		 */
2560 		if (!is_spec_ib_user_controlled() ||
2561 		    task_spec_ib_force_disable(task))
2562 			return -EPERM;
2563 
2564 		task_clear_spec_ib_disable(task);
2565 		task_update_spec_tif(task);
2566 		break;
2567 	case PR_SPEC_DISABLE:
2568 	case PR_SPEC_FORCE_DISABLE:
2569 		/*
2570 		 * Indirect branch speculation is always allowed when
2571 		 * mitigation is force disabled.
2572 		 */
2573 		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2574 		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2575 			return -EPERM;
2576 
2577 		if (!is_spec_ib_user_controlled())
2578 			return 0;
2579 
2580 		task_set_spec_ib_disable(task);
2581 		if (ctrl == PR_SPEC_FORCE_DISABLE)
2582 			task_set_spec_ib_force_disable(task);
2583 		task_update_spec_tif(task);
2584 		if (task == current)
2585 			indirect_branch_prediction_barrier();
2586 		break;
2587 	default:
2588 		return -ERANGE;
2589 	}
2590 	return 0;
2591 }
2592 
2593 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2594 			     unsigned long ctrl)
2595 {
2596 	switch (which) {
2597 	case PR_SPEC_STORE_BYPASS:
2598 		return ssb_prctl_set(task, ctrl);
2599 	case PR_SPEC_INDIRECT_BRANCH:
2600 		return ib_prctl_set(task, ctrl);
2601 	case PR_SPEC_L1D_FLUSH:
2602 		return l1d_flush_prctl_set(task, ctrl);
2603 	default:
2604 		return -ENODEV;
2605 	}
2606 }
2607 
2608 #ifdef CONFIG_SECCOMP
2609 void arch_seccomp_spec_mitigate(struct task_struct *task)
2610 {
2611 	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2612 		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2613 	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2614 	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2615 		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2616 }
2617 #endif
2618 
2619 static int l1d_flush_prctl_get(struct task_struct *task)
2620 {
2621 	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2622 		return PR_SPEC_FORCE_DISABLE;
2623 
2624 	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2625 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2626 	else
2627 		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2628 }
2629 
2630 static int ssb_prctl_get(struct task_struct *task)
2631 {
2632 	switch (ssb_mode) {
2633 	case SPEC_STORE_BYPASS_NONE:
2634 		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2635 			return PR_SPEC_ENABLE;
2636 		return PR_SPEC_NOT_AFFECTED;
2637 	case SPEC_STORE_BYPASS_DISABLE:
2638 		return PR_SPEC_DISABLE;
2639 	case SPEC_STORE_BYPASS_SECCOMP:
2640 	case SPEC_STORE_BYPASS_PRCTL:
2641 		if (task_spec_ssb_force_disable(task))
2642 			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2643 		if (task_spec_ssb_noexec(task))
2644 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2645 		if (task_spec_ssb_disable(task))
2646 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2647 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2648 	}
2649 	BUG();
2650 }
2651 
2652 static int ib_prctl_get(struct task_struct *task)
2653 {
2654 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2655 		return PR_SPEC_NOT_AFFECTED;
2656 
2657 	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2658 	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2659 		return PR_SPEC_ENABLE;
2660 	else if (is_spec_ib_user_controlled()) {
2661 		if (task_spec_ib_force_disable(task))
2662 			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2663 		if (task_spec_ib_disable(task))
2664 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2665 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2666 	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2667 	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2668 	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2669 		return PR_SPEC_DISABLE;
2670 	else
2671 		return PR_SPEC_NOT_AFFECTED;
2672 }
2673 
2674 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2675 {
2676 	switch (which) {
2677 	case PR_SPEC_STORE_BYPASS:
2678 		return ssb_prctl_get(task);
2679 	case PR_SPEC_INDIRECT_BRANCH:
2680 		return ib_prctl_get(task);
2681 	case PR_SPEC_L1D_FLUSH:
2682 		return l1d_flush_prctl_get(task);
2683 	default:
2684 		return -ENODEV;
2685 	}
2686 }
2687 
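/* Replicate the boot CPU's SPEC_CTRL and SSBD settings on a starting AP. */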
2688 void x86_spec_ctrl_setup_ap(void)
2689 {
2690 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2691 		update_spec_ctrl(x86_spec_ctrl_base);
2692 
2693 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2694 		x86_amd_ssb_disable();
2695 }
2696 
2697 bool itlb_multihit_kvm_mitigation;
2698 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
2699 
2700 #undef pr_fmt
2701 #define pr_fmt(fmt)	"L1TF: " fmt
2702 
2703 /* Default mitigation for L1TF-affected CPUs */
2704 enum l1tf_mitigations l1tf_mitigation __ro_after_init =
2705 	IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF;
2706 #if IS_ENABLED(CONFIG_KVM_INTEL)
2707 EXPORT_SYMBOL_GPL(l1tf_mitigation);
2708 #endif
2709 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2710 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
2711 
2712 /*
2713  * These CPUs all support 44 bits of physical address space internally in the
2714  * cache but CPUID can report a smaller number of physical address bits.
2715  *
2716  * The L1TF mitigation uses the top most address bit for the inversion of
2717  * non present PTEs. When the installed memory reaches into the top most
2718  * address bit due to memory holes, which has been observed on machines
2719  * which report 36 physical address bits and have 32G RAM installed,
2720  * then the mitigation range check in l1tf_select_mitigation() triggers.
2721  * This is a false positive because the mitigation is still possible due to
2722  * the fact that the cache uses 44bit internally. Use the cache bits
2723  * instead of the reported physical bits and adjust them on the affected
2724  * machines to 44bit if the reported bits are less than 44.
2725  */
2726 static void override_cache_bits(struct cpuinfo_x86 *c)
2727 {
2728 	if (c->x86 != 6)
2729 		return;
2730 
2731 	switch (c->x86_vfm) {
2732 	case INTEL_NEHALEM:
2733 	case INTEL_WESTMERE:
2734 	case INTEL_SANDYBRIDGE:
2735 	case INTEL_IVYBRIDGE:
2736 	case INTEL_HASWELL:
2737 	case INTEL_HASWELL_L:
2738 	case INTEL_HASWELL_G:
2739 	case INTEL_BROADWELL:
2740 	case INTEL_BROADWELL_G:
2741 	case INTEL_SKYLAKE_L:
2742 	case INTEL_SKYLAKE:
2743 	case INTEL_KABYLAKE_L:
2744 	case INTEL_KABYLAKE:
2745 		if (c->x86_cache_bits < 44)
2746 			c->x86_cache_bits = 44;
2747 		break;
2748 	}
2749 }
2750 
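/*
 * Resolve the L1TF mode: OFF when not affected or mitigations are off,
 * otherwise AUTO becomes FLUSH (or FLUSH_NOSMT with mitigations=auto,nosmt).
 */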
2751 static void __init l1tf_select_mitigation(void)
2752 {
2753 	if (!boot_cpu_has_bug(X86_BUG_L1TF) || cpu_mitigations_off()) {
2754 		l1tf_mitigation = L1TF_MITIGATION_OFF;
2755 		return;
2756 	}
2757 
2758 	if (l1tf_mitigation == L1TF_MITIGATION_AUTO) {
2759 		if (cpu_mitigations_auto_nosmt())
2760 			l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2761 		else
2762 			l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2763 	}
2764 }
2765 
2766 static void __init l1tf_apply_mitigation(void)
2767 {
2768 	u64 half_pa;
2769 
2770 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
2771 		return;
2772 
2773 	override_cache_bits(&boot_cpu_data);
2774 
2775 	switch (l1tf_mitigation) {
2776 	case L1TF_MITIGATION_OFF:
2777 	case L1TF_MITIGATION_FLUSH_NOWARN:
2778 	case L1TF_MITIGATION_FLUSH:
2779 	case L1TF_MITIGATION_AUTO:
2780 		break;
2781 	case L1TF_MITIGATION_FLUSH_NOSMT:
2782 	case L1TF_MITIGATION_FULL:
2783 		cpu_smt_disable(false);
2784 		break;
2785 	case L1TF_MITIGATION_FULL_FORCE:
2786 		cpu_smt_disable(true);
2787 		break;
2788 	}
2789 
2790 #if CONFIG_PGTABLE_LEVELS == 2
2791 	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
2792 	return;
2793 #endif
2794 
2795 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
2796 	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
2797 			e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
2798 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
2799 		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
2800 				half_pa);
2801 		pr_info("However, doing so will make a part of your RAM unusable.\n");
2802 		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
2803 		return;
2804 	}
2805 
2806 	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
2807 }
2808 
2809 static int __init l1tf_cmdline(char *str)
2810 {
2811 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
2812 		return 0;
2813 
2814 	if (!str)
2815 		return -EINVAL;
2816 
2817 	if (!strcmp(str, "off"))
2818 		l1tf_mitigation = L1TF_MITIGATION_OFF;
2819 	else if (!strcmp(str, "flush,nowarn"))
2820 		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
2821 	else if (!strcmp(str, "flush"))
2822 		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2823 	else if (!strcmp(str, "flush,nosmt"))
2824 		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2825 	else if (!strcmp(str, "full"))
2826 		l1tf_mitigation = L1TF_MITIGATION_FULL;
2827 	else if (!strcmp(str, "full,force"))
2828 		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
2829 
2830 	return 0;
2831 }
2832 early_param("l1tf", l1tf_cmdline);
2833 
2834 #undef pr_fmt
2835 #define pr_fmt(fmt)	"Speculative Return Stack Overflow: " fmt
2836 
2837 enum srso_mitigation {
2838 	SRSO_MITIGATION_NONE,
2839 	SRSO_MITIGATION_AUTO,
2840 	SRSO_MITIGATION_UCODE_NEEDED,
2841 	SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
2842 	SRSO_MITIGATION_MICROCODE,
2843 	SRSO_MITIGATION_SAFE_RET,
2844 	SRSO_MITIGATION_IBPB,
2845 	SRSO_MITIGATION_IBPB_ON_VMEXIT,
2846 	SRSO_MITIGATION_BP_SPEC_REDUCE,
2847 };
2848 
2849 static const char * const srso_strings[] = {
2850 	[SRSO_MITIGATION_NONE]			= "Vulnerable",
2851 	[SRSO_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
2852 	[SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED]	= "Vulnerable: Safe RET, no microcode",
2853 	[SRSO_MITIGATION_MICROCODE]		= "Vulnerable: Microcode, no safe RET",
2854 	[SRSO_MITIGATION_SAFE_RET]		= "Mitigation: Safe RET",
2855 	[SRSO_MITIGATION_IBPB]			= "Mitigation: IBPB",
2856 	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only",
2857 	[SRSO_MITIGATION_BP_SPEC_REDUCE]	= "Mitigation: Reduced Speculation"
2858 };
2859 
2860 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
2861 
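/*
 * Parse "spec_rstack_overflow=" (e.g. "safe-ret" or "ibpb-vmexit") into an
 * srso_mitigation value; unknown strings are reported and ignored.
 */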
2862 static int __init srso_parse_cmdline(char *str)
2863 {
2864 	if (!str)
2865 		return -EINVAL;
2866 
2867 	if (!strcmp(str, "off"))
2868 		srso_mitigation = SRSO_MITIGATION_NONE;
2869 	else if (!strcmp(str, "microcode"))
2870 		srso_mitigation = SRSO_MITIGATION_MICROCODE;
2871 	else if (!strcmp(str, "safe-ret"))
2872 		srso_mitigation = SRSO_MITIGATION_SAFE_RET;
2873 	else if (!strcmp(str, "ibpb"))
2874 		srso_mitigation = SRSO_MITIGATION_IBPB;
2875 	else if (!strcmp(str, "ibpb-vmexit"))
2876 		srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
2877 	else
2878 		pr_err("Ignoring unknown SRSO option (%s).", str);
2879 
2880 	return 0;
2881 }
2882 early_param("spec_rstack_overflow", srso_parse_cmdline);
2883 
2884 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
2885 
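/*
 * Resolve the SRSO selection: AUTO defaults to Safe RET, Zen1/2 with SMT
 * off and the IBPB microcode are not vulnerable, and selections lacking
 * the required Kconfig or microcode are downgraded accordingly.
 */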
2886 static void __init srso_select_mitigation(void)
2887 {
2888 	bool has_microcode;
2889 
2890 	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
2891 		srso_mitigation = SRSO_MITIGATION_NONE;
2892 
2893 	if (srso_mitigation == SRSO_MITIGATION_NONE)
2894 		return;
2895 
2896 	if (srso_mitigation == SRSO_MITIGATION_AUTO)
2897 		srso_mitigation = SRSO_MITIGATION_SAFE_RET;
2898 
2899 	has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
2900 	if (has_microcode) {
2901 		/*
2902 		 * Zen1/2 with SMT off aren't vulnerable after the right
2903 		 * IBPB microcode has been applied.
2904 		 */
2905 		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
2906 			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
2907 			srso_mitigation = SRSO_MITIGATION_NONE;
2908 			return;
2909 		}
2910 	} else {
2911 		pr_warn("IBPB-extending microcode not applied!\n");
2912 		pr_warn(SRSO_NOTICE);
2913 	}
2914 
2915 	switch (srso_mitigation) {
2916 	case SRSO_MITIGATION_SAFE_RET:
2917 		if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
2918 			srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
2919 			goto ibpb_on_vmexit;
2920 		}
2921 
2922 		if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
2923 			pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
2924 			srso_mitigation = SRSO_MITIGATION_NONE;
2925 		}
2926 
2927 		if (!has_microcode)
2928 			srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
2929 		break;
2930 ibpb_on_vmexit:
2931 	case SRSO_MITIGATION_IBPB_ON_VMEXIT:
2932 		if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
2933 			pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
2934 			srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
2935 			break;
2936 		}
2937 		fallthrough;
2938 	case SRSO_MITIGATION_IBPB:
2939 		if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
2940 			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
2941 			srso_mitigation = SRSO_MITIGATION_NONE;
2942 		}
2943 
2944 		if (!has_microcode)
2945 			srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
2946 		break;
2947 	default:
2948 		break;
2949 	}
2950 }
2951 
2952 static void __init srso_update_mitigation(void)
2953 {
2954 	/* If retbleed is using IBPB, that works for SRSO as well */
2955 	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB &&
2956 	    boot_cpu_has(X86_FEATURE_IBPB_BRTYPE))
2957 		srso_mitigation = SRSO_MITIGATION_IBPB;
2958 
2959 	if (boot_cpu_has_bug(X86_BUG_SRSO) &&
2960 	    !cpu_mitigations_off() &&
2961 	    !boot_cpu_has(X86_FEATURE_SRSO_NO))
2962 		pr_info("%s\n", srso_strings[srso_mitigation]);
2963 }
2964 
2965 static void __init srso_apply_mitigation(void)
2966 {
2967 	/*
2968 	 * Clear the feature flag if this mitigation is not selected as that
2969 	 * feature flag controls the BpSpecReduce MSR bit toggling in KVM.
2970 	 */
2971 	if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
2972 		setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
2973 
2974 	if (srso_mitigation == SRSO_MITIGATION_NONE) {
2975 		if (boot_cpu_has(X86_FEATURE_SBPB))
2976 			x86_pred_cmd = PRED_CMD_SBPB;
2977 		return;
2978 	}
2979 
2980 	switch (srso_mitigation) {
2981 	case SRSO_MITIGATION_SAFE_RET:
2982 	case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
2983 		/*
2984 		 * Enable the return thunk for generated code
2985 		 * like ftrace, static_call, etc.
2986 		 */
2987 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
2988 		setup_force_cpu_cap(X86_FEATURE_UNRET);
2989 
2990 		if (boot_cpu_data.x86 == 0x19) {
2991 			setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
2992 			set_return_thunk(srso_alias_return_thunk);
2993 		} else {
2994 			setup_force_cpu_cap(X86_FEATURE_SRSO);
2995 			set_return_thunk(srso_return_thunk);
2996 		}
2997 		break;
2998 	case SRSO_MITIGATION_IBPB:
2999 		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
3000 		/*
3001 		 * IBPB on entry already obviates the need for
3002 		 * software-based untraining so clear those in case some
3003 		 * other mitigation like Retbleed has selected them.
3004 		 */
3005 		setup_clear_cpu_cap(X86_FEATURE_UNRET);
3006 		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
3007 		fallthrough;
3008 	case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3009 		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
3010 		/*
3011 		 * There is no need for RSB filling: entry_ibpb() ensures
3012 		 * all predictions, including the RSB, are invalidated,
3013 		 * regardless of IBPB implementation.
3014 		 */
3015 		setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
3016 		break;
3017 	default:
3018 		break;
3019 	}
3020 }
3021 
3022 #undef pr_fmt
3023 #define pr_fmt(fmt) fmt
3024 
3025 #ifdef CONFIG_SYSFS
3026 
3027 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
3028 
3029 #if IS_ENABLED(CONFIG_KVM_INTEL)
3030 static const char * const l1tf_vmx_states[] = {
3031 	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
3032 	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
3033 	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
3034 	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
3035 	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
3036 	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
3037 };
3038 
3039 static ssize_t l1tf_show_state(char *buf)
3040 {
3041 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
3042 		return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3043 
3044 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
3045 	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
3046 	     sched_smt_active())) {
3047 		return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
3048 				  l1tf_vmx_states[l1tf_vmx_mitigation]);
3049 	}
3050 
3051 	return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
3052 			  l1tf_vmx_states[l1tf_vmx_mitigation],
3053 			  sched_smt_active() ? "vulnerable" : "disabled");
3054 }
3055 
3056 static ssize_t itlb_multihit_show_state(char *buf)
3057 {
3058 	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
3059 	    !boot_cpu_has(X86_FEATURE_VMX))
3060 		return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
3061 	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
3062 		return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
3063 	else if (itlb_multihit_kvm_mitigation)
3064 		return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
3065 	else
3066 		return sysfs_emit(buf, "KVM: Vulnerable\n");
3067 }
3068 #else
3069 static ssize_t l1tf_show_state(char *buf)
3070 {
3071 	return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3072 }
3073 
3074 static ssize_t itlb_multihit_show_state(char *buf)
3075 {
3076 	return sysfs_emit(buf, "Processor vulnerable\n");
3077 }
3078 #endif
3079 
3080 static ssize_t mds_show_state(char *buf)
3081 {
3082 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3083 		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3084 				  mds_strings[mds_mitigation]);
3085 	}
3086 
3087 	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
3088 		return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3089 				  (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
3090 				   sched_smt_active() ? "mitigated" : "disabled"));
3091 	}
3092 
3093 	return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3094 			  sched_smt_active() ? "vulnerable" : "disabled");
3095 }
3096 
3097 static ssize_t tsx_async_abort_show_state(char *buf)
3098 {
3099 	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
3100 	    (taa_mitigation == TAA_MITIGATION_OFF))
3101 		return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
3102 
3103 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3104 		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3105 				  taa_strings[taa_mitigation]);
3106 	}
3107 
3108 	return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
3109 			  sched_smt_active() ? "vulnerable" : "disabled");
3110 }
3111 
3112 static ssize_t mmio_stale_data_show_state(char *buf)
3113 {
3114 	if (mmio_mitigation == MMIO_MITIGATION_OFF)
3115 		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
3116 
3117 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3118 		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3119 				  mmio_strings[mmio_mitigation]);
3120 	}
3121 
3122 	return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
3123 			  sched_smt_active() ? "vulnerable" : "disabled");
3124 }
3125 
3126 static ssize_t rfds_show_state(char *buf)
3127 {
3128 	return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
3129 }
3130 
3131 static ssize_t old_microcode_show_state(char *buf)
3132 {
3133 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
3134 		return sysfs_emit(buf, "Unknown: running under hypervisor\n");
3135 
3136 	return sysfs_emit(buf, "Vulnerable\n");
3137 }
3138 
3139 static ssize_t its_show_state(char *buf)
3140 {
3141 	return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
3142 }
3143 
3144 static char *stibp_state(void)
3145 {
3146 	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
3147 	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
3148 		return "";
3149 
3150 	switch (spectre_v2_user_stibp) {
3151 	case SPECTRE_V2_USER_NONE:
3152 		return "; STIBP: disabled";
3153 	case SPECTRE_V2_USER_STRICT:
3154 		return "; STIBP: forced";
3155 	case SPECTRE_V2_USER_STRICT_PREFERRED:
3156 		return "; STIBP: always-on";
3157 	case SPECTRE_V2_USER_PRCTL:
3158 	case SPECTRE_V2_USER_SECCOMP:
3159 		if (static_key_enabled(&switch_to_cond_stibp))
3160 			return "; STIBP: conditional";
3161 	}
3162 	return "";
3163 }
3164 
3165 static char *ibpb_state(void)
3166 {
3167 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
3168 		if (static_key_enabled(&switch_mm_always_ibpb))
3169 			return "; IBPB: always-on";
3170 		if (static_key_enabled(&switch_mm_cond_ibpb))
3171 			return "; IBPB: conditional";
3172 		return "; IBPB: disabled";
3173 	}
3174 	return "";
3175 }
3176 
3177 static char *pbrsb_eibrs_state(void)
3178 {
3179 	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
3180 		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
3181 		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
3182 			return "; PBRSB-eIBRS: SW sequence";
3183 		else
3184 			return "; PBRSB-eIBRS: Vulnerable";
3185 	} else {
3186 		return "; PBRSB-eIBRS: Not affected";
3187 	}
3188 }
3189 
3190 static const char *spectre_bhi_state(void)
3191 {
3192 	if (!boot_cpu_has_bug(X86_BUG_BHI))
3193 		return "; BHI: Not affected";
3194 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
3195 		return "; BHI: BHI_DIS_S";
3196 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
3197 		return "; BHI: SW loop, KVM: SW loop";
3198 	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
3199 		 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
3200 		 rrsba_disabled)
3201 		return "; BHI: Retpoline";
3202 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT))
3203 		return "; BHI: Vulnerable, KVM: SW loop";
3204 
3205 	return "; BHI: Vulnerable";
3206 }
3207 
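/*
 * Compose the spectre_v2 sysfs line from the base mitigation string plus the
 * IBPB/IBRS_FW/STIBP/RSB/PBRSB/BHI suffixes built above, e.g.
 * "Mitigation: Retpolines; IBPB: conditional; STIBP: conditional; RSB filling; ...".
 */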
3208 static ssize_t spectre_v2_show_state(char *buf)
3209 {
3210 	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
3211 		return sysfs_emit(buf, "Vulnerable: LFENCE\n");
3212 
3213 	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
3214 		return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
3215 
3216 	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
3217 	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
3218 		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
3219 
3220 	return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
3221 			  spectre_v2_strings[spectre_v2_enabled],
3222 			  ibpb_state(),
3223 			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
3224 			  stibp_state(),
3225 			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
3226 			  pbrsb_eibrs_state(),
3227 			  spectre_bhi_state(),
3228 			  /* this should always be at the end */
3229 			  spectre_v2_module_string());
3230 }
3231 
3232 static ssize_t srbds_show_state(char *buf)
3233 {
3234 	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
3235 }
3236 
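/*
 * For the untrained return thunk and IBPB retbleed mitigations the report
 * also covers the SMT/STIBP situation; on non-AMD/Hygon parts these
 * mitigations are reported as vulnerable.
 */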
3237 static ssize_t retbleed_show_state(char *buf)
3238 {
3239 	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
3240 	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
3241 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
3242 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
3243 			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
3244 
3245 		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
3246 				  !sched_smt_active() ? "disabled" :
3247 				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
3248 				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
3249 				  "enabled with STIBP protection" : "vulnerable");
3250 	}
3251 
3252 	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
3253 }
3254 
3255 static ssize_t srso_show_state(char *buf)
3256 {
3257 	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
3258 		return sysfs_emit(buf, "Mitigation: SMT disabled\n");
3259 
3260 	return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
3261 }
3262 
3263 static ssize_t gds_show_state(char *buf)
3264 {
3265 	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
3266 }
3267 
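/*
 * Common handler behind the per-vulnerability sysfs attributes: unaffected
 * CPUs report "Not affected", known bugs dispatch to their show helpers,
 * and anything else falls back to "Vulnerable".
 */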
3268 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
3269 			       char *buf, unsigned int bug)
3270 {
3271 	if (!boot_cpu_has_bug(bug))
3272 		return sysfs_emit(buf, "Not affected\n");
3273 
3274 	switch (bug) {
3275 	case X86_BUG_CPU_MELTDOWN:
3276 		if (boot_cpu_has(X86_FEATURE_PTI))
3277 			return sysfs_emit(buf, "Mitigation: PTI\n");
3278 
3279 		if (hypervisor_is_type(X86_HYPER_XEN_PV))
3280 			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
3281 
3282 		break;
3283 
3284 	case X86_BUG_SPECTRE_V1:
3285 		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
3286 
3287 	case X86_BUG_SPECTRE_V2:
3288 		return spectre_v2_show_state(buf);
3289 
3290 	case X86_BUG_SPEC_STORE_BYPASS:
3291 		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
3292 
3293 	case X86_BUG_L1TF:
3294 		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
3295 			return l1tf_show_state(buf);
3296 		break;
3297 
3298 	case X86_BUG_MDS:
3299 		return mds_show_state(buf);
3300 
3301 	case X86_BUG_TAA:
3302 		return tsx_async_abort_show_state(buf);
3303 
3304 	case X86_BUG_ITLB_MULTIHIT:
3305 		return itlb_multihit_show_state(buf);
3306 
3307 	case X86_BUG_SRBDS:
3308 		return srbds_show_state(buf);
3309 
3310 	case X86_BUG_MMIO_STALE_DATA:
3311 		return mmio_stale_data_show_state(buf);
3312 
3313 	case X86_BUG_RETBLEED:
3314 		return retbleed_show_state(buf);
3315 
3316 	case X86_BUG_SRSO:
3317 		return srso_show_state(buf);
3318 
3319 	case X86_BUG_GDS:
3320 		return gds_show_state(buf);
3321 
3322 	case X86_BUG_RFDS:
3323 		return rfds_show_state(buf);
3324 
3325 	case X86_BUG_OLD_MICROCODE:
3326 		return old_microcode_show_state(buf);
3327 
3328 	case X86_BUG_ITS:
3329 		return its_show_state(buf);
3330 
3331 	default:
3332 		break;
3333 	}
3334 
3335 	return sysfs_emit(buf, "Vulnerable\n");
3336 }
3337 
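/*
 * sysfs show functions for /sys/devices/system/cpu/vulnerabilities/*.
 * Each one simply forwards to cpu_show_common() with its bug bit.
 */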
3338 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
3339 {
3340 	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
3341 }
3342 
3343 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
3344 {
3345 	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
3346 }
3347 
3348 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
3349 {
3350 	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
3351 }
3352 
3353 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
3354 {
3355 	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
3356 }
3357 
3358 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
3359 {
3360 	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
3361 }
3362 
3363 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
3364 {
3365 	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
3366 }
3367 
3368 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
3369 {
3370 	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
3371 }
3372 
3373 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
3374 {
3375 	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
3376 }
3377 
3378 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
3379 {
3380 	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
3381 }
3382 
3383 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
3384 {
3385 	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
3386 }
3387 
3388 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
3389 {
3390 	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
3391 }
3392 
3393 ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
3394 {
3395 	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
3396 }
3397 
3398 ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
3399 {
3400 	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
3401 }
3402 
3403 ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
3404 {
3405 	return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
3406 }
3407 
3408 ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
3409 {
3410 	return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
3411 }
3412 
3413 ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
3414 {
3415 	return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
3416 }
3417 #endif
3418 
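/*
 * Backing call for the warning return thunk: reaching this at runtime means
 * returns were not patched over to the selected return thunk, hence the
 * one-time warning.
 */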
3419 void __warn_thunk(void)
3420 {
3421 	WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
3422 }
3423