xref: /linux/arch/x86/kernel/cpu/bugs.c (revision 159a8bb06f7bb298da1aacc99975e9817e2cf02c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 1994  Linus Torvalds
4  *
5  *  Cyrix stuff, June 1998 by:
6  *	- Rafael R. Reilova (moved everything from head.S),
7  *        <rreilova@ececs.uc.edu>
8  *	- Channing Corn (tests & fixes),
9  *	- Andrew D. Balsa (code cleanup).
10  */
11 #include <linux/init.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/nospec.h>
15 #include <linux/prctl.h>
16 #include <linux/sched/smt.h>
17 #include <linux/pgtable.h>
18 #include <linux/bpf.h>
19 
20 #include <asm/spec-ctrl.h>
21 #include <asm/cmdline.h>
22 #include <asm/bugs.h>
23 #include <asm/processor.h>
24 #include <asm/processor-flags.h>
25 #include <asm/fpu/api.h>
26 #include <asm/msr.h>
27 #include <asm/vmx.h>
28 #include <asm/paravirt.h>
29 #include <asm/intel-family.h>
30 #include <asm/e820/api.h>
31 #include <asm/hypervisor.h>
32 #include <asm/tlbflush.h>
33 #include <asm/cpu.h>
34 
35 #include "cpu.h"
36 
37 static void __init spectre_v1_select_mitigation(void);
38 static void __init spectre_v2_select_mitigation(void);
39 static void __init retbleed_select_mitigation(void);
40 static void __init spectre_v2_user_select_mitigation(void);
41 static void __init ssb_select_mitigation(void);
42 static void __init l1tf_select_mitigation(void);
43 static void __init mds_select_mitigation(void);
44 static void __init md_clear_update_mitigation(void);
45 static void __init md_clear_select_mitigation(void);
46 static void __init taa_select_mitigation(void);
47 static void __init mmio_select_mitigation(void);
48 static void __init srbds_select_mitigation(void);
49 static void __init l1d_flush_select_mitigation(void);
50 static void __init srso_select_mitigation(void);
51 static void __init gds_select_mitigation(void);
52 
53 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
54 u64 x86_spec_ctrl_base;
55 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
56 
57 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
58 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
59 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
60 
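/*
 * Command written to MSR_IA32_PRED_CMD when an IBPB is issued. Note: this may
 * be switched to the lighter-weight SBPB command by the SRSO mitigation (see
 * srso_select_mitigation() below) on CPUs that support it.
 */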
61 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
62 EXPORT_SYMBOL_GPL(x86_pred_cmd);
63 
64 static DEFINE_MUTEX(spec_ctrl_mutex);
65 
66 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
67 static void update_spec_ctrl(u64 val)
68 {
69 	this_cpu_write(x86_spec_ctrl_current, val);
70 	wrmsrl(MSR_IA32_SPEC_CTRL, val);
71 }
72 
73 /*
74  * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
75  * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
76  */
77 void update_spec_ctrl_cond(u64 val)
78 {
79 	if (this_cpu_read(x86_spec_ctrl_current) == val)
80 		return;
81 
82 	this_cpu_write(x86_spec_ctrl_current, val);
83 
84 	/*
85 	 * When KERNEL_IBRS is enabled this MSR is written on return-to-user;
86 	 * unless forced, the update can be delayed until that time.
87 	 */
88 	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
89 		wrmsrl(MSR_IA32_SPEC_CTRL, val);
90 }
91 
92 noinstr u64 spec_ctrl_current(void)
93 {
94 	return this_cpu_read(x86_spec_ctrl_current);
95 }
96 EXPORT_SYMBOL_GPL(spec_ctrl_current);
97 
98 /*
99  * AMD specific MSR info for Speculative Store Bypass control.
100  * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
101  */
102 u64 __ro_after_init x86_amd_ls_cfg_base;
103 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
104 
105 /* Control conditional STIBP in switch_to() */
106 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
107 /* Control conditional IBPB in switch_mm() */
108 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
109 /* Control unconditional IBPB in switch_mm() */
110 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
111 
112 /* Control MDS CPU buffer clear before returning to user space */
113 DEFINE_STATIC_KEY_FALSE(mds_user_clear);
114 EXPORT_SYMBOL_GPL(mds_user_clear);
115 /* Control MDS CPU buffer clear before idling (halt, mwait) */
116 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
117 EXPORT_SYMBOL_GPL(mds_idle_clear);
118 
119 /*
120  * Controls whether L1D flush based mitigations are enabled,
121  * based on HW features and the admin setting via the boot parameter;
122  * defaults to false.
123  */
124 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
125 
126 /* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
127 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
128 EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
129 
130 void __init cpu_select_mitigations(void)
131 {
132 	/*
133 	 * Read the SPEC_CTRL MSR to account for reserved bits which may
134 	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
135 	 * init code as it is not enumerated and depends on the family.
136 	 */
137 	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
138 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
139 
140 		/*
141 		 * A previously running kernel (kexec) may have some controls
142 		 * turned ON. Clear them and let the mitigations setup below
143 		 * rediscover them based on configuration.
144 		 */
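		/*
		 * SPEC_CTRL_MITIGATIONS_MASK is expected to cover the bits the
		 * mitigation code below may set again (e.g. IBRS, STIBP, SSBD);
		 * any reserved bits read above stay in x86_spec_ctrl_base.
		 */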
145 		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
146 	}
147 
148 	/* Select the proper CPU mitigations before patching alternatives: */
149 	spectre_v1_select_mitigation();
150 	spectre_v2_select_mitigation();
151 	/*
152 	 * retbleed_select_mitigation() relies on the state set by
153 	 * spectre_v2_select_mitigation(); specifically it wants to know about
154 	 * spectre_v2=ibrs.
155 	 */
156 	retbleed_select_mitigation();
157 	/*
158 	 * spectre_v2_user_select_mitigation() relies on the state set by
159 	 * retbleed_select_mitigation(); specifically the STIBP selection is
160 	 * forced for UNRET or IBPB.
161 	 */
162 	spectre_v2_user_select_mitigation();
163 	ssb_select_mitigation();
164 	l1tf_select_mitigation();
165 	md_clear_select_mitigation();
166 	srbds_select_mitigation();
167 	l1d_flush_select_mitigation();
168 	srso_select_mitigation();
169 	gds_select_mitigation();
170 }
171 
172 /*
173  * NOTE: This function is *only* called for SVM, since Intel uses
174  * MSR_IA32_SPEC_CTRL for SSBD.
175  */
176 void
177 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
178 {
179 	u64 guestval, hostval;
180 	struct thread_info *ti = current_thread_info();
181 
182 	/*
183 	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
184 	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
185 	 */
186 	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
187 	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
188 		return;
189 
190 	/*
191 	 * If the host has SSBD mitigation enabled, force it in the host's
192 	 * virtual MSR value. If it's not permanently enabled, evaluate
193 	 * current's TIF_SSBD thread flag.
194 	 */
195 	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
196 		hostval = SPEC_CTRL_SSBD;
197 	else
198 		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
199 
200 	/* Sanitize the guest value */
201 	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
202 
203 	if (hostval != guestval) {
204 		unsigned long tif;
205 
206 		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
207 				 ssbd_spec_ctrl_to_tif(hostval);
208 
209 		speculation_ctrl_update(tif);
210 	}
211 }
212 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
213 
214 static void x86_amd_ssb_disable(void)
215 {
216 	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
217 
218 	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
219 		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
220 	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
221 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
222 }
223 
224 #undef pr_fmt
225 #define pr_fmt(fmt)	"MDS: " fmt
226 
227 /* Default mitigation for MDS-affected CPUs */
228 static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
229 static bool mds_nosmt __ro_after_init = false;
230 
231 static const char * const mds_strings[] = {
232 	[MDS_MITIGATION_OFF]	= "Vulnerable",
233 	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
234 	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
235 };
236 
237 static void __init mds_select_mitigation(void)
238 {
239 	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
240 		mds_mitigation = MDS_MITIGATION_OFF;
241 		return;
242 	}
243 
244 	if (mds_mitigation == MDS_MITIGATION_FULL) {
245 		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
246 			mds_mitigation = MDS_MITIGATION_VMWERV;
247 
248 		static_branch_enable(&mds_user_clear);
249 
250 		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
251 		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
252 			cpu_smt_disable(false);
253 	}
254 }
255 
256 static int __init mds_cmdline(char *str)
257 {
258 	if (!boot_cpu_has_bug(X86_BUG_MDS))
259 		return 0;
260 
261 	if (!str)
262 		return -EINVAL;
263 
264 	if (!strcmp(str, "off"))
265 		mds_mitigation = MDS_MITIGATION_OFF;
266 	else if (!strcmp(str, "full"))
267 		mds_mitigation = MDS_MITIGATION_FULL;
268 	else if (!strcmp(str, "full,nosmt")) {
269 		mds_mitigation = MDS_MITIGATION_FULL;
270 		mds_nosmt = true;
271 	}
272 
273 	return 0;
274 }
275 early_param("mds", mds_cmdline);
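
/*
 * For example, booting with "mds=full,nosmt" selects MDS_MITIGATION_FULL and
 * additionally requests SMT to be disabled in mds_select_mitigation(), unless
 * the CPU is only affected by MSBDS.
 */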
276 
277 #undef pr_fmt
278 #define pr_fmt(fmt)	"TAA: " fmt
279 
280 enum taa_mitigations {
281 	TAA_MITIGATION_OFF,
282 	TAA_MITIGATION_UCODE_NEEDED,
283 	TAA_MITIGATION_VERW,
284 	TAA_MITIGATION_TSX_DISABLED,
285 };
286 
287 /* Default mitigation for TAA-affected CPUs */
288 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
289 static bool taa_nosmt __ro_after_init;
290 
291 static const char * const taa_strings[] = {
292 	[TAA_MITIGATION_OFF]		= "Vulnerable",
293 	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
294 	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
295 	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
296 };
297 
298 static void __init taa_select_mitigation(void)
299 {
300 	u64 ia32_cap;
301 
302 	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
303 		taa_mitigation = TAA_MITIGATION_OFF;
304 		return;
305 	}
306 
307 	/* TSX previously disabled by tsx=off */
308 	if (!boot_cpu_has(X86_FEATURE_RTM)) {
309 		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
310 		return;
311 	}
312 
313 	if (cpu_mitigations_off()) {
314 		taa_mitigation = TAA_MITIGATION_OFF;
315 		return;
316 	}
317 
318 	/*
319 	 * TAA mitigation via VERW is turned off if both
320 	 * tsx_async_abort=off and mds=off are specified.
321 	 */
322 	if (taa_mitigation == TAA_MITIGATION_OFF &&
323 	    mds_mitigation == MDS_MITIGATION_OFF)
324 		return;
325 
326 	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
327 		taa_mitigation = TAA_MITIGATION_VERW;
328 	else
329 		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
330 
331 	/*
332 	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
333 	 * A microcode update fixes this behavior to clear CPU buffers. It also
334 	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
335 	 * ARCH_CAP_TSX_CTRL_MSR bit.
336 	 *
337 	 * On MDS_NO=1 CPUs, if ARCH_CAP_TSX_CTRL_MSR is not set, a microcode
338 	 * update is required.
339 	 */
340 	ia32_cap = x86_read_arch_cap_msr();
341 	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
342 	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
343 		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
344 
345 	/*
346 	 * TSX is enabled, select alternate mitigation for TAA which is
347 	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
348 	 *
349 	 * For guests that can't determine whether the correct microcode is
350 	 * present on the host, enable the mitigation for UCODE_NEEDED as well.
351 	 */
352 	static_branch_enable(&mds_user_clear);
353 
354 	if (taa_nosmt || cpu_mitigations_auto_nosmt())
355 		cpu_smt_disable(false);
356 }
357 
358 static int __init tsx_async_abort_parse_cmdline(char *str)
359 {
360 	if (!boot_cpu_has_bug(X86_BUG_TAA))
361 		return 0;
362 
363 	if (!str)
364 		return -EINVAL;
365 
366 	if (!strcmp(str, "off")) {
367 		taa_mitigation = TAA_MITIGATION_OFF;
368 	} else if (!strcmp(str, "full")) {
369 		taa_mitigation = TAA_MITIGATION_VERW;
370 	} else if (!strcmp(str, "full,nosmt")) {
371 		taa_mitigation = TAA_MITIGATION_VERW;
372 		taa_nosmt = true;
373 	}
374 
375 	return 0;
376 }
377 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
378 
379 #undef pr_fmt
380 #define pr_fmt(fmt)	"MMIO Stale Data: " fmt
381 
382 enum mmio_mitigations {
383 	MMIO_MITIGATION_OFF,
384 	MMIO_MITIGATION_UCODE_NEEDED,
385 	MMIO_MITIGATION_VERW,
386 };
387 
388 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
389 static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
390 static bool mmio_nosmt __ro_after_init = false;
391 
392 static const char * const mmio_strings[] = {
393 	[MMIO_MITIGATION_OFF]		= "Vulnerable",
394 	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
395 	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
396 };
397 
398 static void __init mmio_select_mitigation(void)
399 {
400 	u64 ia32_cap;
401 
402 	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
403 	     boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
404 	     cpu_mitigations_off()) {
405 		mmio_mitigation = MMIO_MITIGATION_OFF;
406 		return;
407 	}
408 
409 	if (mmio_mitigation == MMIO_MITIGATION_OFF)
410 		return;
411 
412 	ia32_cap = x86_read_arch_cap_msr();
413 
414 	/*
415 	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
416 	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
417 	 */
418 	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
419 					      boot_cpu_has(X86_FEATURE_RTM)))
420 		static_branch_enable(&mds_user_clear);
421 	else
422 		static_branch_enable(&mmio_stale_data_clear);
423 
424 	/*
425 	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
426 	 * be propagated to uncore buffers, clearing the Fill buffers on idle
427 	 * is required irrespective of SMT state.
428 	 */
429 	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
430 		static_branch_enable(&mds_idle_clear);
431 
432 	/*
433 	 * Check if the system has the right microcode.
434 	 *
435 	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
436 	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
437 	 * affected systems.
438 	 */
439 	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
440 	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
441 	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
442 	     !(ia32_cap & ARCH_CAP_MDS_NO)))
443 		mmio_mitigation = MMIO_MITIGATION_VERW;
444 	else
445 		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
446 
447 	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
448 		cpu_smt_disable(false);
449 }
450 
451 static int __init mmio_stale_data_parse_cmdline(char *str)
452 {
453 	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
454 		return 0;
455 
456 	if (!str)
457 		return -EINVAL;
458 
459 	if (!strcmp(str, "off")) {
460 		mmio_mitigation = MMIO_MITIGATION_OFF;
461 	} else if (!strcmp(str, "full")) {
462 		mmio_mitigation = MMIO_MITIGATION_VERW;
463 	} else if (!strcmp(str, "full,nosmt")) {
464 		mmio_mitigation = MMIO_MITIGATION_VERW;
465 		mmio_nosmt = true;
466 	}
467 
468 	return 0;
469 }
470 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
471 
472 #undef pr_fmt
473 #define pr_fmt(fmt)     "" fmt
474 
475 static void __init md_clear_update_mitigation(void)
476 {
477 	if (cpu_mitigations_off())
478 		return;
479 
480 	if (!static_key_enabled(&mds_user_clear))
481 		goto out;
482 
483 	/*
484 	 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
485 	 * mitigation, if necessary.
486 	 */
487 	if (mds_mitigation == MDS_MITIGATION_OFF &&
488 	    boot_cpu_has_bug(X86_BUG_MDS)) {
489 		mds_mitigation = MDS_MITIGATION_FULL;
490 		mds_select_mitigation();
491 	}
492 	if (taa_mitigation == TAA_MITIGATION_OFF &&
493 	    boot_cpu_has_bug(X86_BUG_TAA)) {
494 		taa_mitigation = TAA_MITIGATION_VERW;
495 		taa_select_mitigation();
496 	}
497 	if (mmio_mitigation == MMIO_MITIGATION_OFF &&
498 	    boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
499 		mmio_mitigation = MMIO_MITIGATION_VERW;
500 		mmio_select_mitigation();
501 	}
502 out:
503 	if (boot_cpu_has_bug(X86_BUG_MDS))
504 		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
505 	if (boot_cpu_has_bug(X86_BUG_TAA))
506 		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
507 	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
508 		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
509 	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
510 		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
511 }
512 
513 static void __init md_clear_select_mitigation(void)
514 {
515 	mds_select_mitigation();
516 	taa_select_mitigation();
517 	mmio_select_mitigation();
518 
519 	/*
520 	 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
521 	 * and print their mitigation after MDS, TAA and MMIO Stale Data
522 	 * mitigation selection is done.
523 	 */
524 	md_clear_update_mitigation();
525 }
526 
527 #undef pr_fmt
528 #define pr_fmt(fmt)	"SRBDS: " fmt
529 
530 enum srbds_mitigations {
531 	SRBDS_MITIGATION_OFF,
532 	SRBDS_MITIGATION_UCODE_NEEDED,
533 	SRBDS_MITIGATION_FULL,
534 	SRBDS_MITIGATION_TSX_OFF,
535 	SRBDS_MITIGATION_HYPERVISOR,
536 };
537 
538 static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
539 
540 static const char * const srbds_strings[] = {
541 	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
542 	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
543 	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
544 	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
545 	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
546 };
547 
548 static bool srbds_off;
549 
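/*
 * Re-evaluate IA32_MCU_OPT_CTRL.RNGDS_MITG_DIS according to the selected
 * SRBDS mitigation. This is called for the boot CPU below and, presumably,
 * again for each CPU that is brought up so all cores stay consistent.
 */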
550 void update_srbds_msr(void)
551 {
552 	u64 mcu_ctrl;
553 
554 	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
555 		return;
556 
557 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
558 		return;
559 
560 	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
561 		return;
562 
563 	/*
564 	 * An MDS_NO CPU for which the SRBDS mitigation is not needed (TSX is
565 	 * disabled) and which hasn't received the SRBDS_CTRL microcode.
566 	 */
567 	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
568 		return;
569 
570 	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
571 
572 	switch (srbds_mitigation) {
573 	case SRBDS_MITIGATION_OFF:
574 	case SRBDS_MITIGATION_TSX_OFF:
575 		mcu_ctrl |= RNGDS_MITG_DIS;
576 		break;
577 	case SRBDS_MITIGATION_FULL:
578 		mcu_ctrl &= ~RNGDS_MITG_DIS;
579 		break;
580 	default:
581 		break;
582 	}
583 
584 	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
585 }
586 
587 static void __init srbds_select_mitigation(void)
588 {
589 	u64 ia32_cap;
590 
591 	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
592 		return;
593 
594 	/*
595 	 * Check to see if this is one of the MDS_NO systems supporting TSX that
596 	 * are only exposed to SRBDS when TSX is enabled, or when the CPU is
597 	 * affected by the Processor MMIO Stale Data vulnerability.
598 	 */
599 	ia32_cap = x86_read_arch_cap_msr();
600 	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
601 	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
602 		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
603 	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
604 		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
605 	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
606 		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
607 	else if (cpu_mitigations_off() || srbds_off)
608 		srbds_mitigation = SRBDS_MITIGATION_OFF;
609 
610 	update_srbds_msr();
611 	pr_info("%s\n", srbds_strings[srbds_mitigation]);
612 }
613 
614 static int __init srbds_parse_cmdline(char *str)
615 {
616 	if (!str)
617 		return -EINVAL;
618 
619 	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
620 		return 0;
621 
622 	srbds_off = !strcmp(str, "off");
623 	return 0;
624 }
625 early_param("srbds", srbds_parse_cmdline);
626 
627 #undef pr_fmt
628 #define pr_fmt(fmt)     "L1D Flush : " fmt
629 
630 enum l1d_flush_mitigations {
631 	L1D_FLUSH_OFF = 0,
632 	L1D_FLUSH_ON,
633 };
634 
635 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
636 
637 static void __init l1d_flush_select_mitigation(void)
638 {
639 	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
640 		return;
641 
642 	static_branch_enable(&switch_mm_cond_l1d_flush);
643 	pr_info("Conditional flush on switch_mm() enabled\n");
644 }
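
/*
 * Note that enabling the static key above only arms the infrastructure; a
 * task still has to opt in to the flush, e.g. (illustrative, from user space):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_ENABLE, 0, 0);
 *
 * which is handled by the l1d_flush prctl code further down in this file.
 */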
645 
646 static int __init l1d_flush_parse_cmdline(char *str)
647 {
648 	if (!strcmp(str, "on"))
649 		l1d_flush_mitigation = L1D_FLUSH_ON;
650 
651 	return 0;
652 }
653 early_param("l1d_flush", l1d_flush_parse_cmdline);
654 
655 #undef pr_fmt
656 #define pr_fmt(fmt)	"GDS: " fmt
657 
658 enum gds_mitigations {
659 	GDS_MITIGATION_OFF,
660 	GDS_MITIGATION_UCODE_NEEDED,
661 	GDS_MITIGATION_FORCE,
662 	GDS_MITIGATION_FULL,
663 	GDS_MITIGATION_FULL_LOCKED,
664 	GDS_MITIGATION_HYPERVISOR,
665 };
666 
667 #if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION)
668 static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
669 #else
670 static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
671 #endif
672 
673 static const char * const gds_strings[] = {
674 	[GDS_MITIGATION_OFF]		= "Vulnerable",
675 	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
676 	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
677 	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
678 	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
679 	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
680 };
681 
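/*
 * Report whether the microcode based GDS mitigation is active; used by KVM,
 * for example, when deciding what to advertise to guests.
 */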
682 bool gds_ucode_mitigated(void)
683 {
684 	return (gds_mitigation == GDS_MITIGATION_FULL ||
685 		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
686 }
687 EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
688 
689 void update_gds_msr(void)
690 {
691 	u64 mcu_ctrl_after;
692 	u64 mcu_ctrl;
693 
694 	switch (gds_mitigation) {
695 	case GDS_MITIGATION_OFF:
696 		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
697 		mcu_ctrl |= GDS_MITG_DIS;
698 		break;
699 	case GDS_MITIGATION_FULL_LOCKED:
700 		/*
701 		 * The LOCKED state comes from the boot CPU. APs might not have
702 		 * the same state. Make sure the mitigation is enabled on all
703 		 * CPUs.
704 		 */
705 	case GDS_MITIGATION_FULL:
706 		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
707 		mcu_ctrl &= ~GDS_MITG_DIS;
708 		break;
709 	case GDS_MITIGATION_FORCE:
710 	case GDS_MITIGATION_UCODE_NEEDED:
711 	case GDS_MITIGATION_HYPERVISOR:
712 		return;
713 	}
714 
715 	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
716 
717 	/*
718 	 * Check to make sure that the WRMSR value was not ignored. Writes to
719 	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
720 	 * processor was not.
721 	 */
722 	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
723 	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
724 }
725 
726 static void __init gds_select_mitigation(void)
727 {
728 	u64 mcu_ctrl;
729 
730 	if (!boot_cpu_has_bug(X86_BUG_GDS))
731 		return;
732 
733 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
734 		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
735 		goto out;
736 	}
737 
738 	if (cpu_mitigations_off())
739 		gds_mitigation = GDS_MITIGATION_OFF;
740 	/* Will verify below that mitigation _can_ be disabled */
741 
742 	/* No microcode */
743 	if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
744 		if (gds_mitigation == GDS_MITIGATION_FORCE) {
745 			/*
746 			 * This only needs to be done on the boot CPU so do it
747 			 * here rather than in update_gds_msr()
748 			 */
749 			setup_clear_cpu_cap(X86_FEATURE_AVX);
750 			pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
751 		} else {
752 			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
753 		}
754 		goto out;
755 	}
756 
757 	/* Microcode has mitigation, use it */
758 	if (gds_mitigation == GDS_MITIGATION_FORCE)
759 		gds_mitigation = GDS_MITIGATION_FULL;
760 
761 	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
762 	if (mcu_ctrl & GDS_MITG_LOCKED) {
763 		if (gds_mitigation == GDS_MITIGATION_OFF)
764 			pr_warn("Mitigation locked. Disable failed.\n");
765 
766 		/*
767 		 * The mitigation is selected from the boot CPU. All other CPUs
768 		 * _should_ have the same state. If the boot CPU isn't locked
769 		 * but others are then update_gds_msr() will WARN() about the state
770 		 * mismatch. If the boot CPU is locked update_gds_msr() will
771 		 * ensure the other CPUs have the mitigation enabled.
772 		 */
773 		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
774 	}
775 
776 	update_gds_msr();
777 out:
778 	pr_info("%s\n", gds_strings[gds_mitigation]);
779 }
780 
781 static int __init gds_parse_cmdline(char *str)
782 {
783 	if (!str)
784 		return -EINVAL;
785 
786 	if (!boot_cpu_has_bug(X86_BUG_GDS))
787 		return 0;
788 
789 	if (!strcmp(str, "off"))
790 		gds_mitigation = GDS_MITIGATION_OFF;
791 	else if (!strcmp(str, "force"))
792 		gds_mitigation = GDS_MITIGATION_FORCE;
793 
794 	return 0;
795 }
796 early_param("gather_data_sampling", gds_parse_cmdline);
797 
798 #undef pr_fmt
799 #define pr_fmt(fmt)     "Spectre V1 : " fmt
800 
801 enum spectre_v1_mitigation {
802 	SPECTRE_V1_MITIGATION_NONE,
803 	SPECTRE_V1_MITIGATION_AUTO,
804 };
805 
806 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
807 	SPECTRE_V1_MITIGATION_AUTO;
808 
809 static const char * const spectre_v1_strings[] = {
810 	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
811 	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
812 };
813 
814 /*
815  * Does SMAP provide full mitigation against speculative kernel access to
816  * userspace?
817  */
818 static bool smap_works_speculatively(void)
819 {
820 	if (!boot_cpu_has(X86_FEATURE_SMAP))
821 		return false;
822 
823 	/*
824 	 * On CPUs which are vulnerable to Meltdown, SMAP does not
825 	 * prevent speculative access to user data in the L1 cache.
826 	 * Consider SMAP to be non-functional as a mitigation on these
827 	 * CPUs.
828 	 */
829 	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
830 		return false;
831 
832 	return true;
833 }
834 
835 static void __init spectre_v1_select_mitigation(void)
836 {
837 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
838 		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
839 		return;
840 	}
841 
842 	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
843 		/*
844 		 * With Spectre v1, a user can speculatively control either
845 		 * path of a conditional swapgs with a user-controlled GS
846 		 * value.  The mitigation is to add lfences to both code paths.
847 		 *
848 		 * If FSGSBASE is enabled, the user can put a kernel address in
849 		 * GS, in which case SMAP provides no protection.
850 		 *
851 		 * If FSGSBASE is disabled, the user can only put a user space
852 		 * address in GS.  That makes an attack harder, but still
853 		 * possible if there's no SMAP protection.
854 		 */
855 		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
856 		    !smap_works_speculatively()) {
857 			/*
858 			 * Mitigation can be provided from SWAPGS itself or
859 			 * PTI as the CR3 write in the Meltdown mitigation
860 			 * is serializing.
861 			 *
862 			 * If neither is there, mitigate with an LFENCE to
863 			 * stop speculation through swapgs.
864 			 */
865 			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
866 			    !boot_cpu_has(X86_FEATURE_PTI))
867 				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
868 
869 			/*
870 			 * Enable lfences in the kernel entry (non-swapgs)
871 			 * paths, to prevent user entry from speculatively
872 			 * skipping swapgs.
873 			 */
874 			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
875 		}
876 	}
877 
878 	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
879 }
880 
881 static int __init nospectre_v1_cmdline(char *str)
882 {
883 	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
884 	return 0;
885 }
886 early_param("nospectre_v1", nospectre_v1_cmdline);
887 
888 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
889 
890 #undef pr_fmt
891 #define pr_fmt(fmt)     "RETBleed: " fmt
892 
893 enum retbleed_mitigation {
894 	RETBLEED_MITIGATION_NONE,
895 	RETBLEED_MITIGATION_UNRET,
896 	RETBLEED_MITIGATION_IBPB,
897 	RETBLEED_MITIGATION_IBRS,
898 	RETBLEED_MITIGATION_EIBRS,
899 	RETBLEED_MITIGATION_STUFF,
900 };
901 
902 enum retbleed_mitigation_cmd {
903 	RETBLEED_CMD_OFF,
904 	RETBLEED_CMD_AUTO,
905 	RETBLEED_CMD_UNRET,
906 	RETBLEED_CMD_IBPB,
907 	RETBLEED_CMD_STUFF,
908 };
909 
910 static const char * const retbleed_strings[] = {
911 	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
912 	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
913 	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
914 	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
915 	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
916 	[RETBLEED_MITIGATION_STUFF]	= "Mitigation: Stuffing",
917 };
918 
919 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
920 	RETBLEED_MITIGATION_NONE;
921 static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
922 	RETBLEED_CMD_AUTO;
923 
924 static bool __ro_after_init retbleed_nosmt = false;
925 
926 static int __init retbleed_parse_cmdline(char *str)
927 {
928 	if (!str)
929 		return -EINVAL;
930 
931 	while (str) {
932 		char *next = strchr(str, ',');
933 		if (next) {
934 			*next = 0;
935 			next++;
936 		}
937 
938 		if (!strcmp(str, "off")) {
939 			retbleed_cmd = RETBLEED_CMD_OFF;
940 		} else if (!strcmp(str, "auto")) {
941 			retbleed_cmd = RETBLEED_CMD_AUTO;
942 		} else if (!strcmp(str, "unret")) {
943 			retbleed_cmd = RETBLEED_CMD_UNRET;
944 		} else if (!strcmp(str, "ibpb")) {
945 			retbleed_cmd = RETBLEED_CMD_IBPB;
946 		} else if (!strcmp(str, "stuff")) {
947 			retbleed_cmd = RETBLEED_CMD_STUFF;
948 		} else if (!strcmp(str, "nosmt")) {
949 			retbleed_nosmt = true;
950 		} else if (!strcmp(str, "force")) {
951 			setup_force_cpu_bug(X86_BUG_RETBLEED);
952 		} else {
953 			pr_err("Ignoring unknown retbleed option (%s).\n", str);
954 		}
955 
956 		str = next;
957 	}
958 
959 	return 0;
960 }
961 early_param("retbleed", retbleed_parse_cmdline);
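
/*
 * The parser above splits on ',' so options can be combined, e.g.
 * "retbleed=ibpb,nosmt" selects RETBLEED_CMD_IBPB and also sets
 * retbleed_nosmt.
 */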
962 
963 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
964 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
965 
966 static void __init retbleed_select_mitigation(void)
967 {
968 	bool mitigate_smt = false;
969 
970 	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
971 		return;
972 
973 	switch (retbleed_cmd) {
974 	case RETBLEED_CMD_OFF:
975 		return;
976 
977 	case RETBLEED_CMD_UNRET:
978 		if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
979 			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
980 		} else {
981 			pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
982 			goto do_cmd_auto;
983 		}
984 		break;
985 
986 	case RETBLEED_CMD_IBPB:
987 		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
988 			pr_err("WARNING: CPU does not support IBPB.\n");
989 			goto do_cmd_auto;
990 		} else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
991 			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
992 		} else {
993 			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
994 			goto do_cmd_auto;
995 		}
996 		break;
997 
998 	case RETBLEED_CMD_STUFF:
999 		if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING) &&
1000 		    spectre_v2_enabled == SPECTRE_V2_RETPOLINE) {
1001 			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1002 
1003 		} else {
1004 			if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING))
1005 				pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
1006 			else
1007 				pr_err("WARNING: kernel not compiled with CALL_DEPTH_TRACKING.\n");
1008 
1009 			goto do_cmd_auto;
1010 		}
1011 		break;
1012 
1013 do_cmd_auto:
1014 	case RETBLEED_CMD_AUTO:
1015 	default:
1016 		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1017 		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
1018 			if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
1019 				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1020 			else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
1021 				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1022 		}
1023 
1024 		/*
1025 		 * The Intel mitigation (IBRS or eIBRS) was already selected in
1026 		 * spectre_v2_select_mitigation().  'retbleed_mitigation' will
1027 		 * be set accordingly below.
1028 		 */
1029 
1030 		break;
1031 	}
1032 
1033 	switch (retbleed_mitigation) {
1034 	case RETBLEED_MITIGATION_UNRET:
1035 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1036 		setup_force_cpu_cap(X86_FEATURE_UNRET);
1037 
1038 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
1039 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
1040 			pr_err(RETBLEED_UNTRAIN_MSG);
1041 
1042 		mitigate_smt = true;
1043 		break;
1044 
1045 	case RETBLEED_MITIGATION_IBPB:
1046 		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1047 		mitigate_smt = true;
1048 		break;
1049 
1050 	case RETBLEED_MITIGATION_STUFF:
1051 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1052 		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1053 		x86_set_skl_return_thunk();
1054 		break;
1055 
1056 	default:
1057 		break;
1058 	}
1059 
1060 	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
1061 	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
1062 		cpu_smt_disable(false);
1063 
1064 	/*
1065 	 * Let IBRS take precedence on Intel without otherwise affecting the
1066 	 * retbleed= cmdline option, except for call depth based stuffing.
1067 	 */
1068 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1069 		switch (spectre_v2_enabled) {
1070 		case SPECTRE_V2_IBRS:
1071 			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1072 			break;
1073 		case SPECTRE_V2_EIBRS:
1074 		case SPECTRE_V2_EIBRS_RETPOLINE:
1075 		case SPECTRE_V2_EIBRS_LFENCE:
1076 			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1077 			break;
1078 		default:
1079 			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
1080 				pr_err(RETBLEED_INTEL_MSG);
1081 		}
1082 	}
1083 
1084 	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
1085 }
1086 
1087 #undef pr_fmt
1088 #define pr_fmt(fmt)     "Spectre V2 : " fmt
1089 
1090 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
1091 	SPECTRE_V2_USER_NONE;
1092 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
1093 	SPECTRE_V2_USER_NONE;
1094 
1095 #ifdef CONFIG_RETPOLINE
1096 static bool spectre_v2_bad_module;
1097 
1098 bool retpoline_module_ok(bool has_retpoline)
1099 {
1100 	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1101 		return true;
1102 
1103 	pr_err("System may be vulnerable to spectre v2\n");
1104 	spectre_v2_bad_module = true;
1105 	return false;
1106 }
1107 
1108 static inline const char *spectre_v2_module_string(void)
1109 {
1110 	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1111 }
1112 #else
1113 static inline const char *spectre_v2_module_string(void) { return ""; }
1114 #endif
1115 
1116 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1117 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1118 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1119 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1120 
1121 #ifdef CONFIG_BPF_SYSCALL
1122 void unpriv_ebpf_notify(int new_state)
1123 {
1124 	if (new_state)
1125 		return;
1126 
1127 	/* Unprivileged eBPF is enabled */
1128 
1129 	switch (spectre_v2_enabled) {
1130 	case SPECTRE_V2_EIBRS:
1131 		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1132 		break;
1133 	case SPECTRE_V2_EIBRS_LFENCE:
1134 		if (sched_smt_active())
1135 			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1136 		break;
1137 	default:
1138 		break;
1139 	}
1140 }
1141 #endif
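
/*
 * unpriv_ebpf_notify() is expected to be invoked when the unprivileged BPF
 * sysctl changes, so the warnings above are (re)emitted if unprivileged eBPF
 * gets enabled after the Spectre v2 mitigation was selected.
 */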
1142 
1143 static inline bool match_option(const char *arg, int arglen, const char *opt)
1144 {
1145 	int len = strlen(opt);
1146 
1147 	return len == arglen && !strncmp(arg, opt, len);
1148 }
1149 
1150 /* The kernel command line selection for spectre v2 */
1151 enum spectre_v2_mitigation_cmd {
1152 	SPECTRE_V2_CMD_NONE,
1153 	SPECTRE_V2_CMD_AUTO,
1154 	SPECTRE_V2_CMD_FORCE,
1155 	SPECTRE_V2_CMD_RETPOLINE,
1156 	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1157 	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1158 	SPECTRE_V2_CMD_EIBRS,
1159 	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1160 	SPECTRE_V2_CMD_EIBRS_LFENCE,
1161 	SPECTRE_V2_CMD_IBRS,
1162 };
1163 
1164 enum spectre_v2_user_cmd {
1165 	SPECTRE_V2_USER_CMD_NONE,
1166 	SPECTRE_V2_USER_CMD_AUTO,
1167 	SPECTRE_V2_USER_CMD_FORCE,
1168 	SPECTRE_V2_USER_CMD_PRCTL,
1169 	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1170 	SPECTRE_V2_USER_CMD_SECCOMP,
1171 	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1172 };
1173 
1174 static const char * const spectre_v2_user_strings[] = {
1175 	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
1176 	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
1177 	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
1178 	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
1179 	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
1180 };
1181 
1182 static const struct {
1183 	const char			*option;
1184 	enum spectre_v2_user_cmd	cmd;
1185 	bool				secure;
1186 } v2_user_options[] __initconst = {
1187 	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
1188 	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
1189 	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
1190 	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
1191 	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
1192 	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
1193 	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
1194 };
1195 
1196 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
1197 {
1198 	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1199 		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
1200 }
1201 
1202 static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
1203 
1204 static enum spectre_v2_user_cmd __init
1205 spectre_v2_parse_user_cmdline(void)
1206 {
1207 	char arg[20];
1208 	int ret, i;
1209 
1210 	switch (spectre_v2_cmd) {
1211 	case SPECTRE_V2_CMD_NONE:
1212 		return SPECTRE_V2_USER_CMD_NONE;
1213 	case SPECTRE_V2_CMD_FORCE:
1214 		return SPECTRE_V2_USER_CMD_FORCE;
1215 	default:
1216 		break;
1217 	}
1218 
1219 	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
1220 				  arg, sizeof(arg));
1221 	if (ret < 0)
1222 		return SPECTRE_V2_USER_CMD_AUTO;
1223 
1224 	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
1225 		if (match_option(arg, ret, v2_user_options[i].option)) {
1226 			spec_v2_user_print_cond(v2_user_options[i].option,
1227 						v2_user_options[i].secure);
1228 			return v2_user_options[i].cmd;
1229 		}
1230 	}
1231 
1232 	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
1233 	return SPECTRE_V2_USER_CMD_AUTO;
1234 }
1235 
1236 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1237 {
1238 	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1239 }
1240 
1241 static void __init
1242 spectre_v2_user_select_mitigation(void)
1243 {
1244 	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
1245 	bool smt_possible = IS_ENABLED(CONFIG_SMP);
1246 	enum spectre_v2_user_cmd cmd;
1247 
1248 	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1249 		return;
1250 
1251 	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
1252 	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
1253 		smt_possible = false;
1254 
1255 	cmd = spectre_v2_parse_user_cmdline();
1256 	switch (cmd) {
1257 	case SPECTRE_V2_USER_CMD_NONE:
1258 		goto set_mode;
1259 	case SPECTRE_V2_USER_CMD_FORCE:
1260 		mode = SPECTRE_V2_USER_STRICT;
1261 		break;
1262 	case SPECTRE_V2_USER_CMD_AUTO:
1263 	case SPECTRE_V2_USER_CMD_PRCTL:
1264 	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1265 		mode = SPECTRE_V2_USER_PRCTL;
1266 		break;
1267 	case SPECTRE_V2_USER_CMD_SECCOMP:
1268 	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1269 		if (IS_ENABLED(CONFIG_SECCOMP))
1270 			mode = SPECTRE_V2_USER_SECCOMP;
1271 		else
1272 			mode = SPECTRE_V2_USER_PRCTL;
1273 		break;
1274 	}
1275 
1276 	/* Initialize Indirect Branch Prediction Barrier */
1277 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
1278 		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
1279 
1280 		spectre_v2_user_ibpb = mode;
1281 		switch (cmd) {
1282 		case SPECTRE_V2_USER_CMD_FORCE:
1283 		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1284 		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1285 			static_branch_enable(&switch_mm_always_ibpb);
1286 			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1287 			break;
1288 		case SPECTRE_V2_USER_CMD_PRCTL:
1289 		case SPECTRE_V2_USER_CMD_AUTO:
1290 		case SPECTRE_V2_USER_CMD_SECCOMP:
1291 			static_branch_enable(&switch_mm_cond_ibpb);
1292 			break;
1293 		default:
1294 			break;
1295 		}
1296 
1297 		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
1298 			static_key_enabled(&switch_mm_always_ibpb) ?
1299 			"always-on" : "conditional");
1300 	}
1301 
1302 	/*
1303 	 * STIBP is not required if there is no STIBP support, if Intel enhanced
1304 	 * IBRS is enabled, or if SMT is impossible.
1305 	 *
1306 	 * Intel's Enhanced IBRS also protects against cross-thread branch target
1307 	 * injection in user-mode as the IBRS bit remains always set which
1308 	 * implicitly enables cross-thread protections.  However, in legacy IBRS
1309 	 * mode, the IBRS bit is set only on kernel entry and cleared on return
1310 	 * to userspace.  AMD Automatic IBRS also does not protect userspace.
1311 	 * These modes therefore disable the implicit cross-thread protection,
1312 	 * so allow for STIBP to be selected in those cases.
1313 	 */
1314 	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1315 	    !smt_possible ||
1316 	    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1317 	     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
1318 		return;
1319 
1320 	/*
1321 	 * At this point, an STIBP mode other than "off" has been set.
1322 	 * If STIBP support is not being forced, check if STIBP always-on
1323 	 * is preferred.
1324 	 */
1325 	if (mode != SPECTRE_V2_USER_STRICT &&
1326 	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1327 		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1328 
1329 	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
1330 	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
1331 		if (mode != SPECTRE_V2_USER_STRICT &&
1332 		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
1333 			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
1334 		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1335 	}
1336 
1337 	spectre_v2_user_stibp = mode;
1338 
1339 set_mode:
1340 	pr_info("%s\n", spectre_v2_user_strings[mode]);
1341 }
1342 
1343 static const char * const spectre_v2_strings[] = {
1344 	[SPECTRE_V2_NONE]			= "Vulnerable",
1345 	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
1346 	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
1347 	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced / Automatic IBRS",
1348 	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced / Automatic IBRS + LFENCE",
1349 	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced / Automatic IBRS + Retpolines",
1350 	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
1351 };
1352 
1353 static const struct {
1354 	const char *option;
1355 	enum spectre_v2_mitigation_cmd cmd;
1356 	bool secure;
1357 } mitigation_options[] __initconst = {
1358 	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
1359 	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
1360 	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
1361 	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
1362 	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
1363 	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
1364 	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
1365 	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
1366 	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
1367 	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
1368 	{ "ibrs",		SPECTRE_V2_CMD_IBRS,              false },
1369 };
1370 
1371 static void __init spec_v2_print_cond(const char *reason, bool secure)
1372 {
1373 	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1374 		pr_info("%s selected on command line.\n", reason);
1375 }
1376 
1377 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1378 {
1379 	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
1380 	char arg[20];
1381 	int ret, i;
1382 
1383 	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
1384 	    cpu_mitigations_off())
1385 		return SPECTRE_V2_CMD_NONE;
1386 
1387 	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
1388 	if (ret < 0)
1389 		return SPECTRE_V2_CMD_AUTO;
1390 
1391 	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
1392 		if (!match_option(arg, ret, mitigation_options[i].option))
1393 			continue;
1394 		cmd = mitigation_options[i].cmd;
1395 		break;
1396 	}
1397 
1398 	if (i >= ARRAY_SIZE(mitigation_options)) {
1399 		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1400 		return SPECTRE_V2_CMD_AUTO;
1401 	}
1402 
1403 	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
1404 	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1405 	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
1406 	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1407 	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1408 	    !IS_ENABLED(CONFIG_RETPOLINE)) {
1409 		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1410 		       mitigation_options[i].option);
1411 		return SPECTRE_V2_CMD_AUTO;
1412 	}
1413 
1414 	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
1415 	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1416 	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1417 	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1418 		pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
1419 		       mitigation_options[i].option);
1420 		return SPECTRE_V2_CMD_AUTO;
1421 	}
1422 
1423 	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1424 	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
1425 	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
1426 		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
1427 		       mitigation_options[i].option);
1428 		return SPECTRE_V2_CMD_AUTO;
1429 	}
1430 
1431 	if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
1432 		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1433 		       mitigation_options[i].option);
1434 		return SPECTRE_V2_CMD_AUTO;
1435 	}
1436 
1437 	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1438 		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
1439 		       mitigation_options[i].option);
1440 		return SPECTRE_V2_CMD_AUTO;
1441 	}
1442 
1443 	if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
1444 		pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
1445 		       mitigation_options[i].option);
1446 		return SPECTRE_V2_CMD_AUTO;
1447 	}
1448 
1449 	if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
1450 		pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
1451 		       mitigation_options[i].option);
1452 		return SPECTRE_V2_CMD_AUTO;
1453 	}
1454 
1455 	spec_v2_print_cond(mitigation_options[i].option,
1456 			   mitigation_options[i].secure);
1457 	return cmd;
1458 }
1459 
1460 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
1461 {
1462 	if (!IS_ENABLED(CONFIG_RETPOLINE)) {
1463 		pr_err("Kernel not compiled with retpoline; no mitigation available!");
1464 		return SPECTRE_V2_NONE;
1465 	}
1466 
1467 	return SPECTRE_V2_RETPOLINE;
1468 }
1469 
1470 /* Disable in-kernel use of non-RSB RET predictors */
1471 static void __init spec_ctrl_disable_kernel_rrsba(void)
1472 {
1473 	u64 ia32_cap;
1474 
1475 	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
1476 		return;
1477 
1478 	ia32_cap = x86_read_arch_cap_msr();
1479 
1480 	if (ia32_cap & ARCH_CAP_RRSBA) {
1481 		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
1482 		update_spec_ctrl(x86_spec_ctrl_base);
1483 	}
1484 }
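
/*
 * Note: RRSBA_DIS_S only suppresses RRSBA behaviour for supervisor mode
 * predictions; the architecture also defines an RRSBA_DIS_U control for user
 * mode, which is not used here.
 */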
1485 
1486 static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
1487 {
1488 	/*
1489 	 * Similar to context switches, there are two types of RSB attacks
1490 	 * after VM exit:
1491 	 *
1492 	 * 1) RSB underflow
1493 	 *
1494 	 * 2) Poisoned RSB entry
1495 	 *
1496 	 * When retpoline is enabled, both are mitigated by filling/clearing
1497 	 * the RSB.
1498 	 *
1499 	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
1500 	 * prediction isolation protections, RSB still needs to be cleared
1501 	 * because of #2.  Note that SMEP provides no protection here, unlike
1502 	 * the case of user-space-poisoned RSB entries.
1503 	 *
1504 	 * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
1505 	 * bug is present then a LITE version of RSB protection is required:
1506 	 * just a single CALL needs to retire before a RET is executed.
1507 	 */
1508 	switch (mode) {
1509 	case SPECTRE_V2_NONE:
1510 		return;
1511 
1512 	case SPECTRE_V2_EIBRS_LFENCE:
1513 	case SPECTRE_V2_EIBRS:
1514 		if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
1515 			setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
1516 			pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
1517 		}
1518 		return;
1519 
1520 	case SPECTRE_V2_EIBRS_RETPOLINE:
1521 	case SPECTRE_V2_RETPOLINE:
1522 	case SPECTRE_V2_LFENCE:
1523 	case SPECTRE_V2_IBRS:
1524 		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1525 		pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
1526 		return;
1527 	}
1528 
1529 	pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit\n");
1530 	dump_stack();
1531 }
1532 
1533 static void __init spectre_v2_select_mitigation(void)
1534 {
1535 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
1536 	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
1537 
1538 	/*
1539 	 * If the CPU is not affected and the command line mode is NONE or AUTO
1540 	 * then nothing to do.
1541 	 */
1542 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
1543 	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
1544 		return;
1545 
1546 	switch (cmd) {
1547 	case SPECTRE_V2_CMD_NONE:
1548 		return;
1549 
1550 	case SPECTRE_V2_CMD_FORCE:
1551 	case SPECTRE_V2_CMD_AUTO:
1552 		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1553 			mode = SPECTRE_V2_EIBRS;
1554 			break;
1555 		}
1556 
1557 		if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
1558 		    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
1559 		    retbleed_cmd != RETBLEED_CMD_OFF &&
1560 		    retbleed_cmd != RETBLEED_CMD_STUFF &&
1561 		    boot_cpu_has(X86_FEATURE_IBRS) &&
1562 		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1563 			mode = SPECTRE_V2_IBRS;
1564 			break;
1565 		}
1566 
1567 		mode = spectre_v2_select_retpoline();
1568 		break;
1569 
1570 	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
1571 		pr_err(SPECTRE_V2_LFENCE_MSG);
1572 		mode = SPECTRE_V2_LFENCE;
1573 		break;
1574 
1575 	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
1576 		mode = SPECTRE_V2_RETPOLINE;
1577 		break;
1578 
1579 	case SPECTRE_V2_CMD_RETPOLINE:
1580 		mode = spectre_v2_select_retpoline();
1581 		break;
1582 
1583 	case SPECTRE_V2_CMD_IBRS:
1584 		mode = SPECTRE_V2_IBRS;
1585 		break;
1586 
1587 	case SPECTRE_V2_CMD_EIBRS:
1588 		mode = SPECTRE_V2_EIBRS;
1589 		break;
1590 
1591 	case SPECTRE_V2_CMD_EIBRS_LFENCE:
1592 		mode = SPECTRE_V2_EIBRS_LFENCE;
1593 		break;
1594 
1595 	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
1596 		mode = SPECTRE_V2_EIBRS_RETPOLINE;
1597 		break;
1598 	}
1599 
1600 	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
1601 		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1602 
1603 	if (spectre_v2_in_ibrs_mode(mode)) {
1604 		if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
1605 			msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
1606 		} else {
1607 			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
1608 			update_spec_ctrl(x86_spec_ctrl_base);
1609 		}
1610 	}
1611 
1612 	switch (mode) {
1613 	case SPECTRE_V2_NONE:
1614 	case SPECTRE_V2_EIBRS:
1615 		break;
1616 
1617 	case SPECTRE_V2_IBRS:
1618 		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
1619 		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
1620 			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
1621 		break;
1622 
1623 	case SPECTRE_V2_LFENCE:
1624 	case SPECTRE_V2_EIBRS_LFENCE:
1625 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
1626 		fallthrough;
1627 
1628 	case SPECTRE_V2_RETPOLINE:
1629 	case SPECTRE_V2_EIBRS_RETPOLINE:
1630 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
1631 		break;
1632 	}
1633 
1634 	/*
1635 	 * Disable alternate RSB predictions in the kernel when indirect CALLs
1636 	 * and JMPs get protection against BHI and Intramode-BTI, but RET
1637 	 * prediction from a non-RSB predictor is still a risk.
1638 	 */
1639 	if (mode == SPECTRE_V2_EIBRS_LFENCE ||
1640 	    mode == SPECTRE_V2_EIBRS_RETPOLINE ||
1641 	    mode == SPECTRE_V2_RETPOLINE)
1642 		spec_ctrl_disable_kernel_rrsba();
1643 
1644 	spectre_v2_enabled = mode;
1645 	pr_info("%s\n", spectre_v2_strings[mode]);
1646 
1647 	/*
1648 	 * If Spectre v2 protection has been enabled, fill the RSB during a
1649 	 * context switch.  In general there are two types of RSB attacks
1650 	 * across context switches, for which the CALLs/RETs may be unbalanced.
1651 	 *
1652 	 * 1) RSB underflow
1653 	 *
1654 	 *    Some Intel parts have "bottomless RSB".  When the RSB is empty,
1655 	 *    speculated return targets may come from the branch predictor,
1656 	 *    which could have a user-poisoned BTB or BHB entry.
1657 	 *
1658 	 *    AMD has it even worse: *all* returns are speculated from the BTB,
1659 	 *    regardless of the state of the RSB.
1660 	 *
1661 	 *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
1662 	 *    scenario is mitigated by the IBRS branch prediction isolation
1663 	 *    properties, so the RSB buffer filling wouldn't be necessary to
1664 	 *    protect against this type of attack.
1665 	 *
1666 	 *    The "user -> user" attack scenario is mitigated by RSB filling.
1667 	 *
1668 	 * 2) Poisoned RSB entry
1669 	 *
1670 	 *    If the 'next' in-kernel return stack is shorter than 'prev',
1671 	 *    'next' could be tricked into speculating with a user-poisoned RSB
1672 	 *    entry.
1673 	 *
1674 	 *    The "user -> kernel" attack scenario is mitigated by SMEP and
1675 	 *    eIBRS.
1676 	 *
1677 	 *    The "user -> user" scenario, also known as SpectreBHB, requires
1678 	 *    RSB clearing.
1679 	 *
1680 	 * So to mitigate all cases, unconditionally fill RSB on context
1681 	 * switches.
1682 	 *
1683 	 * FIXME: Is this pointless for retbleed-affected AMD?
1684 	 */
1685 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
1686 	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
1687 
1688 	spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
1689 
1690 	/*
1691 	 * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
1692 	 * and Enhanced IBRS protect firmware too, so enable IBRS around
1693 	 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
1694 	 * otherwise enabled.
1695 	 *
1696 	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
1697 	 * the user might select retpoline on the kernel command line and if
1698 	 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
1699 	 * enable IBRS around firmware calls.
1700 	 */
1701 	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
1702 	    boot_cpu_has(X86_FEATURE_IBPB) &&
1703 	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1704 	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
1705 
1706 		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
1707 			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
1708 			pr_info("Enabling Speculation Barrier for firmware calls\n");
1709 		}
1710 
1711 	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
1712 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
1713 		pr_info("Enabling Restricted Speculation for firmware calls\n");
1714 	}
1715 
1716 	/* Set up IBPB and STIBP depending on the general spectre V2 command */
1717 	spectre_v2_cmd = cmd;
1718 }
1719 
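/*
 * Propagate the STIBP bit from x86_spec_ctrl_base into this CPU's SPEC_CTRL
 * MSR and its cached copy.  Invoked on every CPU via on_each_cpu() from
 * update_stibp_strict() below.
 */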
1720 static void update_stibp_msr(void * __unused)
1721 {
1722 	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
1723 	update_spec_ctrl(val);
1724 }
1725 
1726 /* Update x86_spec_ctrl_base in case SMT state changed. */
1727 static void update_stibp_strict(void)
1728 {
1729 	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
1730 
1731 	if (sched_smt_active())
1732 		mask |= SPEC_CTRL_STIBP;
1733 
1734 	if (mask == x86_spec_ctrl_base)
1735 		return;
1736 
1737 	pr_info("Update user space SMT mitigation: STIBP %s\n",
1738 		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
1739 	x86_spec_ctrl_base = mask;
1740 	on_each_cpu(update_stibp_msr, NULL, 1);
1741 }
1742 
1743 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
1744 static void update_indir_branch_cond(void)
1745 {
1746 	if (sched_smt_active())
1747 		static_branch_enable(&switch_to_cond_stibp);
1748 	else
1749 		static_branch_disable(&switch_to_cond_stibp);
1750 }
1751 
1752 #undef pr_fmt
1753 #define pr_fmt(fmt) fmt
1754 
1755 /* Update the static key controlling the MDS CPU buffer clear in idle */
1756 static void update_mds_branch_idle(void)
1757 {
1758 	u64 ia32_cap = x86_read_arch_cap_msr();
1759 
1760 	/*
1761 	 * Enable the idle clearing if SMT is active on CPUs which are
1762 	 * affected only by MSBDS and not any other MDS variant.
1763 	 *
1764 	 * The other variants cannot be mitigated when SMT is enabled, so
1765 	 * clearing the buffers on idle just to prevent the Store Buffer
1766 	 * repartitioning leak would be a window dressing exercise.
1767 	 */
1768 	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
1769 		return;
1770 
1771 	if (sched_smt_active()) {
1772 		static_branch_enable(&mds_idle_clear);
1773 	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
1774 		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
1775 		static_branch_disable(&mds_idle_clear);
1776 	}
1777 }
1778 
1779 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
1780 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
1781 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
1782 
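/*
 * Re-evaluate the SMT-dependent parts of the mitigations (strict/conditional
 * STIBP, the MDS idle clearing and the SMT data-leak warnings below) whenever
 * the SMT scheduling state changes.
 */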
1783 void cpu_bugs_smt_update(void)
1784 {
1785 	mutex_lock(&spec_ctrl_mutex);
1786 
1787 	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
1788 	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
1789 		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1790 
1791 	switch (spectre_v2_user_stibp) {
1792 	case SPECTRE_V2_USER_NONE:
1793 		break;
1794 	case SPECTRE_V2_USER_STRICT:
1795 	case SPECTRE_V2_USER_STRICT_PREFERRED:
1796 		update_stibp_strict();
1797 		break;
1798 	case SPECTRE_V2_USER_PRCTL:
1799 	case SPECTRE_V2_USER_SECCOMP:
1800 		update_indir_branch_cond();
1801 		break;
1802 	}
1803 
1804 	switch (mds_mitigation) {
1805 	case MDS_MITIGATION_FULL:
1806 	case MDS_MITIGATION_VMWERV:
1807 		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
1808 			pr_warn_once(MDS_MSG_SMT);
1809 		update_mds_branch_idle();
1810 		break;
1811 	case MDS_MITIGATION_OFF:
1812 		break;
1813 	}
1814 
1815 	switch (taa_mitigation) {
1816 	case TAA_MITIGATION_VERW:
1817 	case TAA_MITIGATION_UCODE_NEEDED:
1818 		if (sched_smt_active())
1819 			pr_warn_once(TAA_MSG_SMT);
1820 		break;
1821 	case TAA_MITIGATION_TSX_DISABLED:
1822 	case TAA_MITIGATION_OFF:
1823 		break;
1824 	}
1825 
1826 	switch (mmio_mitigation) {
1827 	case MMIO_MITIGATION_VERW:
1828 	case MMIO_MITIGATION_UCODE_NEEDED:
1829 		if (sched_smt_active())
1830 			pr_warn_once(MMIO_MSG_SMT);
1831 		break;
1832 	case MMIO_MITIGATION_OFF:
1833 		break;
1834 	}
1835 
1836 	mutex_unlock(&spec_ctrl_mutex);
1837 }
1838 
1839 #undef pr_fmt
1840 #define pr_fmt(fmt)	"Speculative Store Bypass: " fmt
1841 
1842 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1843 
1844 /* The kernel command line selection */
1845 enum ssb_mitigation_cmd {
1846 	SPEC_STORE_BYPASS_CMD_NONE,
1847 	SPEC_STORE_BYPASS_CMD_AUTO,
1848 	SPEC_STORE_BYPASS_CMD_ON,
1849 	SPEC_STORE_BYPASS_CMD_PRCTL,
1850 	SPEC_STORE_BYPASS_CMD_SECCOMP,
1851 };
1852 
1853 static const char * const ssb_strings[] = {
1854 	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
1855 	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
1856 	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
1857 	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
1858 };
1859 
1860 static const struct {
1861 	const char *option;
1862 	enum ssb_mitigation_cmd cmd;
1863 } ssb_mitigation_options[]  __initconst = {
1864 	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
1865 	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
1866 	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
1867 	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
1868 	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
1869 };
1870 
1871 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
1872 {
1873 	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
1874 	char arg[20];
1875 	int ret, i;
1876 
1877 	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
1878 	    cpu_mitigations_off()) {
1879 		return SPEC_STORE_BYPASS_CMD_NONE;
1880 	} else {
1881 		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
1882 					  arg, sizeof(arg));
1883 		if (ret < 0)
1884 			return SPEC_STORE_BYPASS_CMD_AUTO;
1885 
1886 		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
1887 			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
1888 				continue;
1889 
1890 			cmd = ssb_mitigation_options[i].cmd;
1891 			break;
1892 		}
1893 
1894 		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
1895 			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1896 			return SPEC_STORE_BYPASS_CMD_AUTO;
1897 		}
1898 	}
1899 
1900 	return cmd;
1901 }
1902 
1903 static enum ssb_mitigation __init __ssb_select_mitigation(void)
1904 {
1905 	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
1906 	enum ssb_mitigation_cmd cmd;
1907 
1908 	if (!boot_cpu_has(X86_FEATURE_SSBD))
1909 		return mode;
1910 
1911 	cmd = ssb_parse_cmdline();
1912 	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
1913 	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
1914 	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
1915 		return mode;
1916 
1917 	switch (cmd) {
1918 	case SPEC_STORE_BYPASS_CMD_SECCOMP:
1919 		/*
1920 		 * Choose prctl+seccomp as the default mode if seccomp is
1921 		 * enabled.
1922 		 */
1923 		if (IS_ENABLED(CONFIG_SECCOMP))
1924 			mode = SPEC_STORE_BYPASS_SECCOMP;
1925 		else
1926 			mode = SPEC_STORE_BYPASS_PRCTL;
1927 		break;
1928 	case SPEC_STORE_BYPASS_CMD_ON:
1929 		mode = SPEC_STORE_BYPASS_DISABLE;
1930 		break;
1931 	case SPEC_STORE_BYPASS_CMD_AUTO:
1932 	case SPEC_STORE_BYPASS_CMD_PRCTL:
1933 		mode = SPEC_STORE_BYPASS_PRCTL;
1934 		break;
1935 	case SPEC_STORE_BYPASS_CMD_NONE:
1936 		break;
1937 	}
1938 
1939 	/*
1940 	 * We have three CPU feature flags that are in play here:
1941 	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
1942 	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
1943 	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
1944 	 */
1945 	if (mode == SPEC_STORE_BYPASS_DISABLE) {
1946 		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
1947 		/*
1948 		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
1949 		 * use a completely different MSR and bit dependent on family.
1950 		 */
1951 		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
1952 		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1953 			x86_amd_ssb_disable();
1954 		} else {
1955 			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
1956 			update_spec_ctrl(x86_spec_ctrl_base);
1957 		}
1958 	}
1959 
1960 	return mode;
1961 }
1962 
1963 static void ssb_select_mitigation(void)
1964 {
1965 	ssb_mode = __ssb_select_mitigation();
1966 
1967 	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1968 		pr_info("%s\n", ssb_strings[ssb_mode]);
1969 }
1970 
1971 #undef pr_fmt
1972 #define pr_fmt(fmt)     "Speculation prctl: " fmt
1973 
1974 static void task_update_spec_tif(struct task_struct *tsk)
1975 {
1976 	/* Force the update of the real TIF bits */
1977 	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
1978 
1979 	/*
1980 	 * Immediately update the speculation control MSRs for the current
1981 	 * task, but for a non-current task delay setting the CPU
1982 	 * mitigation until it is scheduled next.
1983 	 *
1984 	 * This can only happen for SECCOMP mitigation. For PRCTL it's
1985 	 * always the current task.
1986 	 */
1987 	if (tsk == current)
1988 		speculation_ctrl_update_current();
1989 }
1990 
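/*
 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, ...) handler: toggle the
 * per-task TIF_SPEC_L1D_FLUSH flag, which is only honoured when the
 * conditional switch_mm() L1D flush key is enabled.
 */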
1991 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
1992 {
1994 	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
1995 		return -EPERM;
1996 
1997 	switch (ctrl) {
1998 	case PR_SPEC_ENABLE:
1999 		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2000 		return 0;
2001 	case PR_SPEC_DISABLE:
2002 		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2003 		return 0;
2004 	default:
2005 		return -ERANGE;
2006 	}
2007 }
2008 
2009 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
2010 {
2011 	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
2012 	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
2013 		return -ENXIO;
2014 
2015 	switch (ctrl) {
2016 	case PR_SPEC_ENABLE:
2017 		/* If speculation is force disabled, enable is not allowed */
2018 		if (task_spec_ssb_force_disable(task))
2019 			return -EPERM;
2020 		task_clear_spec_ssb_disable(task);
2021 		task_clear_spec_ssb_noexec(task);
2022 		task_update_spec_tif(task);
2023 		break;
2024 	case PR_SPEC_DISABLE:
2025 		task_set_spec_ssb_disable(task);
2026 		task_clear_spec_ssb_noexec(task);
2027 		task_update_spec_tif(task);
2028 		break;
2029 	case PR_SPEC_FORCE_DISABLE:
2030 		task_set_spec_ssb_disable(task);
2031 		task_set_spec_ssb_force_disable(task);
2032 		task_clear_spec_ssb_noexec(task);
2033 		task_update_spec_tif(task);
2034 		break;
2035 	case PR_SPEC_DISABLE_NOEXEC:
2036 		if (task_spec_ssb_force_disable(task))
2037 			return -EPERM;
2038 		task_set_spec_ssb_disable(task);
2039 		task_set_spec_ssb_noexec(task);
2040 		task_update_spec_tif(task);
2041 		break;
2042 	default:
2043 		return -ERANGE;
2044 	}
2045 	return 0;
2046 }
2047 
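/*
 * True if indirect branch speculation is under per-task control, i.e. either
 * IBPB or STIBP is in prctl or seccomp mode.
 */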
2048 static bool is_spec_ib_user_controlled(void)
2049 {
2050 	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2051 		spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2052 		spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2053 		spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2054 }
2055 
2056 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2057 {
2058 	switch (ctrl) {
2059 	case PR_SPEC_ENABLE:
2060 		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2061 		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2062 			return 0;
2063 
2064 		/*
2065 		 * With strict mode for both IBPB and STIBP, the instruction
2066 		 * code paths avoid checking this task flag and instead
2067 		 * unconditionally run the instruction. However, STIBP and IBPB
2068 		 * are independent and either can be set to conditionally
2069 		 * enabled regardless of the mode of the other.
2070 		 *
2071 		 * If either is set to conditional, allow the task flag to be
2072 		 * updated, unless it was force-disabled by a previous prctl
2073 		 * call. Currently, this is possible on an AMD CPU which has the
2074 		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2075 		 * kernel is booted with 'spectre_v2_user=seccomp', then
2076 		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2077 		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2078 		 */
2079 		if (!is_spec_ib_user_controlled() ||
2080 		    task_spec_ib_force_disable(task))
2081 			return -EPERM;
2082 
2083 		task_clear_spec_ib_disable(task);
2084 		task_update_spec_tif(task);
2085 		break;
2086 	case PR_SPEC_DISABLE:
2087 	case PR_SPEC_FORCE_DISABLE:
2088 		/*
2089 		 * Indirect branch speculation is always allowed when
2090 		 * mitigation is force disabled.
2091 		 */
2092 		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2093 		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2094 			return -EPERM;
2095 
2096 		if (!is_spec_ib_user_controlled())
2097 			return 0;
2098 
2099 		task_set_spec_ib_disable(task);
2100 		if (ctrl == PR_SPEC_FORCE_DISABLE)
2101 			task_set_spec_ib_force_disable(task);
2102 		task_update_spec_tif(task);
2103 		if (task == current)
2104 			indirect_branch_prediction_barrier();
2105 		break;
2106 	default:
2107 		return -ERANGE;
2108 	}
2109 	return 0;
2110 }
2111 
2112 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2113 			     unsigned long ctrl)
2114 {
2115 	switch (which) {
2116 	case PR_SPEC_STORE_BYPASS:
2117 		return ssb_prctl_set(task, ctrl);
2118 	case PR_SPEC_INDIRECT_BRANCH:
2119 		return ib_prctl_set(task, ctrl);
2120 	case PR_SPEC_L1D_FLUSH:
2121 		return l1d_flush_prctl_set(task, ctrl);
2122 	default:
2123 		return -ENODEV;
2124 	}
2125 }
2126 
2127 #ifdef CONFIG_SECCOMP
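/*
 * Illustrative user space sketch (not kernel code; the helper name is made
 * up, the constants come from the uapi <linux/prctl.h>):
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	static int disable_ssb_for_current_task(void)
 *	{
 *		return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			     PR_SPEC_DISABLE, 0, 0);
 *	}
 *
 * The call is routed through arch_prctl_spec_ctrl_set() above and ends up in
 * ssb_prctl_set() with ctrl == PR_SPEC_DISABLE for the calling task.
 */
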
2128 void arch_seccomp_spec_mitigate(struct task_struct *task)
2129 {
2130 	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2131 		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2132 	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2133 	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2134 		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2135 }
2136 #endif
2137 
2138 static int l1d_flush_prctl_get(struct task_struct *task)
2139 {
2140 	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2141 		return PR_SPEC_FORCE_DISABLE;
2142 
2143 	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2144 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2145 	else
2146 		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2147 }
2148 
2149 static int ssb_prctl_get(struct task_struct *task)
2150 {
2151 	switch (ssb_mode) {
2152 	case SPEC_STORE_BYPASS_DISABLE:
2153 		return PR_SPEC_DISABLE;
2154 	case SPEC_STORE_BYPASS_SECCOMP:
2155 	case SPEC_STORE_BYPASS_PRCTL:
2156 		if (task_spec_ssb_force_disable(task))
2157 			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2158 		if (task_spec_ssb_noexec(task))
2159 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2160 		if (task_spec_ssb_disable(task))
2161 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2162 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2163 	default:
2164 		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2165 			return PR_SPEC_ENABLE;
2166 		return PR_SPEC_NOT_AFFECTED;
2167 	}
2168 }
2169 
2170 static int ib_prctl_get(struct task_struct *task)
2171 {
2172 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2173 		return PR_SPEC_NOT_AFFECTED;
2174 
2175 	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2176 	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2177 		return PR_SPEC_ENABLE;
2178 	else if (is_spec_ib_user_controlled()) {
2179 		if (task_spec_ib_force_disable(task))
2180 			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2181 		if (task_spec_ib_disable(task))
2182 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2183 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2184 	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2185 	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2186 	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2187 		return PR_SPEC_DISABLE;
2188 	else
2189 		return PR_SPEC_NOT_AFFECTED;
2190 }
2191 
2192 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2193 {
2194 	switch (which) {
2195 	case PR_SPEC_STORE_BYPASS:
2196 		return ssb_prctl_get(task);
2197 	case PR_SPEC_INDIRECT_BRANCH:
2198 		return ib_prctl_get(task);
2199 	case PR_SPEC_L1D_FLUSH:
2200 		return l1d_flush_prctl_get(task);
2201 	default:
2202 		return -ENODEV;
2203 	}
2204 }
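
/*
 * Illustrative sketch for the read side (not kernel code):
 *
 *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 * returns the PR_SPEC_* state assembled by ssb_prctl_get() above, e.g.
 * PR_SPEC_PRCTL | PR_SPEC_DISABLE after the sketch shown further up.
 */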
2205 
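/*
 * Re-establish the boot CPU's speculation control state on a secondary CPU:
 * program the SPEC_CTRL base value if the MSR exists and re-apply the AMD
 * non-SPEC_CTRL SSB disable if that mode is in use.
 */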
2206 void x86_spec_ctrl_setup_ap(void)
2207 {
2208 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2209 		update_spec_ctrl(x86_spec_ctrl_base);
2210 
2211 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2212 		x86_amd_ssb_disable();
2213 }
2214 
2215 bool itlb_multihit_kvm_mitigation;
2216 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
2217 
2218 #undef pr_fmt
2219 #define pr_fmt(fmt)	"L1TF: " fmt
2220 
2221 /* Default mitigation for L1TF-affected CPUs */
2222 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
2223 #if IS_ENABLED(CONFIG_KVM_INTEL)
2224 EXPORT_SYMBOL_GPL(l1tf_mitigation);
2225 #endif
2226 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2227 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
2228 
2229 /*
2230  * These CPUs all support a 44-bit physical address space internally in
2231  * the cache, but CPUID can report a smaller number of physical address
2232  * bits.
2233  *
2234  * The L1TF mitigation uses the topmost address bit for the inversion of
2235  * non-present PTEs. When the installed memory reaches into the topmost
2236  * address bit due to memory holes (observed on machines that report 36
2237  * physical address bits and have 32G RAM installed), the mitigation
2238  * range check in l1tf_select_mitigation() triggers. This is a false
2239  * positive because the mitigation is still possible, as the cache uses
2240  * 44 bits internally. Use the cache bits instead of the reported bits
2241  * and adjust them to 44 on the affected machines if they are smaller.
2242  */
2243 static void override_cache_bits(struct cpuinfo_x86 *c)
2244 {
2245 	if (c->x86 != 6)
2246 		return;
2247 
2248 	switch (c->x86_model) {
2249 	case INTEL_FAM6_NEHALEM:
2250 	case INTEL_FAM6_WESTMERE:
2251 	case INTEL_FAM6_SANDYBRIDGE:
2252 	case INTEL_FAM6_IVYBRIDGE:
2253 	case INTEL_FAM6_HASWELL:
2254 	case INTEL_FAM6_HASWELL_L:
2255 	case INTEL_FAM6_HASWELL_G:
2256 	case INTEL_FAM6_BROADWELL:
2257 	case INTEL_FAM6_BROADWELL_G:
2258 	case INTEL_FAM6_SKYLAKE_L:
2259 	case INTEL_FAM6_SKYLAKE:
2260 	case INTEL_FAM6_KABYLAKE_L:
2261 	case INTEL_FAM6_KABYLAKE:
2262 		if (c->x86_cache_bits < 44)
2263 			c->x86_cache_bits = 44;
2264 		break;
2265 	}
2266 }
2267 
2268 static void __init l1tf_select_mitigation(void)
2269 {
2270 	u64 half_pa;
2271 
2272 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
2273 		return;
2274 
2275 	if (cpu_mitigations_off())
2276 		l1tf_mitigation = L1TF_MITIGATION_OFF;
2277 	else if (cpu_mitigations_auto_nosmt())
2278 		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2279 
2280 	override_cache_bits(&boot_cpu_data);
2281 
2282 	switch (l1tf_mitigation) {
2283 	case L1TF_MITIGATION_OFF:
2284 	case L1TF_MITIGATION_FLUSH_NOWARN:
2285 	case L1TF_MITIGATION_FLUSH:
2286 		break;
2287 	case L1TF_MITIGATION_FLUSH_NOSMT:
2288 	case L1TF_MITIGATION_FULL:
2289 		cpu_smt_disable(false);
2290 		break;
2291 	case L1TF_MITIGATION_FULL_FORCE:
2292 		cpu_smt_disable(true);
2293 		break;
2294 	}
2295 
2296 #if CONFIG_PGTABLE_LEVELS == 2
2297 	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
2298 	return;
2299 #endif
2300 
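	/*
	 * half_pa is MAX_PA/2, derived from the (possibly overridden) cache
	 * address bits; e.g. with the 44 cache bits enforced above this is
	 * 2^43 bytes = 8 TiB.
	 */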
2301 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
2302 	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
2303 			e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
2304 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
2305 		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
2306 				half_pa);
2307 		pr_info("However, doing so will make a part of your RAM unusable.\n");
2308 		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
2309 		return;
2310 	}
2311 
2312 	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
2313 }
2314 
2315 static int __init l1tf_cmdline(char *str)
2316 {
2317 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
2318 		return 0;
2319 
2320 	if (!str)
2321 		return -EINVAL;
2322 
2323 	if (!strcmp(str, "off"))
2324 		l1tf_mitigation = L1TF_MITIGATION_OFF;
2325 	else if (!strcmp(str, "flush,nowarn"))
2326 		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
2327 	else if (!strcmp(str, "flush"))
2328 		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2329 	else if (!strcmp(str, "flush,nosmt"))
2330 		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2331 	else if (!strcmp(str, "full"))
2332 		l1tf_mitigation = L1TF_MITIGATION_FULL;
2333 	else if (!strcmp(str, "full,force"))
2334 		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
2335 
2336 	return 0;
2337 }
2338 early_param("l1tf", l1tf_cmdline);
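/* Example: booting with "l1tf=flush,nosmt" selects L1TF_MITIGATION_FLUSH_NOSMT. */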
2339 
2340 #undef pr_fmt
2341 #define pr_fmt(fmt)	"Speculative Return Stack Overflow: " fmt
2342 
2343 enum srso_mitigation {
2344 	SRSO_MITIGATION_NONE,
2345 	SRSO_MITIGATION_MICROCODE,
2346 	SRSO_MITIGATION_SAFE_RET,
2347 	SRSO_MITIGATION_IBPB,
2348 	SRSO_MITIGATION_IBPB_ON_VMEXIT,
2349 };
2350 
2351 enum srso_mitigation_cmd {
2352 	SRSO_CMD_OFF,
2353 	SRSO_CMD_MICROCODE,
2354 	SRSO_CMD_SAFE_RET,
2355 	SRSO_CMD_IBPB,
2356 	SRSO_CMD_IBPB_ON_VMEXIT,
2357 };
2358 
2359 static const char * const srso_strings[] = {
2360 	[SRSO_MITIGATION_NONE]           = "Vulnerable",
2361 	[SRSO_MITIGATION_MICROCODE]      = "Mitigation: microcode",
2362 	[SRSO_MITIGATION_SAFE_RET]	 = "Mitigation: safe RET",
2363 	[SRSO_MITIGATION_IBPB]		 = "Mitigation: IBPB",
2364 	[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
2365 };
2366 
2367 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
2368 static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
2369 
2370 static int __init srso_parse_cmdline(char *str)
2371 {
2372 	if (!str)
2373 		return -EINVAL;
2374 
2375 	if (!strcmp(str, "off"))
2376 		srso_cmd = SRSO_CMD_OFF;
2377 	else if (!strcmp(str, "microcode"))
2378 		srso_cmd = SRSO_CMD_MICROCODE;
2379 	else if (!strcmp(str, "safe-ret"))
2380 		srso_cmd = SRSO_CMD_SAFE_RET;
2381 	else if (!strcmp(str, "ibpb"))
2382 		srso_cmd = SRSO_CMD_IBPB;
2383 	else if (!strcmp(str, "ibpb-vmexit"))
2384 		srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
2385 	else
2386 		pr_err("Ignoring unknown SRSO option (%s).\n", str);
2387 
2388 	return 0;
2389 }
2390 early_param("spec_rstack_overflow", srso_parse_cmdline);
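/* Example: booting with "spec_rstack_overflow=ibpb-vmexit" selects SRSO_CMD_IBPB_ON_VMEXIT. */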
2391 
2392 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
2393 
2394 static void __init srso_select_mitigation(void)
2395 {
2396 	bool has_microcode;
2397 
2398 	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
2399 		goto pred_cmd;
2400 
2401 	/*
2402 	 * The first check covers the kernel running as a guest, so that
2403 	 * guests can verify whether IBPB is a viable mitigation.
2404 	 */
2405 	has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
2406 	if (!has_microcode) {
2407 		pr_warn("IBPB-extending microcode not applied!\n");
2408 		pr_warn(SRSO_NOTICE);
2409 	} else {
2410 		/*
2411 		 * Set the flag synthetically (even if it lives in a real
2412 		 * CPUID leaf) so that it is visible to guests.
2413 		 */
2414 		setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
2415 
2416 		/*
2417 		 * Zen1/2 with SMT off aren't vulnerable after the right
2418 		 * IBPB microcode has been applied.
2419 		 */
2420 		if ((boot_cpu_data.x86 < 0x19) &&
2421 		    (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED)))
2422 			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
2423 	}
2424 
2425 	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
2426 		if (has_microcode) {
2427 			pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n");
2428 			srso_mitigation = SRSO_MITIGATION_IBPB;
2429 			goto pred_cmd;
2430 		}
2431 	}
2432 
2433 	switch (srso_cmd) {
2434 	case SRSO_CMD_OFF:
2435 		return;
2436 
2437 	case SRSO_CMD_MICROCODE:
2438 		if (has_microcode) {
2439 			srso_mitigation = SRSO_MITIGATION_MICROCODE;
2440 			pr_warn(SRSO_NOTICE);
2441 		}
2442 		break;
2443 
2444 	case SRSO_CMD_SAFE_RET:
2445 		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
2446 			/*
2447 			 * Enable the return thunk for generated code
2448 			 * like ftrace, static_call, etc.
2449 			 */
2450 			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
2451 
2452 			if (boot_cpu_data.x86 == 0x19)
2453 				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
2454 			else
2455 				setup_force_cpu_cap(X86_FEATURE_SRSO);
2456 			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
2457 		} else {
2458 			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
2459 			goto pred_cmd;
2460 		}
2461 		break;
2462 
2463 	case SRSO_CMD_IBPB:
2464 		if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
2465 			if (has_microcode) {
2466 				setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
2467 				srso_mitigation = SRSO_MITIGATION_IBPB;
2468 			}
2469 		} else {
2470 			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
2471 			goto pred_cmd;
2472 		}
2473 		break;
2474 
2475 	case SRSO_CMD_IBPB_ON_VMEXIT:
2476 		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
2477 			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
2478 				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
2479 				srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
2480 			}
2481 		} else {
2482 			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
2483 			goto pred_cmd;
2484 		}
2485 		break;
2486 
2487 	default:
2488 		break;
2489 	}
2490 
2491 	pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
2492 
2493 pred_cmd:
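	/*
	 * If SRSO is not a concern (the CPU enumerates SRSO_NO or the
	 * mitigation is explicitly off) and SBPB is available, use it
	 * instead of a full IBPB for the prediction barrier.
	 */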
2494 	if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
2495 	     boot_cpu_has(X86_FEATURE_SBPB))
2496 		x86_pred_cmd = PRED_CMD_SBPB;
2497 }
2498 
2499 #undef pr_fmt
2500 #define pr_fmt(fmt) fmt
2501 
2502 #ifdef CONFIG_SYSFS
2503 
2504 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
2505 
2506 #if IS_ENABLED(CONFIG_KVM_INTEL)
2507 static const char * const l1tf_vmx_states[] = {
2508 	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
2509 	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
2510 	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
2511 	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
2512 	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
2513 	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
2514 };
2515 
2516 static ssize_t l1tf_show_state(char *buf)
2517 {
2518 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
2519 		return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
2520 
2521 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
2522 	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
2523 	     sched_smt_active())) {
2524 		return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
2525 				  l1tf_vmx_states[l1tf_vmx_mitigation]);
2526 	}
2527 
2528 	return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
2529 			  l1tf_vmx_states[l1tf_vmx_mitigation],
2530 			  sched_smt_active() ? "vulnerable" : "disabled");
2531 }
2532 
2533 static ssize_t itlb_multihit_show_state(char *buf)
2534 {
2535 	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
2536 	    !boot_cpu_has(X86_FEATURE_VMX))
2537 		return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
2538 	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
2539 		return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
2540 	else if (itlb_multihit_kvm_mitigation)
2541 		return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
2542 	else
2543 		return sysfs_emit(buf, "KVM: Vulnerable\n");
2544 }
2545 #else
2546 static ssize_t l1tf_show_state(char *buf)
2547 {
2548 	return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
2549 }
2550 
2551 static ssize_t itlb_multihit_show_state(char *buf)
2552 {
2553 	return sysfs_emit(buf, "Processor vulnerable\n");
2554 }
2555 #endif
2556 
2557 static ssize_t mds_show_state(char *buf)
2558 {
2559 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2560 		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2561 				  mds_strings[mds_mitigation]);
2562 	}
2563 
2564 	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
2565 		return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2566 				  (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
2567 				   sched_smt_active() ? "mitigated" : "disabled"));
2568 	}
2569 
2570 	return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2571 			  sched_smt_active() ? "vulnerable" : "disabled");
2572 }
2573 
2574 static ssize_t tsx_async_abort_show_state(char *buf)
2575 {
2576 	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
2577 	    (taa_mitigation == TAA_MITIGATION_OFF))
2578 		return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
2579 
2580 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2581 		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2582 				  taa_strings[taa_mitigation]);
2583 	}
2584 
2585 	return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
2586 			  sched_smt_active() ? "vulnerable" : "disabled");
2587 }
2588 
2589 static ssize_t mmio_stale_data_show_state(char *buf)
2590 {
2591 	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
2592 		return sysfs_emit(buf, "Unknown: No mitigations\n");
2593 
2594 	if (mmio_mitigation == MMIO_MITIGATION_OFF)
2595 		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
2596 
2597 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2598 		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2599 				  mmio_strings[mmio_mitigation]);
2600 	}
2601 
2602 	return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
2603 			  sched_smt_active() ? "vulnerable" : "disabled");
2604 }
2605 
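/*
 * Helpers assembling the ", STIBP: ...", ", IBPB: ..." and ", PBRSB-eIBRS: ..."
 * fragments of the spectre_v2 sysfs line emitted by spectre_v2_show_state().
 */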
2606 static char *stibp_state(void)
2607 {
2608 	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
2609 	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
2610 		return "";
2611 
2612 	switch (spectre_v2_user_stibp) {
2613 	case SPECTRE_V2_USER_NONE:
2614 		return ", STIBP: disabled";
2615 	case SPECTRE_V2_USER_STRICT:
2616 		return ", STIBP: forced";
2617 	case SPECTRE_V2_USER_STRICT_PREFERRED:
2618 		return ", STIBP: always-on";
2619 	case SPECTRE_V2_USER_PRCTL:
2620 	case SPECTRE_V2_USER_SECCOMP:
2621 		if (static_key_enabled(&switch_to_cond_stibp))
2622 			return ", STIBP: conditional";
2623 	}
2624 	return "";
2625 }
2626 
2627 static char *ibpb_state(void)
2628 {
2629 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
2630 		if (static_key_enabled(&switch_mm_always_ibpb))
2631 			return ", IBPB: always-on";
2632 		if (static_key_enabled(&switch_mm_cond_ibpb))
2633 			return ", IBPB: conditional";
2634 		return ", IBPB: disabled";
2635 	}
2636 	return "";
2637 }
2638 
2639 static char *pbrsb_eibrs_state(void)
2640 {
2641 	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2642 		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
2643 		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
2644 			return ", PBRSB-eIBRS: SW sequence";
2645 		else
2646 			return ", PBRSB-eIBRS: Vulnerable";
2647 	} else {
2648 		return ", PBRSB-eIBRS: Not affected";
2649 	}
2650 }
2651 
2652 static ssize_t spectre_v2_show_state(char *buf)
2653 {
2654 	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
2655 		return sysfs_emit(buf, "Vulnerable: LFENCE\n");
2656 
2657 	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2658 		return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
2659 
2660 	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
2661 	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
2662 		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
2663 
2664 	return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
2665 			  spectre_v2_strings[spectre_v2_enabled],
2666 			  ibpb_state(),
2667 			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
2668 			  stibp_state(),
2669 			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
2670 			  pbrsb_eibrs_state(),
2671 			  spectre_v2_module_string());
2672 }
2673 
2674 static ssize_t srbds_show_state(char *buf)
2675 {
2676 	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
2677 }
2678 
2679 static ssize_t retbleed_show_state(char *buf)
2680 {
2681 	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
2682 	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
2683 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
2684 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
2685 			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
2686 
2687 		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
2688 				  !sched_smt_active() ? "disabled" :
2689 				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2690 				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
2691 				  "enabled with STIBP protection" : "vulnerable");
2692 	}
2693 
2694 	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
2695 }
2696 
2697 static ssize_t srso_show_state(char *buf)
2698 {
2699 	return sysfs_emit(buf, "%s%s\n",
2700 			  srso_strings[srso_mitigation],
2701 			  (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
2702 }
2703 
2704 static ssize_t gds_show_state(char *buf)
2705 {
2706 	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
2707 }
2708 
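/*
 * Common backend for the cpu_show_*() sysfs handlers below
 * (/sys/devices/system/cpu/vulnerabilities/*): dispatch on the bug bit and
 * emit the matching mitigation string.
 */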
2709 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
2710 			       char *buf, unsigned int bug)
2711 {
2712 	if (!boot_cpu_has_bug(bug))
2713 		return sysfs_emit(buf, "Not affected\n");
2714 
2715 	switch (bug) {
2716 	case X86_BUG_CPU_MELTDOWN:
2717 		if (boot_cpu_has(X86_FEATURE_PTI))
2718 			return sysfs_emit(buf, "Mitigation: PTI\n");
2719 
2720 		if (hypervisor_is_type(X86_HYPER_XEN_PV))
2721 			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
2722 
2723 		break;
2724 
2725 	case X86_BUG_SPECTRE_V1:
2726 		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
2727 
2728 	case X86_BUG_SPECTRE_V2:
2729 		return spectre_v2_show_state(buf);
2730 
2731 	case X86_BUG_SPEC_STORE_BYPASS:
2732 		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
2733 
2734 	case X86_BUG_L1TF:
2735 		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
2736 			return l1tf_show_state(buf);
2737 		break;
2738 
2739 	case X86_BUG_MDS:
2740 		return mds_show_state(buf);
2741 
2742 	case X86_BUG_TAA:
2743 		return tsx_async_abort_show_state(buf);
2744 
2745 	case X86_BUG_ITLB_MULTIHIT:
2746 		return itlb_multihit_show_state(buf);
2747 
2748 	case X86_BUG_SRBDS:
2749 		return srbds_show_state(buf);
2750 
2751 	case X86_BUG_MMIO_STALE_DATA:
2752 	case X86_BUG_MMIO_UNKNOWN:
2753 		return mmio_stale_data_show_state(buf);
2754 
2755 	case X86_BUG_RETBLEED:
2756 		return retbleed_show_state(buf);
2757 
2758 	case X86_BUG_SRSO:
2759 		return srso_show_state(buf);
2760 
2761 	case X86_BUG_GDS:
2762 		return gds_show_state(buf);
2763 
2764 	default:
2765 		break;
2766 	}
2767 
2768 	return sysfs_emit(buf, "Vulnerable\n");
2769 }
2770 
2771 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
2772 {
2773 	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
2774 }
2775 
2776 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
2777 {
2778 	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
2779 }
2780 
2781 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
2782 {
2783 	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
2784 }
2785 
2786 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
2787 {
2788 	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
2789 }
2790 
2791 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
2792 {
2793 	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
2794 }
2795 
2796 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
2797 {
2798 	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
2799 }
2800 
2801 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
2802 {
2803 	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
2804 }
2805 
2806 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
2807 {
2808 	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
2809 }
2810 
2811 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
2812 {
2813 	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
2814 }
2815 
2816 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
2817 {
2818 	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
2819 		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
2820 	else
2821 		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
2822 }
2823 
2824 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
2825 {
2826 	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
2827 }
2828 
2829 ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
2830 {
2831 	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
2832 }
2833 
2834 ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
2835 {
2836 	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
2837 }
2838 #endif
2839