1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 1994  Linus Torvalds
4  *
5  *  Cyrix stuff, June 1998 by:
6  *	- Rafael R. Reilova (moved everything from head.S),
7  *        <rreilova@ececs.uc.edu>
8  *	- Channing Corn (tests & fixes),
9  *	- Andrew D. Balsa (code cleanup).
10  */
11 #include <linux/init.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/nospec.h>
15 #include <linux/prctl.h>
16 #include <linux/sched/smt.h>
17 #include <linux/pgtable.h>
18 #include <linux/bpf.h>
19 
20 #include <asm/spec-ctrl.h>
21 #include <asm/cmdline.h>
22 #include <asm/bugs.h>
23 #include <asm/processor.h>
24 #include <asm/processor-flags.h>
25 #include <asm/fpu/api.h>
26 #include <asm/msr.h>
27 #include <asm/vmx.h>
28 #include <asm/paravirt.h>
29 #include <asm/cpu_device_id.h>
30 #include <asm/e820/api.h>
31 #include <asm/hypervisor.h>
32 #include <asm/tlbflush.h>
33 #include <asm/cpu.h>
34 
35 #include "cpu.h"
36 
37 static void __init spectre_v1_select_mitigation(void);
38 static void __init spectre_v2_select_mitigation(void);
39 static void __init retbleed_select_mitigation(void);
40 static void __init spectre_v2_user_select_mitigation(void);
41 static void __init ssb_select_mitigation(void);
42 static void __init l1tf_select_mitigation(void);
43 static void __init mds_select_mitigation(void);
44 static void __init md_clear_update_mitigation(void);
45 static void __init md_clear_select_mitigation(void);
46 static void __init taa_select_mitigation(void);
47 static void __init mmio_select_mitigation(void);
48 static void __init srbds_select_mitigation(void);
49 static void __init l1d_flush_select_mitigation(void);
50 static void __init srso_select_mitigation(void);
51 static void __init gds_select_mitigation(void);
52 
53 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
54 u64 x86_spec_ctrl_base;
55 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
56 
57 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
58 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
59 EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
60 
61 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
62 EXPORT_SYMBOL_GPL(x86_pred_cmd);
63 
64 static u64 __ro_after_init x86_arch_cap_msr;
65 
66 static DEFINE_MUTEX(spec_ctrl_mutex);
67 
68 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
69 
70 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
71 static void update_spec_ctrl(u64 val)
72 {
73 	this_cpu_write(x86_spec_ctrl_current, val);
74 	wrmsrl(MSR_IA32_SPEC_CTRL, val);
75 }
76 
77 /*
78  * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
79  * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
80  */
81 void update_spec_ctrl_cond(u64 val)
82 {
83 	if (this_cpu_read(x86_spec_ctrl_current) == val)
84 		return;
85 
86 	this_cpu_write(x86_spec_ctrl_current, val);
87 
88 	/*
89 	 * When KERNEL_IBRS is enabled, this MSR is written on return-to-user;
90 	 * unless forced, the update can be delayed until that time.
91 	 */
92 	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
93 		wrmsrl(MSR_IA32_SPEC_CTRL, val);
94 }
95 
96 noinstr u64 spec_ctrl_current(void)
97 {
98 	return this_cpu_read(x86_spec_ctrl_current);
99 }
100 EXPORT_SYMBOL_GPL(spec_ctrl_current);
101 
102 /*
103  * AMD specific MSR info for Speculative Store Bypass control.
104  * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
105  */
106 u64 __ro_after_init x86_amd_ls_cfg_base;
107 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
108 
109 /* Control conditional STIBP in switch_to() */
110 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
111 /* Control conditional IBPB in switch_mm() */
112 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
113 /* Control unconditional IBPB in switch_mm() */
114 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
115 
116 /* Control MDS CPU buffer clear before idling (halt, mwait) */
117 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
118 EXPORT_SYMBOL_GPL(mds_idle_clear);
119 
120 /*
121  * Controls whether l1d flush based mitigations are enabled, based on
122  * hw features and the admin setting via the boot parameter;
123  * defaults to false.
124  */
125 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
126 
127 /* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
128 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
129 EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
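/*
 * This key is toggled in mmio_select_mitigation(): the KVM-only buffer
 * clearing is enabled only when no VERW-based mitigation already covers
 * the host.
 */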
130 
131 void __init cpu_select_mitigations(void)
132 {
133 	/*
134 	 * Read the SPEC_CTRL MSR to account for reserved bits which may
135 	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
136 	 * init code as it is not enumerated and depends on the family.
137 	 */
138 	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
139 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
140 
141 		/*
142 		 * A previously running kernel (kexec) may have some controls
143 		 * turned ON. Clear them and let the mitigations setup below
144 		 * rediscover them based on configuration.
145 		 */
146 		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
147 	}
148 
149 	x86_arch_cap_msr = x86_read_arch_cap_msr();
150 
151 	/* Select the proper CPU mitigations before patching alternatives: */
152 	spectre_v1_select_mitigation();
153 	spectre_v2_select_mitigation();
154 	/*
155 	 * retbleed_select_mitigation() relies on the state set by
156 	 * spectre_v2_select_mitigation(); specifically it wants to know about
157 	 * spectre_v2=ibrs.
158 	 */
159 	retbleed_select_mitigation();
160 	/*
161 	 * spectre_v2_user_select_mitigation() relies on the state set by
162 	 * retbleed_select_mitigation(); specifically the STIBP selection is
163 	 * forced for UNRET or IBPB.
164 	 */
165 	spectre_v2_user_select_mitigation();
166 	ssb_select_mitigation();
167 	l1tf_select_mitigation();
168 	md_clear_select_mitigation();
169 	srbds_select_mitigation();
170 	l1d_flush_select_mitigation();
171 
172 	/*
173 	 * srso_select_mitigation() depends on the state set by
174 	 * retbleed_select_mitigation() and must run after it.
175 	 */
176 	srso_select_mitigation();
177 	gds_select_mitigation();
178 }
179 
180 /*
181  * NOTE: This function is *only* called for SVM, since Intel uses
182  * MSR_IA32_SPEC_CTRL for SSBD.
183  */
184 void
185 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
186 {
187 	u64 guestval, hostval;
188 	struct thread_info *ti = current_thread_info();
189 
190 	/*
191 	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
192 	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
193 	 */
194 	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
195 	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
196 		return;
197 
198 	/*
199 	 * If the host has SSBD mitigation enabled, force it in the host's
200 	 * virtual MSR value. If it's not permanently enabled, evaluate
201 	 * current's TIF_SSBD thread flag.
202 	 */
203 	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
204 		hostval = SPEC_CTRL_SSBD;
205 	else
206 		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
207 
208 	/* Sanitize the guest value */
209 	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
210 
211 	if (hostval != guestval) {
212 		unsigned long tif;
213 
214 		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
215 				 ssbd_spec_ctrl_to_tif(hostval);
216 
217 		speculation_ctrl_update(tif);
218 	}
219 }
220 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
221 
222 static void x86_amd_ssb_disable(void)
223 {
224 	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
225 
226 	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
227 		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
228 	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
229 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
230 }
231 
232 #undef pr_fmt
233 #define pr_fmt(fmt)	"MDS: " fmt
234 
235 /* Default mitigation for MDS-affected CPUs */
236 static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
237 static bool mds_nosmt __ro_after_init = false;
238 
239 static const char * const mds_strings[] = {
240 	[MDS_MITIGATION_OFF]	= "Vulnerable",
241 	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
242 	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
243 };
244 
245 static void __init mds_select_mitigation(void)
246 {
247 	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
248 		mds_mitigation = MDS_MITIGATION_OFF;
249 		return;
250 	}
251 
252 	if (mds_mitigation == MDS_MITIGATION_FULL) {
253 		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
254 			mds_mitigation = MDS_MITIGATION_VMWERV;
255 
256 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
257 
258 		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
259 		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
260 			cpu_smt_disable(false);
261 	}
262 }
263 
264 static int __init mds_cmdline(char *str)
265 {
266 	if (!boot_cpu_has_bug(X86_BUG_MDS))
267 		return 0;
268 
269 	if (!str)
270 		return -EINVAL;
271 
272 	if (!strcmp(str, "off"))
273 		mds_mitigation = MDS_MITIGATION_OFF;
274 	else if (!strcmp(str, "full"))
275 		mds_mitigation = MDS_MITIGATION_FULL;
276 	else if (!strcmp(str, "full,nosmt")) {
277 		mds_mitigation = MDS_MITIGATION_FULL;
278 		mds_nosmt = true;
279 	}
280 
281 	return 0;
282 }
283 early_param("mds", mds_cmdline);
284 
285 #undef pr_fmt
286 #define pr_fmt(fmt)	"TAA: " fmt
287 
288 enum taa_mitigations {
289 	TAA_MITIGATION_OFF,
290 	TAA_MITIGATION_UCODE_NEEDED,
291 	TAA_MITIGATION_VERW,
292 	TAA_MITIGATION_TSX_DISABLED,
293 };
294 
295 /* Default mitigation for TAA-affected CPUs */
296 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
297 static bool taa_nosmt __ro_after_init;
298 
299 static const char * const taa_strings[] = {
300 	[TAA_MITIGATION_OFF]		= "Vulnerable",
301 	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
302 	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
303 	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
304 };
305 
306 static void __init taa_select_mitigation(void)
307 {
308 	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
309 		taa_mitigation = TAA_MITIGATION_OFF;
310 		return;
311 	}
312 
313 	/* TSX previously disabled by tsx=off */
314 	if (!boot_cpu_has(X86_FEATURE_RTM)) {
315 		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
316 		return;
317 	}
318 
319 	if (cpu_mitigations_off()) {
320 		taa_mitigation = TAA_MITIGATION_OFF;
321 		return;
322 	}
323 
324 	/*
325 	 * TAA mitigation via VERW is turned off if both
326 	 * tsx_async_abort=off and mds=off are specified.
327 	 */
328 	if (taa_mitigation == TAA_MITIGATION_OFF &&
329 	    mds_mitigation == MDS_MITIGATION_OFF)
330 		return;
331 
332 	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
333 		taa_mitigation = TAA_MITIGATION_VERW;
334 	else
335 		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
336 
337 	/*
338 	 * On MDS_NO=1 CPUs, VERW doesn't clear the CPU buffers even when
339 	 * MD_CLEAR=1. A microcode update fixes this behavior to clear CPU
340 	 * buffers. It also adds support for MSR_IA32_TSX_CTRL which is
341 	 * enumerated by the ARCH_CAP_TSX_CTRL_MSR bit.
342 	 *
343 	 * On MDS_NO=1 CPUs, if ARCH_CAP_TSX_CTRL_MSR is not set, a microcode
344 	 * update is required.
345 	 */
346 	if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
347 	    !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
348 		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
349 
350 	/*
351 	 * TSX is enabled, so select the alternate mitigation for TAA, which is
352 	 * the same as for MDS: force X86_FEATURE_CLEAR_CPU_BUF to clear CPU buffers.
353 	 *
354 	 * For guests that can't determine whether the correct microcode is
355 	 * present on host, enable the mitigation for UCODE_NEEDED as well.
356 	 */
357 	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
358 
359 	if (taa_nosmt || cpu_mitigations_auto_nosmt())
360 		cpu_smt_disable(false);
361 }
362 
363 static int __init tsx_async_abort_parse_cmdline(char *str)
364 {
365 	if (!boot_cpu_has_bug(X86_BUG_TAA))
366 		return 0;
367 
368 	if (!str)
369 		return -EINVAL;
370 
371 	if (!strcmp(str, "off")) {
372 		taa_mitigation = TAA_MITIGATION_OFF;
373 	} else if (!strcmp(str, "full")) {
374 		taa_mitigation = TAA_MITIGATION_VERW;
375 	} else if (!strcmp(str, "full,nosmt")) {
376 		taa_mitigation = TAA_MITIGATION_VERW;
377 		taa_nosmt = true;
378 	}
379 
380 	return 0;
381 }
382 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
383 
384 #undef pr_fmt
385 #define pr_fmt(fmt)	"MMIO Stale Data: " fmt
386 
387 enum mmio_mitigations {
388 	MMIO_MITIGATION_OFF,
389 	MMIO_MITIGATION_UCODE_NEEDED,
390 	MMIO_MITIGATION_VERW,
391 };
392 
393 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
394 static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
395 static bool mmio_nosmt __ro_after_init = false;
396 
397 static const char * const mmio_strings[] = {
398 	[MMIO_MITIGATION_OFF]		= "Vulnerable",
399 	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
400 	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
401 };
402 
403 static void __init mmio_select_mitigation(void)
404 {
405 	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
406 	     boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
407 	     cpu_mitigations_off()) {
408 		mmio_mitigation = MMIO_MITIGATION_OFF;
409 		return;
410 	}
411 
412 	if (mmio_mitigation == MMIO_MITIGATION_OFF)
413 		return;
414 
415 	/*
416 	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
417 	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
418 	 */
419 	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
420 					      boot_cpu_has(X86_FEATURE_RTM)))
421 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
422 
423 	/*
424 	 * X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW based
425 	 * mitigations, disable KVM-only mitigation in that case.
426 	 */
427 	if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
428 		static_branch_disable(&mmio_stale_data_clear);
429 	else
430 		static_branch_enable(&mmio_stale_data_clear);
431 
432 	/*
433 	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
434 	 * be propagated to uncore buffers, clearing the Fill buffers on idle
435 	 * is required irrespective of SMT state.
436 	 */
437 	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
438 		static_branch_enable(&mds_idle_clear);
439 
440 	/*
441 	 * Check if the system has the right microcode.
442 	 *
443 	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
444 	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
445 	 * affected systems.
446 	 */
447 	if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
448 	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
449 	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
450 	     !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
451 		mmio_mitigation = MMIO_MITIGATION_VERW;
452 	else
453 		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
454 
455 	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
456 		cpu_smt_disable(false);
457 }
458 
459 static int __init mmio_stale_data_parse_cmdline(char *str)
460 {
461 	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
462 		return 0;
463 
464 	if (!str)
465 		return -EINVAL;
466 
467 	if (!strcmp(str, "off")) {
468 		mmio_mitigation = MMIO_MITIGATION_OFF;
469 	} else if (!strcmp(str, "full")) {
470 		mmio_mitigation = MMIO_MITIGATION_VERW;
471 	} else if (!strcmp(str, "full,nosmt")) {
472 		mmio_mitigation = MMIO_MITIGATION_VERW;
473 		mmio_nosmt = true;
474 	}
475 
476 	return 0;
477 }
478 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
479 
480 #undef pr_fmt
481 #define pr_fmt(fmt)	"Register File Data Sampling: " fmt
482 
483 enum rfds_mitigations {
484 	RFDS_MITIGATION_OFF,
485 	RFDS_MITIGATION_VERW,
486 	RFDS_MITIGATION_UCODE_NEEDED,
487 };
488 
489 /* Default mitigation for Register File Data Sampling */
490 static enum rfds_mitigations rfds_mitigation __ro_after_init =
491 	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_VERW : RFDS_MITIGATION_OFF;
492 
493 static const char * const rfds_strings[] = {
494 	[RFDS_MITIGATION_OFF]			= "Vulnerable",
495 	[RFDS_MITIGATION_VERW]			= "Mitigation: Clear Register File",
496 	[RFDS_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
497 };
498 
499 static void __init rfds_select_mitigation(void)
500 {
501 	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
502 		rfds_mitigation = RFDS_MITIGATION_OFF;
503 		return;
504 	}
505 	if (rfds_mitigation == RFDS_MITIGATION_OFF)
506 		return;
507 
508 	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
509 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
510 	else
511 		rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
512 }
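/*
 * Like MDS/TAA/MMIO above, RFDS reuses the VERW-based buffer clearing: when
 * the microcode advertises RFDS_CLEAR, forcing X86_FEATURE_CLEAR_CPU_BUF is
 * all that is needed; otherwise only a microcode update can help.
 */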
513 
514 static __init int rfds_parse_cmdline(char *str)
515 {
516 	if (!str)
517 		return -EINVAL;
518 
519 	if (!boot_cpu_has_bug(X86_BUG_RFDS))
520 		return 0;
521 
522 	if (!strcmp(str, "off"))
523 		rfds_mitigation = RFDS_MITIGATION_OFF;
524 	else if (!strcmp(str, "on"))
525 		rfds_mitigation = RFDS_MITIGATION_VERW;
526 
527 	return 0;
528 }
529 early_param("reg_file_data_sampling", rfds_parse_cmdline);
530 
531 #undef pr_fmt
532 #define pr_fmt(fmt)     "" fmt
533 
534 static void __init md_clear_update_mitigation(void)
535 {
536 	if (cpu_mitigations_off())
537 		return;
538 
539 	if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
540 		goto out;
541 
542 	/*
543 	 * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
544 	 * Stale Data mitigation, if necessary.
545 	 */
546 	if (mds_mitigation == MDS_MITIGATION_OFF &&
547 	    boot_cpu_has_bug(X86_BUG_MDS)) {
548 		mds_mitigation = MDS_MITIGATION_FULL;
549 		mds_select_mitigation();
550 	}
551 	if (taa_mitigation == TAA_MITIGATION_OFF &&
552 	    boot_cpu_has_bug(X86_BUG_TAA)) {
553 		taa_mitigation = TAA_MITIGATION_VERW;
554 		taa_select_mitigation();
555 	}
556 	/*
557 	 * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
558 	 * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
559 	 */
560 	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
561 		mmio_mitigation = MMIO_MITIGATION_VERW;
562 		mmio_select_mitigation();
563 	}
564 	if (rfds_mitigation == RFDS_MITIGATION_OFF &&
565 	    boot_cpu_has_bug(X86_BUG_RFDS)) {
566 		rfds_mitigation = RFDS_MITIGATION_VERW;
567 		rfds_select_mitigation();
568 	}
569 out:
570 	if (boot_cpu_has_bug(X86_BUG_MDS))
571 		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
572 	if (boot_cpu_has_bug(X86_BUG_TAA))
573 		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
574 	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
575 		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
576 	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
577 		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
578 	if (boot_cpu_has_bug(X86_BUG_RFDS))
579 		pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
580 }
581 
582 static void __init md_clear_select_mitigation(void)
583 {
584 	mds_select_mitigation();
585 	taa_select_mitigation();
586 	mmio_select_mitigation();
587 	rfds_select_mitigation();
588 
589 	/*
590 	 * As these mitigations are inter-related and rely on the VERW instruction
591 	 * to clear the microarchitectural buffers, update and print their status
592 	 * after mitigation selection is done for each of these vulnerabilities.
593 	 */
594 	md_clear_update_mitigation();
595 }
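/*
 * Sketch of the two-pass flow above: each *_select_mitigation() first picks
 * a mode based on its own bug and cmdline state; md_clear_update_mitigation()
 * then re-runs them so that a VERW mitigation enabled for one vulnerability
 * is accounted for (and reported) for the others it implicitly covers.
 */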
596 
597 #undef pr_fmt
598 #define pr_fmt(fmt)	"SRBDS: " fmt
599 
600 enum srbds_mitigations {
601 	SRBDS_MITIGATION_OFF,
602 	SRBDS_MITIGATION_UCODE_NEEDED,
603 	SRBDS_MITIGATION_FULL,
604 	SRBDS_MITIGATION_TSX_OFF,
605 	SRBDS_MITIGATION_HYPERVISOR,
606 };
607 
608 static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
609 
610 static const char * const srbds_strings[] = {
611 	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
612 	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
613 	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
614 	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
615 	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
616 };
617 
618 static bool srbds_off;
619 
620 void update_srbds_msr(void)
621 {
622 	u64 mcu_ctrl;
623 
624 	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
625 		return;
626 
627 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
628 		return;
629 
630 	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
631 		return;
632 
633 	/*
634 	 * An MDS_NO CPU for which the SRBDS mitigation is not needed (TSX
635 	 * disabled) may not have received the microcode that adds the SRBDS MSR.
636 	 */
637 	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
638 		return;
639 
640 	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
641 
642 	switch (srbds_mitigation) {
643 	case SRBDS_MITIGATION_OFF:
644 	case SRBDS_MITIGATION_TSX_OFF:
645 		mcu_ctrl |= RNGDS_MITG_DIS;
646 		break;
647 	case SRBDS_MITIGATION_FULL:
648 		mcu_ctrl &= ~RNGDS_MITG_DIS;
649 		break;
650 	default:
651 		break;
652 	}
653 
654 	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
655 }
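/*
 * Note: both SRBDS_MITIGATION_OFF and SRBDS_MITIGATION_TSX_OFF set
 * RNGDS_MITG_DIS above, i.e. the microcode mitigation is switched off both
 * when it is not wanted and when TSX being disabled makes it unnecessary.
 */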
656 
657 static void __init srbds_select_mitigation(void)
658 {
659 	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
660 		return;
661 
662 	/*
663 	 * Check to see if this is one of the MDS_NO systems supporting TSX that
664 	 * are only exposed to SRBDS when TSX is enabled or when the CPU is
665 	 * affected by the Processor MMIO Stale Data vulnerability.
666 	 */
667 	if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
668 	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
669 		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
670 	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
671 		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
672 	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
673 		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
674 	else if (cpu_mitigations_off() || srbds_off)
675 		srbds_mitigation = SRBDS_MITIGATION_OFF;
676 
677 	update_srbds_msr();
678 	pr_info("%s\n", srbds_strings[srbds_mitigation]);
679 }
680 
681 static int __init srbds_parse_cmdline(char *str)
682 {
683 	if (!str)
684 		return -EINVAL;
685 
686 	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
687 		return 0;
688 
689 	srbds_off = !strcmp(str, "off");
690 	return 0;
691 }
692 early_param("srbds", srbds_parse_cmdline);
693 
694 #undef pr_fmt
695 #define pr_fmt(fmt)     "L1D Flush : " fmt
696 
697 enum l1d_flush_mitigations {
698 	L1D_FLUSH_OFF = 0,
699 	L1D_FLUSH_ON,
700 };
701 
702 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
703 
704 static void __init l1d_flush_select_mitigation(void)
705 {
706 	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
707 		return;
708 
709 	static_branch_enable(&switch_mm_cond_l1d_flush);
710 	pr_info("Conditional flush on switch_mm() enabled\n");
711 }
712 
713 static int __init l1d_flush_parse_cmdline(char *str)
714 {
715 	if (!strcmp(str, "on"))
716 		l1d_flush_mitigation = L1D_FLUSH_ON;
717 
718 	return 0;
719 }
720 early_param("l1d_flush", l1d_flush_parse_cmdline);
721 
722 #undef pr_fmt
723 #define pr_fmt(fmt)	"GDS: " fmt
724 
725 enum gds_mitigations {
726 	GDS_MITIGATION_OFF,
727 	GDS_MITIGATION_UCODE_NEEDED,
728 	GDS_MITIGATION_FORCE,
729 	GDS_MITIGATION_FULL,
730 	GDS_MITIGATION_FULL_LOCKED,
731 	GDS_MITIGATION_HYPERVISOR,
732 };
733 
734 #if IS_ENABLED(CONFIG_MITIGATION_GDS_FORCE)
735 static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
736 #else
737 static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
738 #endif
739 
740 static const char * const gds_strings[] = {
741 	[GDS_MITIGATION_OFF]		= "Vulnerable",
742 	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
743 	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
744 	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
745 	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
746 	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
747 };
748 
749 bool gds_ucode_mitigated(void)
750 {
751 	return (gds_mitigation == GDS_MITIGATION_FULL ||
752 		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
753 }
754 EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
755 
756 void update_gds_msr(void)
757 {
758 	u64 mcu_ctrl_after;
759 	u64 mcu_ctrl;
760 
761 	switch (gds_mitigation) {
762 	case GDS_MITIGATION_OFF:
763 		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
764 		mcu_ctrl |= GDS_MITG_DIS;
765 		break;
766 	case GDS_MITIGATION_FULL_LOCKED:
767 		/*
768 		 * The LOCKED state comes from the boot CPU. APs might not have
769 		 * the same state. Make sure the mitigation is enabled on all
770 		 * CPUs.
771 		 */
772 	case GDS_MITIGATION_FULL:
773 		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
774 		mcu_ctrl &= ~GDS_MITG_DIS;
775 		break;
776 	case GDS_MITIGATION_FORCE:
777 	case GDS_MITIGATION_UCODE_NEEDED:
778 	case GDS_MITIGATION_HYPERVISOR:
779 		return;
780 	}
781 
782 	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
783 
784 	/*
785 	 * Check to make sure that the WRMSR value was not ignored. Writes to
786 	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
787 	 * processor was not.
788 	 */
789 	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
790 	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
791 }
792 
793 static void __init gds_select_mitigation(void)
794 {
795 	u64 mcu_ctrl;
796 
797 	if (!boot_cpu_has_bug(X86_BUG_GDS))
798 		return;
799 
800 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
801 		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
802 		goto out;
803 	}
804 
805 	if (cpu_mitigations_off())
806 		gds_mitigation = GDS_MITIGATION_OFF;
807 	/* Will verify below that mitigation _can_ be disabled */
808 
809 	/* No microcode */
810 	if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
811 		if (gds_mitigation == GDS_MITIGATION_FORCE) {
812 			/*
813 			 * This only needs to be done on the boot CPU, so do it
814 			 * here rather than in update_gds_msr()
815 			 */
816 			setup_clear_cpu_cap(X86_FEATURE_AVX);
817 			pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
818 		} else {
819 			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
820 		}
821 		goto out;
822 	}
823 
824 	/* Microcode has mitigation, use it */
825 	if (gds_mitigation == GDS_MITIGATION_FORCE)
826 		gds_mitigation = GDS_MITIGATION_FULL;
827 
828 	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
829 	if (mcu_ctrl & GDS_MITG_LOCKED) {
830 		if (gds_mitigation == GDS_MITIGATION_OFF)
831 			pr_warn("Mitigation locked. Disable failed.\n");
832 
833 		/*
834 		 * The mitigation is selected from the boot CPU. All other CPUs
835 		 * _should_ have the same state. If the boot CPU isn't locked
836 		 * but others are then update_gds_msr() will WARN() of the state
837 		 * mismatch. If the boot CPU is locked update_gds_msr() will
838 		 * ensure the other CPUs have the mitigation enabled.
839 		 */
840 		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
841 	}
842 
843 	update_gds_msr();
844 out:
845 	pr_info("%s\n", gds_strings[gds_mitigation]);
846 }
847 
848 static int __init gds_parse_cmdline(char *str)
849 {
850 	if (!str)
851 		return -EINVAL;
852 
853 	if (!boot_cpu_has_bug(X86_BUG_GDS))
854 		return 0;
855 
856 	if (!strcmp(str, "off"))
857 		gds_mitigation = GDS_MITIGATION_OFF;
858 	else if (!strcmp(str, "force"))
859 		gds_mitigation = GDS_MITIGATION_FORCE;
860 
861 	return 0;
862 }
863 early_param("gather_data_sampling", gds_parse_cmdline);
864 
865 #undef pr_fmt
866 #define pr_fmt(fmt)     "Spectre V1 : " fmt
867 
868 enum spectre_v1_mitigation {
869 	SPECTRE_V1_MITIGATION_NONE,
870 	SPECTRE_V1_MITIGATION_AUTO,
871 };
872 
873 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
874 	SPECTRE_V1_MITIGATION_AUTO;
875 
876 static const char * const spectre_v1_strings[] = {
877 	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
878 	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
879 };
880 
881 /*
882  * Does SMAP provide full mitigation against speculative kernel access to
883  * userspace?
884  */
885 static bool smap_works_speculatively(void)
886 {
887 	if (!boot_cpu_has(X86_FEATURE_SMAP))
888 		return false;
889 
890 	/*
891 	 * On CPUs which are vulnerable to Meltdown, SMAP does not
892 	 * prevent speculative access to user data in the L1 cache.
893 	 * Consider SMAP to be non-functional as a mitigation on these
894 	 * CPUs.
895 	 */
896 	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
897 		return false;
898 
899 	return true;
900 }
901 
902 static void __init spectre_v1_select_mitigation(void)
903 {
904 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
905 		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
906 		return;
907 	}
908 
909 	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
910 		/*
911 		 * With Spectre v1, a user can speculatively control either
912 		 * path of a conditional swapgs with a user-controlled GS
913 		 * value.  The mitigation is to add lfences to both code paths.
914 		 *
915 		 * If FSGSBASE is enabled, the user can put a kernel address in
916 		 * GS, in which case SMAP provides no protection.
917 		 *
918 		 * If FSGSBASE is disabled, the user can only put a user space
919 		 * address in GS.  That makes an attack harder, but still
920 		 * possible if there's no SMAP protection.
921 		 */
922 		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
923 		    !smap_works_speculatively()) {
924 			/*
925 			 * Mitigation can be provided from SWAPGS itself or
926 			 * PTI as the CR3 write in the Meltdown mitigation
927 			 * is serializing.
928 			 *
929 			 * If neither is there, mitigate with an LFENCE to
930 			 * stop speculation through swapgs.
931 			 */
932 			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
933 			    !boot_cpu_has(X86_FEATURE_PTI))
934 				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
935 
936 			/*
937 			 * Enable lfences in the kernel entry (non-swapgs)
938 			 * paths, to prevent user entry from speculatively
939 			 * skipping swapgs.
940 			 */
941 			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
942 		}
943 	}
944 
945 	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
946 }
947 
948 static int __init nospectre_v1_cmdline(char *str)
949 {
950 	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
951 	return 0;
952 }
953 early_param("nospectre_v1", nospectre_v1_cmdline);
954 
955 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
956 
957 #undef pr_fmt
958 #define pr_fmt(fmt)     "RETBleed: " fmt
959 
960 enum retbleed_mitigation {
961 	RETBLEED_MITIGATION_NONE,
962 	RETBLEED_MITIGATION_UNRET,
963 	RETBLEED_MITIGATION_IBPB,
964 	RETBLEED_MITIGATION_IBRS,
965 	RETBLEED_MITIGATION_EIBRS,
966 	RETBLEED_MITIGATION_STUFF,
967 };
968 
969 enum retbleed_mitigation_cmd {
970 	RETBLEED_CMD_OFF,
971 	RETBLEED_CMD_AUTO,
972 	RETBLEED_CMD_UNRET,
973 	RETBLEED_CMD_IBPB,
974 	RETBLEED_CMD_STUFF,
975 };
976 
977 static const char * const retbleed_strings[] = {
978 	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
979 	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
980 	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
981 	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
982 	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
983 	[RETBLEED_MITIGATION_STUFF]	= "Mitigation: Stuffing",
984 };
985 
986 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
987 	RETBLEED_MITIGATION_NONE;
988 static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
989 	RETBLEED_CMD_AUTO;
990 
991 static int __ro_after_init retbleed_nosmt = false;
992 
993 static int __init retbleed_parse_cmdline(char *str)
994 {
995 	if (!str)
996 		return -EINVAL;
997 
998 	while (str) {
999 		char *next = strchr(str, ',');
1000 		if (next) {
1001 			*next = 0;
1002 			next++;
1003 		}
1004 
1005 		if (!strcmp(str, "off")) {
1006 			retbleed_cmd = RETBLEED_CMD_OFF;
1007 		} else if (!strcmp(str, "auto")) {
1008 			retbleed_cmd = RETBLEED_CMD_AUTO;
1009 		} else if (!strcmp(str, "unret")) {
1010 			retbleed_cmd = RETBLEED_CMD_UNRET;
1011 		} else if (!strcmp(str, "ibpb")) {
1012 			retbleed_cmd = RETBLEED_CMD_IBPB;
1013 		} else if (!strcmp(str, "stuff")) {
1014 			retbleed_cmd = RETBLEED_CMD_STUFF;
1015 		} else if (!strcmp(str, "nosmt")) {
1016 			retbleed_nosmt = true;
1017 		} else if (!strcmp(str, "force")) {
1018 			setup_force_cpu_bug(X86_BUG_RETBLEED);
1019 		} else {
1020 			pr_err("Ignoring unknown retbleed option (%s).\n", str);
1021 		}
1022 
1023 		str = next;
1024 	}
1025 
1026 	return 0;
1027 }
1028 early_param("retbleed", retbleed_parse_cmdline);
1029 
1030 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
1031 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
1032 
1033 static void __init retbleed_select_mitigation(void)
1034 {
1035 	bool mitigate_smt = false;
1036 
1037 	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
1038 		return;
1039 
1040 	switch (retbleed_cmd) {
1041 	case RETBLEED_CMD_OFF:
1042 		return;
1043 
1044 	case RETBLEED_CMD_UNRET:
1045 		if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
1046 			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1047 		} else {
1048 			pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
1049 			goto do_cmd_auto;
1050 		}
1051 		break;
1052 
1053 	case RETBLEED_CMD_IBPB:
1054 		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
1055 			pr_err("WARNING: CPU does not support IBPB.\n");
1056 			goto do_cmd_auto;
1057 		} else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
1058 			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1059 		} else {
1060 			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
1061 			goto do_cmd_auto;
1062 		}
1063 		break;
1064 
1065 	case RETBLEED_CMD_STUFF:
1066 		if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) &&
1067 		    spectre_v2_enabled == SPECTRE_V2_RETPOLINE) {
1068 			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1069 
1070 		} else {
1071 			if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))
1072 				pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
1073 			else
1074 				pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
1075 
1076 			goto do_cmd_auto;
1077 		}
1078 		break;
1079 
1080 do_cmd_auto:
1081 	case RETBLEED_CMD_AUTO:
1082 		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1083 		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
1084 			if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
1085 				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1086 			else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
1087 				 boot_cpu_has(X86_FEATURE_IBPB))
1088 				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1089 		}
1090 
1091 		/*
1092 		 * The Intel mitigation (IBRS or eIBRS) was already selected in
1093 		 * spectre_v2_select_mitigation().  'retbleed_mitigation' will
1094 		 * be set accordingly below.
1095 		 */
1096 
1097 		break;
1098 	}
1099 
1100 	switch (retbleed_mitigation) {
1101 	case RETBLEED_MITIGATION_UNRET:
1102 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1103 		setup_force_cpu_cap(X86_FEATURE_UNRET);
1104 
1105 		x86_return_thunk = retbleed_return_thunk;
1106 
1107 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
1108 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
1109 			pr_err(RETBLEED_UNTRAIN_MSG);
1110 
1111 		mitigate_smt = true;
1112 		break;
1113 
1114 	case RETBLEED_MITIGATION_IBPB:
1115 		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1116 		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
1117 		mitigate_smt = true;
1118 		break;
1119 
1120 	case RETBLEED_MITIGATION_STUFF:
1121 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1122 		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1123 
1124 		x86_return_thunk = call_depth_return_thunk;
1125 		break;
1126 
1127 	default:
1128 		break;
1129 	}
1130 
1131 	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
1132 	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
1133 		cpu_smt_disable(false);
1134 
1135 	/*
1136 	 * Let IBRS trump all on Intel without affecting the effects of the
1137 	 * retbleed= cmdline option, except for call depth based stuffing.
1138 	 */
1139 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1140 		switch (spectre_v2_enabled) {
1141 		case SPECTRE_V2_IBRS:
1142 			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1143 			break;
1144 		case SPECTRE_V2_EIBRS:
1145 		case SPECTRE_V2_EIBRS_RETPOLINE:
1146 		case SPECTRE_V2_EIBRS_LFENCE:
1147 			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1148 			break;
1149 		default:
1150 			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
1151 				pr_err(RETBLEED_INTEL_MSG);
1152 		}
1153 	}
1154 
1155 	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
1156 }
1157 
1158 #undef pr_fmt
1159 #define pr_fmt(fmt)     "Spectre V2 : " fmt
1160 
1161 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
1162 	SPECTRE_V2_USER_NONE;
1163 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
1164 	SPECTRE_V2_USER_NONE;
1165 
1166 #ifdef CONFIG_MITIGATION_RETPOLINE
1167 static bool spectre_v2_bad_module;
1168 
1169 bool retpoline_module_ok(bool has_retpoline)
1170 {
1171 	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1172 		return true;
1173 
1174 	pr_err("System may be vulnerable to spectre v2\n");
1175 	spectre_v2_bad_module = true;
1176 	return false;
1177 }
1178 
1179 static inline const char *spectre_v2_module_string(void)
1180 {
1181 	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1182 }
1183 #else
1184 static inline const char *spectre_v2_module_string(void) { return ""; }
1185 #endif
1186 
1187 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1188 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1189 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1190 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1191 
1192 #ifdef CONFIG_BPF_SYSCALL
1193 void unpriv_ebpf_notify(int new_state)
1194 {
1195 	if (new_state)
1196 		return;
1197 
1198 	/* Unprivileged eBPF is enabled */
1199 
1200 	switch (spectre_v2_enabled) {
1201 	case SPECTRE_V2_EIBRS:
1202 		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1203 		break;
1204 	case SPECTRE_V2_EIBRS_LFENCE:
1205 		if (sched_smt_active())
1206 			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1207 		break;
1208 	default:
1209 		break;
1210 	}
1211 }
1212 #endif
1213 
1214 static inline bool match_option(const char *arg, int arglen, const char *opt)
1215 {
1216 	int len = strlen(opt);
1217 
1218 	return len == arglen && !strncmp(arg, opt, len);
1219 }
1220 
1221 /* The kernel command line selection for spectre v2 */
1222 enum spectre_v2_mitigation_cmd {
1223 	SPECTRE_V2_CMD_NONE,
1224 	SPECTRE_V2_CMD_AUTO,
1225 	SPECTRE_V2_CMD_FORCE,
1226 	SPECTRE_V2_CMD_RETPOLINE,
1227 	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1228 	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1229 	SPECTRE_V2_CMD_EIBRS,
1230 	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1231 	SPECTRE_V2_CMD_EIBRS_LFENCE,
1232 	SPECTRE_V2_CMD_IBRS,
1233 };
1234 
1235 enum spectre_v2_user_cmd {
1236 	SPECTRE_V2_USER_CMD_NONE,
1237 	SPECTRE_V2_USER_CMD_AUTO,
1238 	SPECTRE_V2_USER_CMD_FORCE,
1239 	SPECTRE_V2_USER_CMD_PRCTL,
1240 	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1241 	SPECTRE_V2_USER_CMD_SECCOMP,
1242 	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1243 };
1244 
1245 static const char * const spectre_v2_user_strings[] = {
1246 	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
1247 	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
1248 	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
1249 	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
1250 	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
1251 };
1252 
1253 static const struct {
1254 	const char			*option;
1255 	enum spectre_v2_user_cmd	cmd;
1256 	bool				secure;
1257 } v2_user_options[] __initconst = {
1258 	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
1259 	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
1260 	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
1261 	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
1262 	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
1263 	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
1264 	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
1265 };
1266 
1267 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
1268 {
1269 	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1270 		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
1271 }
1272 
1273 static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
1274 
1275 static enum spectre_v2_user_cmd __init
1276 spectre_v2_parse_user_cmdline(void)
1277 {
1278 	char arg[20];
1279 	int ret, i;
1280 
1281 	switch (spectre_v2_cmd) {
1282 	case SPECTRE_V2_CMD_NONE:
1283 		return SPECTRE_V2_USER_CMD_NONE;
1284 	case SPECTRE_V2_CMD_FORCE:
1285 		return SPECTRE_V2_USER_CMD_FORCE;
1286 	default:
1287 		break;
1288 	}
1289 
1290 	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
1291 				  arg, sizeof(arg));
1292 	if (ret < 0)
1293 		return SPECTRE_V2_USER_CMD_AUTO;
1294 
1295 	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
1296 		if (match_option(arg, ret, v2_user_options[i].option)) {
1297 			spec_v2_user_print_cond(v2_user_options[i].option,
1298 						v2_user_options[i].secure);
1299 			return v2_user_options[i].cmd;
1300 		}
1301 	}
1302 
1303 	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
1304 	return SPECTRE_V2_USER_CMD_AUTO;
1305 }
1306 
1307 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1308 {
1309 	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1310 }
1311 
1312 static void __init
1313 spectre_v2_user_select_mitigation(void)
1314 {
1315 	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
1316 	bool smt_possible = IS_ENABLED(CONFIG_SMP);
1317 	enum spectre_v2_user_cmd cmd;
1318 
1319 	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1320 		return;
1321 
1322 	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
1323 	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
1324 		smt_possible = false;
1325 
1326 	cmd = spectre_v2_parse_user_cmdline();
1327 	switch (cmd) {
1328 	case SPECTRE_V2_USER_CMD_NONE:
1329 		goto set_mode;
1330 	case SPECTRE_V2_USER_CMD_FORCE:
1331 		mode = SPECTRE_V2_USER_STRICT;
1332 		break;
1333 	case SPECTRE_V2_USER_CMD_AUTO:
1334 	case SPECTRE_V2_USER_CMD_PRCTL:
1335 	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1336 		mode = SPECTRE_V2_USER_PRCTL;
1337 		break;
1338 	case SPECTRE_V2_USER_CMD_SECCOMP:
1339 	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1340 		if (IS_ENABLED(CONFIG_SECCOMP))
1341 			mode = SPECTRE_V2_USER_SECCOMP;
1342 		else
1343 			mode = SPECTRE_V2_USER_PRCTL;
1344 		break;
1345 	}
1346 
1347 	/* Initialize Indirect Branch Prediction Barrier */
1348 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
1349 		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
1350 
1351 		spectre_v2_user_ibpb = mode;
1352 		switch (cmd) {
1353 		case SPECTRE_V2_USER_CMD_NONE:
1354 			break;
1355 		case SPECTRE_V2_USER_CMD_FORCE:
1356 		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1357 		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1358 			static_branch_enable(&switch_mm_always_ibpb);
1359 			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1360 			break;
1361 		case SPECTRE_V2_USER_CMD_PRCTL:
1362 		case SPECTRE_V2_USER_CMD_AUTO:
1363 		case SPECTRE_V2_USER_CMD_SECCOMP:
1364 			static_branch_enable(&switch_mm_cond_ibpb);
1365 			break;
1366 		}
1367 
1368 		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
1369 			static_key_enabled(&switch_mm_always_ibpb) ?
1370 			"always-on" : "conditional");
1371 	}
1372 
1373 	/*
1374 	 * If the CPU has no STIBP, if Intel enhanced IBRS is enabled, or if SMT
1375 	 * is impossible, STIBP is not required.
1376 	 *
1377 	 * Intel's Enhanced IBRS also protects against cross-thread branch target
1378 	 * injection in user-mode as the IBRS bit remains always set which
1379 	 * implicitly enables cross-thread protections.  However, in legacy IBRS
1380 	 * mode, the IBRS bit is set only on kernel entry and cleared on return
1381 	 * to userspace.  AMD Automatic IBRS also does not protect userspace.
1382 	 * These modes therefore disable the implicit cross-thread protection,
1383 	 * so allow for STIBP to be selected in those cases.
1384 	 */
1385 	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1386 	    !smt_possible ||
1387 	    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1388 	     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
1389 		return;
1390 
1391 	/*
1392 	 * At this point, an STIBP mode other than "off" has been set.
1393 	 * If STIBP support is not being forced, check if STIBP always-on
1394 	 * is preferred.
1395 	 */
1396 	if (mode != SPECTRE_V2_USER_STRICT &&
1397 	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1398 		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1399 
1400 	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
1401 	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
1402 		if (mode != SPECTRE_V2_USER_STRICT &&
1403 		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
1404 			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
1405 		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1406 	}
1407 
1408 	spectre_v2_user_stibp = mode;
1409 
1410 set_mode:
1411 	pr_info("%s\n", spectre_v2_user_strings[mode]);
1412 }
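/*
 * Note: spectre_v2_user_ibpb and spectre_v2_user_stibp are tracked separately
 * above because the "prctl,ibpb" and "seccomp,ibpb" variants force IBPB into
 * strict mode while leaving the STIBP mode selection unchanged.
 */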
1413 
1414 static const char * const spectre_v2_strings[] = {
1415 	[SPECTRE_V2_NONE]			= "Vulnerable",
1416 	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
1417 	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
1418 	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced / Automatic IBRS",
1419 	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced / Automatic IBRS + LFENCE",
1420 	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced / Automatic IBRS + Retpolines",
1421 	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
1422 };
1423 
1424 static const struct {
1425 	const char *option;
1426 	enum spectre_v2_mitigation_cmd cmd;
1427 	bool secure;
1428 } mitigation_options[] __initconst = {
1429 	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
1430 	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
1431 	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
1432 	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
1433 	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
1434 	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
1435 	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
1436 	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
1437 	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
1438 	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
1439 	{ "ibrs",		SPECTRE_V2_CMD_IBRS,              false },
1440 };
1441 
1442 static void __init spec_v2_print_cond(const char *reason, bool secure)
1443 {
1444 	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1445 		pr_info("%s selected on command line.\n", reason);
1446 }
1447 
1448 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1449 {
1450 	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
1451 	char arg[20];
1452 	int ret, i;
1453 
1454 	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
1455 	    cpu_mitigations_off())
1456 		return SPECTRE_V2_CMD_NONE;
1457 
1458 	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
1459 	if (ret < 0)
1460 		return SPECTRE_V2_CMD_AUTO;
1461 
1462 	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
1463 		if (!match_option(arg, ret, mitigation_options[i].option))
1464 			continue;
1465 		cmd = mitigation_options[i].cmd;
1466 		break;
1467 	}
1468 
1469 	if (i >= ARRAY_SIZE(mitigation_options)) {
1470 		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1471 		return SPECTRE_V2_CMD_AUTO;
1472 	}
1473 
1474 	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
1475 	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1476 	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
1477 	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1478 	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1479 	    !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
1480 		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1481 		       mitigation_options[i].option);
1482 		return SPECTRE_V2_CMD_AUTO;
1483 	}
1484 
1485 	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
1486 	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1487 	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1488 	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1489 		pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
1490 		       mitigation_options[i].option);
1491 		return SPECTRE_V2_CMD_AUTO;
1492 	}
1493 
1494 	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1495 	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
1496 	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
1497 		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
1498 		       mitigation_options[i].option);
1499 		return SPECTRE_V2_CMD_AUTO;
1500 	}
1501 
1502 	if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
1503 		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1504 		       mitigation_options[i].option);
1505 		return SPECTRE_V2_CMD_AUTO;
1506 	}
1507 
1508 	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1509 		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
1510 		       mitigation_options[i].option);
1511 		return SPECTRE_V2_CMD_AUTO;
1512 	}
1513 
1514 	if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
1515 		pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
1516 		       mitigation_options[i].option);
1517 		return SPECTRE_V2_CMD_AUTO;
1518 	}
1519 
1520 	if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
1521 		pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
1522 		       mitigation_options[i].option);
1523 		return SPECTRE_V2_CMD_AUTO;
1524 	}
1525 
1526 	spec_v2_print_cond(mitigation_options[i].option,
1527 			   mitigation_options[i].secure);
1528 	return cmd;
1529 }
1530 
1531 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
1532 {
1533 	if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
1534 		pr_err("Kernel not compiled with retpoline; no mitigation available!\n");
1535 		return SPECTRE_V2_NONE;
1536 	}
1537 
1538 	return SPECTRE_V2_RETPOLINE;
1539 }
1540 
1541 static bool __ro_after_init rrsba_disabled;
1542 
1543 /* Disable in-kernel use of non-RSB RET predictors */
1544 static void __init spec_ctrl_disable_kernel_rrsba(void)
1545 {
1546 	if (rrsba_disabled)
1547 		return;
1548 
1549 	if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
1550 		rrsba_disabled = true;
1551 		return;
1552 	}
1553 
1554 	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
1555 		return;
1556 
1557 	x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
1558 	update_spec_ctrl(x86_spec_ctrl_base);
1559 	rrsba_disabled = true;
1560 }
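/*
 * Note: rrsba_disabled is also set when the CPU never had RRSBA behavior in
 * the first place, so callers such as bhi_select_mitigation() can treat
 * "disabled" and "not present" uniformly.
 */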
1561 
1562 static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
1563 {
1564 	/*
1565 	 * Similar to context switches, there are two types of RSB attacks
1566 	 * after VM exit:
1567 	 *
1568 	 * 1) RSB underflow
1569 	 *
1570 	 * 2) Poisoned RSB entry
1571 	 *
1572 	 * When retpoline is enabled, both are mitigated by filling/clearing
1573 	 * the RSB.
1574 	 *
1575 	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
1576 	 * prediction isolation protections, RSB still needs to be cleared
1577 	 * because of #2.  Note that SMEP provides no protection here, unlike
1578 	 * user-space-poisoned RSB entries.
1579 	 *
1580 	 * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
1581 	 * bug is present then a LITE version of RSB protection is required:
1582 	 * just a single call needs to retire before a RET is executed.
1583 	 */
1584 	switch (mode) {
1585 	case SPECTRE_V2_NONE:
1586 		return;
1587 
1588 	case SPECTRE_V2_EIBRS_LFENCE:
1589 	case SPECTRE_V2_EIBRS:
1590 		if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
1591 			setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
1592 			pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
1593 		}
1594 		return;
1595 
1596 	case SPECTRE_V2_EIBRS_RETPOLINE:
1597 	case SPECTRE_V2_RETPOLINE:
1598 	case SPECTRE_V2_LFENCE:
1599 	case SPECTRE_V2_IBRS:
1600 		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1601 		pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
1602 		return;
1603 	}
1604 
1605 	pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit\n");
1606 	dump_stack();
1607 }
1608 
1609 /*
1610  * Set BHI_DIS_S to prevent indirect branches in the kernel from being
1611  * influenced by branch history in userspace. Not needed if BHI_NO is set.
1612  */
1613 static bool __init spec_ctrl_bhi_dis(void)
1614 {
1615 	if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
1616 		return false;
1617 
1618 	x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
1619 	update_spec_ctrl(x86_spec_ctrl_base);
1620 	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
1621 
1622 	return true;
1623 }
1624 
1625 enum bhi_mitigations {
1626 	BHI_MITIGATION_OFF,
1627 	BHI_MITIGATION_ON,
1628 	BHI_MITIGATION_VMEXIT_ONLY,
1629 };
1630 
1631 static enum bhi_mitigations bhi_mitigation __ro_after_init =
1632 	IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;
1633 
1634 static int __init spectre_bhi_parse_cmdline(char *str)
1635 {
1636 	if (!str)
1637 		return -EINVAL;
1638 
1639 	if (!strcmp(str, "off"))
1640 		bhi_mitigation = BHI_MITIGATION_OFF;
1641 	else if (!strcmp(str, "on"))
1642 		bhi_mitigation = BHI_MITIGATION_ON;
1643 	else if (!strcmp(str, "vmexit"))
1644 		bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
1645 	else
1646 		pr_err("Ignoring unknown spectre_bhi option (%s)\n", str);
1647 
1648 	return 0;
1649 }
1650 early_param("spectre_bhi", spectre_bhi_parse_cmdline);
1651 
1652 static void __init bhi_select_mitigation(void)
1653 {
1654 	if (bhi_mitigation == BHI_MITIGATION_OFF)
1655 		return;
1656 
1657 	/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
1658 	if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
1659 	    !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
1660 		spec_ctrl_disable_kernel_rrsba();
1661 		if (rrsba_disabled)
1662 			return;
1663 	}
1664 
1665 	/* Mitigate in hardware if supported */
1666 	if (spec_ctrl_bhi_dis())
1667 		return;
1668 
1669 	if (!IS_ENABLED(CONFIG_X86_64))
1670 		return;
1671 
1672 	if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
1673 		pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
1674 		setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
1675 		return;
1676 	}
1677 
1678 	pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
1679 	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
1680 	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
1681 }
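/*
 * Preference order implemented above: a retpoline with RRSBA disabled needs
 * no further BHI mitigation; otherwise the hardware BHI_DIS_S control is used
 * when available, and the SW BHB-clearing loop is the 64-bit-only fallback.
 */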
1682 
1683 static void __init spectre_v2_select_mitigation(void)
1684 {
1685 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
1686 	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
1687 
1688 	/*
1689 	 * If the CPU is not affected and the command line mode is NONE or AUTO
1690 	 * then nothing to do.
1691 	 */
1692 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
1693 	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
1694 		return;
1695 
1696 	switch (cmd) {
1697 	case SPECTRE_V2_CMD_NONE:
1698 		return;
1699 
1700 	case SPECTRE_V2_CMD_FORCE:
1701 	case SPECTRE_V2_CMD_AUTO:
1702 		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1703 			mode = SPECTRE_V2_EIBRS;
1704 			break;
1705 		}
1706 
1707 		if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
1708 		    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
1709 		    retbleed_cmd != RETBLEED_CMD_OFF &&
1710 		    retbleed_cmd != RETBLEED_CMD_STUFF &&
1711 		    boot_cpu_has(X86_FEATURE_IBRS) &&
1712 		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1713 			mode = SPECTRE_V2_IBRS;
1714 			break;
1715 		}
1716 
1717 		mode = spectre_v2_select_retpoline();
1718 		break;
1719 
1720 	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
1721 		pr_err(SPECTRE_V2_LFENCE_MSG);
1722 		mode = SPECTRE_V2_LFENCE;
1723 		break;
1724 
1725 	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
1726 		mode = SPECTRE_V2_RETPOLINE;
1727 		break;
1728 
1729 	case SPECTRE_V2_CMD_RETPOLINE:
1730 		mode = spectre_v2_select_retpoline();
1731 		break;
1732 
1733 	case SPECTRE_V2_CMD_IBRS:
1734 		mode = SPECTRE_V2_IBRS;
1735 		break;
1736 
1737 	case SPECTRE_V2_CMD_EIBRS:
1738 		mode = SPECTRE_V2_EIBRS;
1739 		break;
1740 
1741 	case SPECTRE_V2_CMD_EIBRS_LFENCE:
1742 		mode = SPECTRE_V2_EIBRS_LFENCE;
1743 		break;
1744 
1745 	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
1746 		mode = SPECTRE_V2_EIBRS_RETPOLINE;
1747 		break;
1748 	}
1749 
1750 	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
1751 		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1752 
1753 	if (spectre_v2_in_ibrs_mode(mode)) {
1754 		if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
1755 			msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
1756 		} else {
1757 			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
1758 			update_spec_ctrl(x86_spec_ctrl_base);
1759 		}
1760 	}
1761 
1762 	switch (mode) {
1763 	case SPECTRE_V2_NONE:
1764 	case SPECTRE_V2_EIBRS:
1765 		break;
1766 
1767 	case SPECTRE_V2_IBRS:
1768 		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
1769 		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
1770 			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
1771 		break;
1772 
1773 	case SPECTRE_V2_LFENCE:
1774 	case SPECTRE_V2_EIBRS_LFENCE:
1775 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
1776 		fallthrough;
1777 
1778 	case SPECTRE_V2_RETPOLINE:
1779 	case SPECTRE_V2_EIBRS_RETPOLINE:
1780 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
1781 		break;
1782 	}
1783 
	/*
	 * Disable alternate RSB predictions in the kernel when indirect CALLs
	 * and JMPs get protection against BHI and Intra-mode BTI, but RET
	 * prediction from a non-RSB predictor remains a risk.
	 */
1789 	if (mode == SPECTRE_V2_EIBRS_LFENCE ||
1790 	    mode == SPECTRE_V2_EIBRS_RETPOLINE ||
1791 	    mode == SPECTRE_V2_RETPOLINE)
1792 		spec_ctrl_disable_kernel_rrsba();
1793 
	if (boot_cpu_has_bug(X86_BUG_BHI))
1795 		bhi_select_mitigation();
1796 
1797 	spectre_v2_enabled = mode;
1798 	pr_info("%s\n", spectre_v2_strings[mode]);
1799 
1800 	/*
1801 	 * If Spectre v2 protection has been enabled, fill the RSB during a
1802 	 * context switch.  In general there are two types of RSB attacks
1803 	 * across context switches, for which the CALLs/RETs may be unbalanced.
1804 	 *
1805 	 * 1) RSB underflow
1806 	 *
1807 	 *    Some Intel parts have "bottomless RSB".  When the RSB is empty,
1808 	 *    speculated return targets may come from the branch predictor,
1809 	 *    which could have a user-poisoned BTB or BHB entry.
1810 	 *
1811 	 *    AMD has it even worse: *all* returns are speculated from the BTB,
1812 	 *    regardless of the state of the RSB.
1813 	 *
1814 	 *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
1815 	 *    scenario is mitigated by the IBRS branch prediction isolation
1816 	 *    properties, so the RSB buffer filling wouldn't be necessary to
1817 	 *    protect against this type of attack.
1818 	 *
1819 	 *    The "user -> user" attack scenario is mitigated by RSB filling.
1820 	 *
1821 	 * 2) Poisoned RSB entry
1822 	 *
1823 	 *    If the 'next' in-kernel return stack is shorter than 'prev',
1824 	 *    'next' could be tricked into speculating with a user-poisoned RSB
1825 	 *    entry.
1826 	 *
1827 	 *    The "user -> kernel" attack scenario is mitigated by SMEP and
1828 	 *    eIBRS.
1829 	 *
1830 	 *    The "user -> user" scenario, also known as SpectreBHB, requires
1831 	 *    RSB clearing.
1832 	 *
1833 	 * So to mitigate all cases, unconditionally fill RSB on context
1834 	 * switches.
1835 	 *
1836 	 * FIXME: Is this pointless for retbleed-affected AMD?
1837 	 */
1838 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
1839 	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
1840 
1841 	spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
1842 
1843 	/*
1844 	 * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
1845 	 * and Enhanced IBRS protect firmware too, so enable IBRS around
1846 	 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
1847 	 * otherwise enabled.
1848 	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, the kernel might unintentionally
	 * not enable IBRS around firmware calls.
1853 	 */
1854 	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
1855 	    boot_cpu_has(X86_FEATURE_IBPB) &&
1856 	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1857 	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
1858 
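		/*
		 * retbleed=ibpb already does an IBPB on every kernel entry,
		 * which also covers firmware calls, so the firmware-call IBPB
		 * is only needed in the other retbleed modes.
		 */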
1859 		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
1860 			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
1861 			pr_info("Enabling Speculation Barrier for firmware calls\n");
1862 		}
1863 
1864 	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
1865 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
1866 		pr_info("Enabling Restricted Speculation for firmware calls\n");
1867 	}
1868 
1869 	/* Set up IBPB and STIBP depending on the general spectre V2 command */
1870 	spectre_v2_cmd = cmd;
1871 }
1872 
1873 static void update_stibp_msr(void * __unused)
1874 {
1875 	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
1876 	update_spec_ctrl(val);
1877 }
1878 
1879 /* Update x86_spec_ctrl_base in case SMT state changed. */
1880 static void update_stibp_strict(void)
1881 {
1882 	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
1883 
1884 	if (sched_smt_active())
1885 		mask |= SPEC_CTRL_STIBP;
1886 
1887 	if (mask == x86_spec_ctrl_base)
1888 		return;
1889 
1890 	pr_info("Update user space SMT mitigation: STIBP %s\n",
1891 		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
1892 	x86_spec_ctrl_base = mask;
1893 	on_each_cpu(update_stibp_msr, NULL, 1);
1894 }
1895 
1896 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
1897 static void update_indir_branch_cond(void)
1898 {
1899 	if (sched_smt_active())
1900 		static_branch_enable(&switch_to_cond_stibp);
1901 	else
1902 		static_branch_disable(&switch_to_cond_stibp);
1903 }
1904 
1905 #undef pr_fmt
1906 #define pr_fmt(fmt) fmt
1907 
1908 /* Update the static key controlling the MDS CPU buffer clear in idle */
1909 static void update_mds_branch_idle(void)
1910 {
1911 	/*
1912 	 * Enable the idle clearing if SMT is active on CPUs which are
1913 	 * affected only by MSBDS and not any other MDS variant.
1914 	 *
1915 	 * The other variants cannot be mitigated when SMT is enabled, so
1916 	 * clearing the buffers on idle just to prevent the Store Buffer
1917 	 * repartitioning leak would be a window dressing exercise.
1918 	 */
1919 	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
1920 		return;
1921 
1922 	if (sched_smt_active()) {
1923 		static_branch_enable(&mds_idle_clear);
1924 	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
1925 		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
1926 		static_branch_disable(&mds_idle_clear);
1927 	}
1928 }
1929 
1930 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
1931 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
1932 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
1933 
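/* Re-evaluate the SMT-dependent mitigation state when SMT is toggled. */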
1934 void cpu_bugs_smt_update(void)
1935 {
1936 	mutex_lock(&spec_ctrl_mutex);
1937 
1938 	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
1939 	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
1940 		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1941 
1942 	switch (spectre_v2_user_stibp) {
1943 	case SPECTRE_V2_USER_NONE:
1944 		break;
1945 	case SPECTRE_V2_USER_STRICT:
1946 	case SPECTRE_V2_USER_STRICT_PREFERRED:
1947 		update_stibp_strict();
1948 		break;
1949 	case SPECTRE_V2_USER_PRCTL:
1950 	case SPECTRE_V2_USER_SECCOMP:
1951 		update_indir_branch_cond();
1952 		break;
1953 	}
1954 
1955 	switch (mds_mitigation) {
1956 	case MDS_MITIGATION_FULL:
1957 	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
1959 			pr_warn_once(MDS_MSG_SMT);
1960 		update_mds_branch_idle();
1961 		break;
1962 	case MDS_MITIGATION_OFF:
1963 		break;
1964 	}
1965 
1966 	switch (taa_mitigation) {
1967 	case TAA_MITIGATION_VERW:
1968 	case TAA_MITIGATION_UCODE_NEEDED:
1969 		if (sched_smt_active())
1970 			pr_warn_once(TAA_MSG_SMT);
1971 		break;
1972 	case TAA_MITIGATION_TSX_DISABLED:
1973 	case TAA_MITIGATION_OFF:
1974 		break;
1975 	}
1976 
1977 	switch (mmio_mitigation) {
1978 	case MMIO_MITIGATION_VERW:
1979 	case MMIO_MITIGATION_UCODE_NEEDED:
1980 		if (sched_smt_active())
1981 			pr_warn_once(MMIO_MSG_SMT);
1982 		break;
1983 	case MMIO_MITIGATION_OFF:
1984 		break;
1985 	}
1986 
1987 	mutex_unlock(&spec_ctrl_mutex);
1988 }
1989 
1990 #undef pr_fmt
1991 #define pr_fmt(fmt)	"Speculative Store Bypass: " fmt
1992 
1993 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1994 
1995 /* The kernel command line selection */
1996 enum ssb_mitigation_cmd {
1997 	SPEC_STORE_BYPASS_CMD_NONE,
1998 	SPEC_STORE_BYPASS_CMD_AUTO,
1999 	SPEC_STORE_BYPASS_CMD_ON,
2000 	SPEC_STORE_BYPASS_CMD_PRCTL,
2001 	SPEC_STORE_BYPASS_CMD_SECCOMP,
2002 };
2003 
2004 static const char * const ssb_strings[] = {
2005 	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
2006 	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
2007 	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
2008 	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
2009 };
2010 
2011 static const struct {
2012 	const char *option;
2013 	enum ssb_mitigation_cmd cmd;
2014 } ssb_mitigation_options[]  __initconst = {
2015 	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
2016 	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
2017 	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
2018 	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
2019 	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
2020 };
2021 
2022 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
2023 {
2024 	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
2025 	char arg[20];
2026 	int ret, i;
2027 
	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off())
		return SPEC_STORE_BYPASS_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPEC_STORE_BYPASS_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
		if (!match_option(arg, ret, ssb_mitigation_options[i].option))
			continue;

		cmd = ssb_mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPEC_STORE_BYPASS_CMD_AUTO;
	}
2050 
2051 	return cmd;
2052 }
2053 
2054 static enum ssb_mitigation __init __ssb_select_mitigation(void)
2055 {
2056 	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
2057 	enum ssb_mitigation_cmd cmd;
2058 
2059 	if (!boot_cpu_has(X86_FEATURE_SSBD))
2060 		return mode;
2061 
2062 	cmd = ssb_parse_cmdline();
2063 	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
2064 	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
2065 	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
2066 		return mode;
2067 
2068 	switch (cmd) {
2069 	case SPEC_STORE_BYPASS_CMD_SECCOMP:
2070 		/*
2071 		 * Choose prctl+seccomp as the default mode if seccomp is
2072 		 * enabled.
2073 		 */
2074 		if (IS_ENABLED(CONFIG_SECCOMP))
2075 			mode = SPEC_STORE_BYPASS_SECCOMP;
2076 		else
2077 			mode = SPEC_STORE_BYPASS_PRCTL;
2078 		break;
2079 	case SPEC_STORE_BYPASS_CMD_ON:
2080 		mode = SPEC_STORE_BYPASS_DISABLE;
2081 		break;
2082 	case SPEC_STORE_BYPASS_CMD_AUTO:
2083 	case SPEC_STORE_BYPASS_CMD_PRCTL:
2084 		mode = SPEC_STORE_BYPASS_PRCTL;
2085 		break;
2086 	case SPEC_STORE_BYPASS_CMD_NONE:
2087 		break;
2088 	}
2089 
2090 	/*
2091 	 * We have three CPU feature flags that are in play here:
2092 	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
2093 	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
2094 	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
2095 	 */
2096 	if (mode == SPEC_STORE_BYPASS_DISABLE) {
2097 		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
2098 		/*
2099 		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
2100 		 * use a completely different MSR and bit dependent on family.
2101 		 */
2102 		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
2103 		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
2104 			x86_amd_ssb_disable();
2105 		} else {
2106 			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
2107 			update_spec_ctrl(x86_spec_ctrl_base);
2108 		}
2109 	}
2110 
2111 	return mode;
2112 }
2113 
static void __init ssb_select_mitigation(void)
2115 {
2116 	ssb_mode = __ssb_select_mitigation();
2117 
2118 	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2119 		pr_info("%s\n", ssb_strings[ssb_mode]);
2120 }
2121 
2122 #undef pr_fmt
2123 #define pr_fmt(fmt)     "Speculation prctl: " fmt
2124 
2125 static void task_update_spec_tif(struct task_struct *tsk)
2126 {
2127 	/* Force the update of the real TIF bits */
2128 	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
2129 
2130 	/*
2131 	 * Immediately update the speculation control MSRs for the current
2132 	 * task, but for a non-current task delay setting the CPU
2133 	 * mitigation until it is scheduled next.
2134 	 *
2135 	 * This can only happen for SECCOMP mitigation. For PRCTL it's
2136 	 * always the current task.
2137 	 */
2138 	if (tsk == current)
2139 		speculation_ctrl_update_current();
2140 }
2141 
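/* Opt the task in to or out of the L1D flush on mm switch (PR_SPEC_L1D_FLUSH). */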
2142 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
2143 {
2145 	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2146 		return -EPERM;
2147 
2148 	switch (ctrl) {
2149 	case PR_SPEC_ENABLE:
2150 		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2151 		return 0;
2152 	case PR_SPEC_DISABLE:
2153 		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2154 		return 0;
2155 	default:
2156 		return -ERANGE;
2157 	}
2158 }
2159 
2160 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
2161 {
2162 	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
2163 	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
2164 		return -ENXIO;
2165 
2166 	switch (ctrl) {
2167 	case PR_SPEC_ENABLE:
2168 		/* If speculation is force disabled, enable is not allowed */
2169 		if (task_spec_ssb_force_disable(task))
2170 			return -EPERM;
2171 		task_clear_spec_ssb_disable(task);
2172 		task_clear_spec_ssb_noexec(task);
2173 		task_update_spec_tif(task);
2174 		break;
2175 	case PR_SPEC_DISABLE:
2176 		task_set_spec_ssb_disable(task);
2177 		task_clear_spec_ssb_noexec(task);
2178 		task_update_spec_tif(task);
2179 		break;
2180 	case PR_SPEC_FORCE_DISABLE:
2181 		task_set_spec_ssb_disable(task);
2182 		task_set_spec_ssb_force_disable(task);
2183 		task_clear_spec_ssb_noexec(task);
2184 		task_update_spec_tif(task);
2185 		break;
2186 	case PR_SPEC_DISABLE_NOEXEC:
2187 		if (task_spec_ssb_force_disable(task))
2188 			return -EPERM;
2189 		task_set_spec_ssb_disable(task);
2190 		task_set_spec_ssb_noexec(task);
2191 		task_update_spec_tif(task);
2192 		break;
2193 	default:
2194 		return -ERANGE;
2195 	}
2196 	return 0;
2197 }
2198 
2199 static bool is_spec_ib_user_controlled(void)
2200 {
2201 	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2202 		spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2203 		spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2204 		spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2205 }
2206 
2207 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2208 {
2209 	switch (ctrl) {
2210 	case PR_SPEC_ENABLE:
2211 		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2212 		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2213 			return 0;
2214 
2215 		/*
		 * With strict mode for both IBPB and STIBP, the instruction
		 * code paths avoid checking this task flag and instead
		 * unconditionally run the instruction. However, STIBP and
		 * IBPB are independent and either can be set to conditional
		 * regardless of the mode of the other.
2221 		 *
2222 		 * If either is set to conditional, allow the task flag to be
2223 		 * updated, unless it was force-disabled by a previous prctl
2224 		 * call. Currently, this is possible on an AMD CPU which has the
2225 		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2226 		 * kernel is booted with 'spectre_v2_user=seccomp', then
2227 		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2228 		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2229 		 */
2230 		if (!is_spec_ib_user_controlled() ||
2231 		    task_spec_ib_force_disable(task))
2232 			return -EPERM;
2233 
2234 		task_clear_spec_ib_disable(task);
2235 		task_update_spec_tif(task);
2236 		break;
2237 	case PR_SPEC_DISABLE:
2238 	case PR_SPEC_FORCE_DISABLE:
2239 		/*
2240 		 * Indirect branch speculation is always allowed when
2241 		 * mitigation is force disabled.
2242 		 */
2243 		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2244 		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2245 			return -EPERM;
2246 
2247 		if (!is_spec_ib_user_controlled())
2248 			return 0;
2249 
2250 		task_set_spec_ib_disable(task);
2251 		if (ctrl == PR_SPEC_FORCE_DISABLE)
2252 			task_set_spec_ib_force_disable(task);
2253 		task_update_spec_tif(task);
2254 		if (task == current)
2255 			indirect_branch_prediction_barrier();
2256 		break;
2257 	default:
2258 		return -ERANGE;
2259 	}
2260 	return 0;
2261 }
2262 
2263 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2264 			     unsigned long ctrl)
2265 {
2266 	switch (which) {
2267 	case PR_SPEC_STORE_BYPASS:
2268 		return ssb_prctl_set(task, ctrl);
2269 	case PR_SPEC_INDIRECT_BRANCH:
2270 		return ib_prctl_set(task, ctrl);
2271 	case PR_SPEC_L1D_FLUSH:
2272 		return l1d_flush_prctl_set(task, ctrl);
2273 	default:
2274 		return -ENODEV;
2275 	}
2276 }
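/*
 * Userspace reaches arch_prctl_spec_ctrl_set() via prctl(2); a minimal
 * usage sketch, e.g. to disable Speculative Store Bypass for the caller:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 */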
2277 
2278 #ifdef CONFIG_SECCOMP
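/*
 * When a task enables seccomp, force-disable SSB and indirect branch
 * speculation for it if the respective mitigations run in seccomp mode.
 */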
2279 void arch_seccomp_spec_mitigate(struct task_struct *task)
2280 {
2281 	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2282 		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2283 	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2284 	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2285 		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2286 }
2287 #endif
2288 
2289 static int l1d_flush_prctl_get(struct task_struct *task)
2290 {
2291 	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2292 		return PR_SPEC_FORCE_DISABLE;
2293 
2294 	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2295 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2296 	else
2297 		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2298 }
2299 
2300 static int ssb_prctl_get(struct task_struct *task)
2301 {
2302 	switch (ssb_mode) {
2303 	case SPEC_STORE_BYPASS_NONE:
2304 		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2305 			return PR_SPEC_ENABLE;
2306 		return PR_SPEC_NOT_AFFECTED;
2307 	case SPEC_STORE_BYPASS_DISABLE:
2308 		return PR_SPEC_DISABLE;
2309 	case SPEC_STORE_BYPASS_SECCOMP:
2310 	case SPEC_STORE_BYPASS_PRCTL:
2311 		if (task_spec_ssb_force_disable(task))
2312 			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2313 		if (task_spec_ssb_noexec(task))
2314 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2315 		if (task_spec_ssb_disable(task))
2316 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2317 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2318 	}
2319 	BUG();
2320 }
2321 
2322 static int ib_prctl_get(struct task_struct *task)
2323 {
2324 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2325 		return PR_SPEC_NOT_AFFECTED;
2326 
2327 	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2328 	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2329 		return PR_SPEC_ENABLE;
2330 	else if (is_spec_ib_user_controlled()) {
2331 		if (task_spec_ib_force_disable(task))
2332 			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2333 		if (task_spec_ib_disable(task))
2334 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2335 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
		   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
		   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2339 		return PR_SPEC_DISABLE;
2340 	else
2341 		return PR_SPEC_NOT_AFFECTED;
2342 }
2343 
2344 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2345 {
2346 	switch (which) {
2347 	case PR_SPEC_STORE_BYPASS:
2348 		return ssb_prctl_get(task);
2349 	case PR_SPEC_INDIRECT_BRANCH:
2350 		return ib_prctl_get(task);
2351 	case PR_SPEC_L1D_FLUSH:
2352 		return l1d_flush_prctl_get(task);
2353 	default:
2354 		return -ENODEV;
2355 	}
2356 }
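/*
 * The read side is symmetric; a minimal usage sketch:
 *
 *	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			  0, 0, 0);
 */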
2357 
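/* Propagate the boot CPU's speculation control setup to a starting AP. */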
2358 void x86_spec_ctrl_setup_ap(void)
2359 {
2360 	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2361 		update_spec_ctrl(x86_spec_ctrl_base);
2362 
2363 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2364 		x86_amd_ssb_disable();
2365 }
2366 
2367 bool itlb_multihit_kvm_mitigation;
2368 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
2369 
2370 #undef pr_fmt
2371 #define pr_fmt(fmt)	"L1TF: " fmt
2372 
2373 /* Default mitigation for L1TF-affected CPUs */
2374 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
2375 #if IS_ENABLED(CONFIG_KVM_INTEL)
2376 EXPORT_SYMBOL_GPL(l1tf_mitigation);
2377 #endif
2378 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2379 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
2380 
/*
 * These CPUs all support a 44-bit physical address space internally in
 * the cache, but CPUID can report a smaller number of physical address
 * bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * that report 36 physical address bits and have 32G RAM installed, the
 * mitigation range check in l1tf_select_mitigation() triggers. This is
 * a false positive because the mitigation is still possible given that
 * the cache uses 44 bits internally. Use the cache bits instead of the
 * reported physical bits and adjust them on the affected machines to 44
 * if the reported bits are less than 44.
 */
2395 static void override_cache_bits(struct cpuinfo_x86 *c)
2396 {
2397 	if (c->x86 != 6)
2398 		return;
2399 
2400 	switch (c->x86_vfm) {
2401 	case INTEL_NEHALEM:
2402 	case INTEL_WESTMERE:
2403 	case INTEL_SANDYBRIDGE:
2404 	case INTEL_IVYBRIDGE:
2405 	case INTEL_HASWELL:
2406 	case INTEL_HASWELL_L:
2407 	case INTEL_HASWELL_G:
2408 	case INTEL_BROADWELL:
2409 	case INTEL_BROADWELL_G:
2410 	case INTEL_SKYLAKE_L:
2411 	case INTEL_SKYLAKE:
2412 	case INTEL_KABYLAKE_L:
2413 	case INTEL_KABYLAKE:
2414 		if (c->x86_cache_bits < 44)
2415 			c->x86_cache_bits = 44;
2416 		break;
2417 	}
2418 }
2419 
2420 static void __init l1tf_select_mitigation(void)
2421 {
2422 	u64 half_pa;
2423 
2424 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
2425 		return;
2426 
2427 	if (cpu_mitigations_off())
2428 		l1tf_mitigation = L1TF_MITIGATION_OFF;
2429 	else if (cpu_mitigations_auto_nosmt())
2430 		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2431 
2432 	override_cache_bits(&boot_cpu_data);
2433 
2434 	switch (l1tf_mitigation) {
2435 	case L1TF_MITIGATION_OFF:
2436 	case L1TF_MITIGATION_FLUSH_NOWARN:
2437 	case L1TF_MITIGATION_FLUSH:
2438 		break;
2439 	case L1TF_MITIGATION_FLUSH_NOSMT:
2440 	case L1TF_MITIGATION_FULL:
2441 		cpu_smt_disable(false);
2442 		break;
2443 	case L1TF_MITIGATION_FULL_FORCE:
2444 		cpu_smt_disable(true);
2445 		break;
2446 	}
2447 
2448 #if CONFIG_PGTABLE_LEVELS == 2
2449 	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
2450 	return;
2451 #endif
2452 
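	/* MAX_PA/2, based on the (possibly overridden) cache address bits. */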
2453 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
2454 	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
2455 			e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
2456 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with the mem=%llu parameter.\n",
				half_pa);
2459 		pr_info("However, doing so will make a part of your RAM unusable.\n");
2460 		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
2461 		return;
2462 	}
2463 
2464 	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
2465 }
2466 
2467 static int __init l1tf_cmdline(char *str)
2468 {
2469 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
2470 		return 0;
2471 
2472 	if (!str)
2473 		return -EINVAL;
2474 
2475 	if (!strcmp(str, "off"))
2476 		l1tf_mitigation = L1TF_MITIGATION_OFF;
2477 	else if (!strcmp(str, "flush,nowarn"))
2478 		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
2479 	else if (!strcmp(str, "flush"))
2480 		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2481 	else if (!strcmp(str, "flush,nosmt"))
2482 		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2483 	else if (!strcmp(str, "full"))
2484 		l1tf_mitigation = L1TF_MITIGATION_FULL;
2485 	else if (!strcmp(str, "full,force"))
2486 		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
2487 
2488 	return 0;
2489 }
2490 early_param("l1tf", l1tf_cmdline);
2491 
2492 #undef pr_fmt
2493 #define pr_fmt(fmt)	"Speculative Return Stack Overflow: " fmt
2494 
2495 enum srso_mitigation {
2496 	SRSO_MITIGATION_NONE,
2497 	SRSO_MITIGATION_UCODE_NEEDED,
2498 	SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
2499 	SRSO_MITIGATION_MICROCODE,
2500 	SRSO_MITIGATION_SAFE_RET,
2501 	SRSO_MITIGATION_IBPB,
2502 	SRSO_MITIGATION_IBPB_ON_VMEXIT,
2503 };
2504 
2505 enum srso_mitigation_cmd {
2506 	SRSO_CMD_OFF,
2507 	SRSO_CMD_MICROCODE,
2508 	SRSO_CMD_SAFE_RET,
2509 	SRSO_CMD_IBPB,
2510 	SRSO_CMD_IBPB_ON_VMEXIT,
2511 };
2512 
2513 static const char * const srso_strings[] = {
2514 	[SRSO_MITIGATION_NONE]			= "Vulnerable",
2515 	[SRSO_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
2516 	[SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED]	= "Vulnerable: Safe RET, no microcode",
2517 	[SRSO_MITIGATION_MICROCODE]		= "Vulnerable: Microcode, no safe RET",
2518 	[SRSO_MITIGATION_SAFE_RET]		= "Mitigation: Safe RET",
2519 	[SRSO_MITIGATION_IBPB]			= "Mitigation: IBPB",
2520 	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only"
2521 };
2522 
2523 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
2524 static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
2525 
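/* Parse "spec_rstack_overflow=": off | microcode | safe-ret | ibpb | ibpb-vmexit. */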
2526 static int __init srso_parse_cmdline(char *str)
2527 {
2528 	if (!str)
2529 		return -EINVAL;
2530 
2531 	if (!strcmp(str, "off"))
2532 		srso_cmd = SRSO_CMD_OFF;
2533 	else if (!strcmp(str, "microcode"))
2534 		srso_cmd = SRSO_CMD_MICROCODE;
2535 	else if (!strcmp(str, "safe-ret"))
2536 		srso_cmd = SRSO_CMD_SAFE_RET;
2537 	else if (!strcmp(str, "ibpb"))
2538 		srso_cmd = SRSO_CMD_IBPB;
2539 	else if (!strcmp(str, "ibpb-vmexit"))
2540 		srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
2541 	else
		pr_err("Ignoring unknown SRSO option (%s).\n", str);
2543 
2544 	return 0;
2545 }
2546 early_param("spec_rstack_overflow", srso_parse_cmdline);
2547 
2548 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
2549 
2550 static void __init srso_select_mitigation(void)
2551 {
2552 	bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
2553 
2554 	if (cpu_mitigations_off())
2555 		return;
2556 
2557 	if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
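		/*
		 * The CPU is not affected; downgrade the predictor barrier to
		 * the cheaper SBPB where available.
		 */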
2558 		if (boot_cpu_has(X86_FEATURE_SBPB))
2559 			x86_pred_cmd = PRED_CMD_SBPB;
2560 		return;
2561 	}
2562 
2563 	if (has_microcode) {
2564 		/*
2565 		 * Zen1/2 with SMT off aren't vulnerable after the right
2566 		 * IBPB microcode has been applied.
2567 		 *
2568 		 * Zen1/2 don't have SBPB, no need to try to enable it here.
2569 		 */
2570 		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
2571 			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
2572 			return;
2573 		}
2574 
2575 		if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
2576 			srso_mitigation = SRSO_MITIGATION_IBPB;
2577 			goto out;
2578 		}
2579 	} else {
2580 		pr_warn("IBPB-extending microcode not applied!\n");
2581 		pr_warn(SRSO_NOTICE);
2582 
2583 		/* may be overwritten by SRSO_CMD_SAFE_RET below */
2584 		srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
2585 	}
2586 
2587 	switch (srso_cmd) {
2588 	case SRSO_CMD_OFF:
2589 		if (boot_cpu_has(X86_FEATURE_SBPB))
2590 			x86_pred_cmd = PRED_CMD_SBPB;
2591 		return;
2592 
2593 	case SRSO_CMD_MICROCODE:
2594 		if (has_microcode) {
2595 			srso_mitigation = SRSO_MITIGATION_MICROCODE;
2596 			pr_warn(SRSO_NOTICE);
2597 		}
2598 		break;
2599 
2600 	case SRSO_CMD_SAFE_RET:
2601 		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
2602 			/*
2603 			 * Enable the return thunk for generated code
2604 			 * like ftrace, static_call, etc.
2605 			 */
2606 			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
2607 			setup_force_cpu_cap(X86_FEATURE_UNRET);
2608 
2609 			if (boot_cpu_data.x86 == 0x19) {
2610 				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
2611 				x86_return_thunk = srso_alias_return_thunk;
2612 			} else {
2613 				setup_force_cpu_cap(X86_FEATURE_SRSO);
2614 				x86_return_thunk = srso_return_thunk;
2615 			}
2616 			if (has_microcode)
2617 				srso_mitigation = SRSO_MITIGATION_SAFE_RET;
2618 			else
2619 				srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
2620 		} else {
2621 			pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
2622 		}
2623 		break;
2624 
2625 	case SRSO_CMD_IBPB:
2626 		if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
2627 			if (has_microcode) {
2628 				setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
2629 				srso_mitigation = SRSO_MITIGATION_IBPB;
2630 			}
2631 		} else {
2632 			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
2633 		}
2634 		break;
2635 
2636 	case SRSO_CMD_IBPB_ON_VMEXIT:
2637 		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
2638 			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
2639 				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
2640 				srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
2641 			}
2642 		} else {
2643 			pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
		}
2645 		break;
2646 	}
2647 
2648 out:
2649 	pr_info("%s\n", srso_strings[srso_mitigation]);
2650 }
2651 
2652 #undef pr_fmt
2653 #define pr_fmt(fmt) fmt
2654 
2655 #ifdef CONFIG_SYSFS
2656 
2657 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
2658 
2659 #if IS_ENABLED(CONFIG_KVM_INTEL)
2660 static const char * const l1tf_vmx_states[] = {
2661 	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
2662 	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
2663 	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
2664 	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
2665 	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
2666 	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
2667 };
2668 
2669 static ssize_t l1tf_show_state(char *buf)
2670 {
2671 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
2672 		return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
2673 
2674 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
2675 	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
2676 	     sched_smt_active())) {
2677 		return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
2678 				  l1tf_vmx_states[l1tf_vmx_mitigation]);
2679 	}
2680 
2681 	return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
2682 			  l1tf_vmx_states[l1tf_vmx_mitigation],
2683 			  sched_smt_active() ? "vulnerable" : "disabled");
2684 }
2685 
2686 static ssize_t itlb_multihit_show_state(char *buf)
2687 {
2688 	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
2689 	    !boot_cpu_has(X86_FEATURE_VMX))
2690 		return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
2691 	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
2692 		return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
2693 	else if (itlb_multihit_kvm_mitigation)
2694 		return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
2695 	else
2696 		return sysfs_emit(buf, "KVM: Vulnerable\n");
2697 }
2698 #else
2699 static ssize_t l1tf_show_state(char *buf)
2700 {
2701 	return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
2702 }
2703 
2704 static ssize_t itlb_multihit_show_state(char *buf)
2705 {
2706 	return sysfs_emit(buf, "Processor vulnerable\n");
2707 }
2708 #endif
2709 
2710 static ssize_t mds_show_state(char *buf)
2711 {
2712 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2713 		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2714 				  mds_strings[mds_mitigation]);
2715 	}
2716 
	if (boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) {
2718 		return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2719 				  (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
2720 				   sched_smt_active() ? "mitigated" : "disabled"));
2721 	}
2722 
2723 	return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2724 			  sched_smt_active() ? "vulnerable" : "disabled");
2725 }
2726 
2727 static ssize_t tsx_async_abort_show_state(char *buf)
2728 {
2729 	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
2730 	    (taa_mitigation == TAA_MITIGATION_OFF))
2731 		return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
2732 
2733 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2734 		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2735 				  taa_strings[taa_mitigation]);
2736 	}
2737 
2738 	return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
2739 			  sched_smt_active() ? "vulnerable" : "disabled");
2740 }
2741 
2742 static ssize_t mmio_stale_data_show_state(char *buf)
2743 {
2744 	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
2745 		return sysfs_emit(buf, "Unknown: No mitigations\n");
2746 
2747 	if (mmio_mitigation == MMIO_MITIGATION_OFF)
2748 		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
2749 
2750 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2751 		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2752 				  mmio_strings[mmio_mitigation]);
2753 	}
2754 
2755 	return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
2756 			  sched_smt_active() ? "vulnerable" : "disabled");
2757 }
2758 
2759 static ssize_t rfds_show_state(char *buf)
2760 {
2761 	return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
2762 }
2763 
static const char *stibp_state(void)
2765 {
2766 	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
2767 	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
2768 		return "";
2769 
2770 	switch (spectre_v2_user_stibp) {
2771 	case SPECTRE_V2_USER_NONE:
2772 		return "; STIBP: disabled";
2773 	case SPECTRE_V2_USER_STRICT:
2774 		return "; STIBP: forced";
2775 	case SPECTRE_V2_USER_STRICT_PREFERRED:
2776 		return "; STIBP: always-on";
2777 	case SPECTRE_V2_USER_PRCTL:
2778 	case SPECTRE_V2_USER_SECCOMP:
2779 		if (static_key_enabled(&switch_to_cond_stibp))
2780 			return "; STIBP: conditional";
2781 	}
2782 	return "";
2783 }
2784 
static const char *ibpb_state(void)
2786 {
2787 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
2788 		if (static_key_enabled(&switch_mm_always_ibpb))
2789 			return "; IBPB: always-on";
2790 		if (static_key_enabled(&switch_mm_cond_ibpb))
2791 			return "; IBPB: conditional";
2792 		return "; IBPB: disabled";
2793 	}
2794 	return "";
2795 }
2796 
static const char *pbrsb_eibrs_state(void)
2798 {
2799 	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2800 		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
2801 		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
2802 			return "; PBRSB-eIBRS: SW sequence";
2803 		else
2804 			return "; PBRSB-eIBRS: Vulnerable";
2805 	} else {
2806 		return "; PBRSB-eIBRS: Not affected";
2807 	}
2808 }
2809 
2810 static const char *spectre_bhi_state(void)
2811 {
2812 	if (!boot_cpu_has_bug(X86_BUG_BHI))
2813 		return "; BHI: Not affected";
2814 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
2815 		return "; BHI: BHI_DIS_S";
2816 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
2817 		return "; BHI: SW loop, KVM: SW loop";
2818 	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
2819 		 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
2820 		 rrsba_disabled)
2821 		return "; BHI: Retpoline";
2822 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
2823 		return "; BHI: Vulnerable, KVM: SW loop";
2824 
2825 	return "; BHI: Vulnerable";
2826 }
2827 
2828 static ssize_t spectre_v2_show_state(char *buf)
2829 {
2830 	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
2831 		return sysfs_emit(buf, "Vulnerable: LFENCE\n");
2832 
2833 	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2834 		return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
2835 
2836 	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
2837 	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
2838 		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
2839 
2840 	return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
2841 			  spectre_v2_strings[spectre_v2_enabled],
2842 			  ibpb_state(),
2843 			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
2844 			  stibp_state(),
2845 			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
2846 			  pbrsb_eibrs_state(),
2847 			  spectre_bhi_state(),
2848 			  /* this should always be at the end */
2849 			  spectre_v2_module_string());
2850 }
2851 
2852 static ssize_t srbds_show_state(char *buf)
2853 {
2854 	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
2855 }
2856 
2857 static ssize_t retbleed_show_state(char *buf)
2858 {
2859 	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
2860 	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
2861 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
2862 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
2863 			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
2864 
2865 		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
2866 				  !sched_smt_active() ? "disabled" :
2867 				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2868 				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
2869 				  "enabled with STIBP protection" : "vulnerable");
2870 	}
2871 
2872 	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
2873 }
2874 
2875 static ssize_t srso_show_state(char *buf)
2876 {
2877 	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
2878 		return sysfs_emit(buf, "Mitigation: SMT disabled\n");
2879 
2880 	return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
2881 }
2882 
2883 static ssize_t gds_show_state(char *buf)
2884 {
2885 	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
2886 }
2887 
2888 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
2889 			       char *buf, unsigned int bug)
2890 {
2891 	if (!boot_cpu_has_bug(bug))
2892 		return sysfs_emit(buf, "Not affected\n");
2893 
2894 	switch (bug) {
2895 	case X86_BUG_CPU_MELTDOWN:
2896 		if (boot_cpu_has(X86_FEATURE_PTI))
2897 			return sysfs_emit(buf, "Mitigation: PTI\n");
2898 
2899 		if (hypervisor_is_type(X86_HYPER_XEN_PV))
2900 			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
2901 
2902 		break;
2903 
2904 	case X86_BUG_SPECTRE_V1:
2905 		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
2906 
2907 	case X86_BUG_SPECTRE_V2:
2908 		return spectre_v2_show_state(buf);
2909 
2910 	case X86_BUG_SPEC_STORE_BYPASS:
2911 		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
2912 
2913 	case X86_BUG_L1TF:
2914 		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
2915 			return l1tf_show_state(buf);
2916 		break;
2917 
2918 	case X86_BUG_MDS:
2919 		return mds_show_state(buf);
2920 
2921 	case X86_BUG_TAA:
2922 		return tsx_async_abort_show_state(buf);
2923 
2924 	case X86_BUG_ITLB_MULTIHIT:
2925 		return itlb_multihit_show_state(buf);
2926 
2927 	case X86_BUG_SRBDS:
2928 		return srbds_show_state(buf);
2929 
2930 	case X86_BUG_MMIO_STALE_DATA:
2931 	case X86_BUG_MMIO_UNKNOWN:
2932 		return mmio_stale_data_show_state(buf);
2933 
2934 	case X86_BUG_RETBLEED:
2935 		return retbleed_show_state(buf);
2936 
2937 	case X86_BUG_SRSO:
2938 		return srso_show_state(buf);
2939 
2940 	case X86_BUG_GDS:
2941 		return gds_show_state(buf);
2942 
2943 	case X86_BUG_RFDS:
2944 		return rfds_show_state(buf);
2945 
2946 	default:
2947 		break;
2948 	}
2949 
2950 	return sysfs_emit(buf, "Vulnerable\n");
2951 }
2952 
2953 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
2954 {
2955 	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
2956 }
2957 
2958 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
2959 {
2960 	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
2961 }
2962 
2963 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
2964 {
2965 	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
2966 }
2967 
2968 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
2969 {
2970 	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
2971 }
2972 
2973 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
2974 {
2975 	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
2976 }
2977 
2978 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
2979 {
2980 	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
2981 }
2982 
2983 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
2984 {
2985 	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
2986 }
2987 
2988 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
2989 {
2990 	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
2991 }
2992 
2993 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
2994 {
2995 	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
2996 }
2997 
2998 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
2999 {
3000 	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
3001 		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
3002 	else
3003 		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
3004 }
3005 
3006 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
3007 {
3008 	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
3009 }
3010 
3011 ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
3012 {
3013 	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
3014 }
3015 
3016 ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
3017 {
3018 	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
3019 }
3020 
3021 ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
3022 {
3023 	return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
3024 }
3025 #endif
3026 
3027 void __warn_thunk(void)
3028 {
3029 	WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
3030 }
3031