1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1994 Linus Torvalds
4 *
5 * Cyrix stuff, June 1998 by:
6 * - Rafael R. Reilova (moved everything from head.S),
7 * <rreilova@ececs.uc.edu>
8 * - Channing Corn (tests & fixes),
9 * - Andrew D. Balsa (code cleanup).
10 */
11 #include <linux/init.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/nospec.h>
15 #include <linux/prctl.h>
16 #include <linux/sched/smt.h>
17 #include <linux/pgtable.h>
18 #include <linux/bpf.h>
19
20 #include <asm/spec-ctrl.h>
21 #include <asm/cmdline.h>
22 #include <asm/bugs.h>
23 #include <asm/processor.h>
24 #include <asm/processor-flags.h>
25 #include <asm/fpu/api.h>
26 #include <asm/msr.h>
27 #include <asm/vmx.h>
28 #include <asm/paravirt.h>
29 #include <asm/cpu_device_id.h>
30 #include <asm/e820/api.h>
31 #include <asm/hypervisor.h>
32 #include <asm/tlbflush.h>
33 #include <asm/cpu.h>
34
35 #include "cpu.h"
36
37 /*
38 * Speculation Vulnerability Handling
39 *
40 * Each vulnerability is handled with the following functions:
41 * <vuln>_select_mitigation() -- Selects a mitigation to use. This should
42 * take into account all relevant command line
43 * options.
44 * <vuln>_update_mitigation() -- This is called after all vulnerabilities have
45 * selected a mitigation, in case the selection
46 * may want to change based on other choices
47 * made. This function is optional.
48 * <vuln>_apply_mitigation() -- Enable the selected mitigation.
49 *
50 * The compile-time mitigation in all cases should be AUTO. An explicit
51 * command-line option can override AUTO. If no such option is
52 * provided, <vuln>_select_mitigation() will override AUTO to the best
53 * mitigation option.
54 */
55
56 static void __init spectre_v1_select_mitigation(void);
57 static void __init spectre_v1_apply_mitigation(void);
58 static void __init spectre_v2_select_mitigation(void);
59 static void __init spectre_v2_update_mitigation(void);
60 static void __init spectre_v2_apply_mitigation(void);
61 static void __init retbleed_select_mitigation(void);
62 static void __init retbleed_update_mitigation(void);
63 static void __init retbleed_apply_mitigation(void);
64 static void __init spectre_v2_user_select_mitigation(void);
65 static void __init spectre_v2_user_update_mitigation(void);
66 static void __init spectre_v2_user_apply_mitigation(void);
67 static void __init ssb_select_mitigation(void);
68 static void __init ssb_apply_mitigation(void);
69 static void __init l1tf_select_mitigation(void);
70 static void __init l1tf_apply_mitigation(void);
71 static void __init mds_select_mitigation(void);
72 static void __init mds_update_mitigation(void);
73 static void __init mds_apply_mitigation(void);
74 static void __init taa_select_mitigation(void);
75 static void __init taa_update_mitigation(void);
76 static void __init taa_apply_mitigation(void);
77 static void __init mmio_select_mitigation(void);
78 static void __init mmio_update_mitigation(void);
79 static void __init mmio_apply_mitigation(void);
80 static void __init rfds_select_mitigation(void);
81 static void __init rfds_update_mitigation(void);
82 static void __init rfds_apply_mitigation(void);
83 static void __init srbds_select_mitigation(void);
84 static void __init srbds_apply_mitigation(void);
85 static void __init l1d_flush_select_mitigation(void);
86 static void __init srso_select_mitigation(void);
87 static void __init srso_update_mitigation(void);
88 static void __init srso_apply_mitigation(void);
89 static void __init gds_select_mitigation(void);
90 static void __init gds_apply_mitigation(void);
91 static void __init bhi_select_mitigation(void);
92 static void __init bhi_update_mitigation(void);
93 static void __init bhi_apply_mitigation(void);
94 static void __init its_select_mitigation(void);
95 static void __init its_update_mitigation(void);
96 static void __init its_apply_mitigation(void);
97 static void __init tsa_select_mitigation(void);
98 static void __init tsa_apply_mitigation(void);
99 static void __init vmscape_select_mitigation(void);
100 static void __init vmscape_update_mitigation(void);
101 static void __init vmscape_apply_mitigation(void);
102
103 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
104 u64 x86_spec_ctrl_base;
105 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
106
107 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
108 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
109 EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
110
/*
 * Set when the CPU has run a potentially malicious guest. An IBPB will
 * be needed before running userspace. That IBPB will flush the branch
 * predictor content.
 */
116 DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
117 EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);
118
119 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
120
121 static u64 __ro_after_init x86_arch_cap_msr;
122
123 static DEFINE_MUTEX(spec_ctrl_mutex);
124
125 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
126
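/* Record the selected return thunk and log which one is active. */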
static void __init set_return_thunk(void *thunk)
128 {
129 x86_return_thunk = thunk;
130
131 pr_info("active return thunk: %ps\n", thunk);
132 }
133
134 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
136 {
137 this_cpu_write(x86_spec_ctrl_current, val);
138 wrmsrq(MSR_IA32_SPEC_CTRL, val);
139 }
140
141 /*
142 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
143 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
144 */
void update_spec_ctrl_cond(u64 val)
146 {
147 if (this_cpu_read(x86_spec_ctrl_current) == val)
148 return;
149
150 this_cpu_write(x86_spec_ctrl_current, val);
151
/*
 * When KERNEL_IBRS is enabled this MSR is written on return-to-user;
 * unless forced, the update can be delayed until that time.
 */
156 if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
157 wrmsrq(MSR_IA32_SPEC_CTRL, val);
158 }
159
noinstr u64 spec_ctrl_current(void)
161 {
162 return this_cpu_read(x86_spec_ctrl_current);
163 }
164 EXPORT_SYMBOL_GPL(spec_ctrl_current);
165
166 /*
167 * AMD specific MSR info for Speculative Store Bypass control.
168 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
169 */
170 u64 __ro_after_init x86_amd_ls_cfg_base;
171 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
172
173 /* Control conditional STIBP in switch_to() */
174 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
175 /* Control conditional IBPB in switch_mm() */
176 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
177 /* Control unconditional IBPB in switch_mm() */
178 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
179
180 /* Control IBPB on vCPU load */
181 DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
182 EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
183
184 /* Control CPU buffer clear before idling (halt, mwait) */
185 DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
186 EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
187
/*
 * Controls whether L1D flush based mitigations are enabled, based on
 * hardware features and the admin setting via the "l1d_flush" boot
 * parameter; defaults to false.
 */
193 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
194
195 /*
196 * Controls CPU Fill buffer clear before VMenter. This is a subset of
197 * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
198 * mitigation is required.
199 */
200 DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
201 EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
202
203 #undef pr_fmt
204 #define pr_fmt(fmt) "mitigations: " fmt
205
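/* Log the enabled attack vectors and the global SMT mitigation policy. */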
static void __init cpu_print_attack_vectors(void)
207 {
208 pr_info("Enabled attack vectors: ");
209
210 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
211 pr_cont("user_kernel, ");
212
213 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER))
214 pr_cont("user_user, ");
215
216 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
217 pr_cont("guest_host, ");
218
219 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST))
220 pr_cont("guest_guest, ");
221
222 pr_cont("SMT mitigations: ");
223
224 switch (smt_mitigations) {
225 case SMT_MITIGATIONS_OFF:
226 pr_cont("off\n");
227 break;
228 case SMT_MITIGATIONS_AUTO:
229 pr_cont("auto\n");
230 break;
231 case SMT_MITIGATIONS_ON:
232 pr_cont("on\n");
233 }
234 }
235
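/*
 * Boot-time entry point for speculation mitigation handling: select a
 * mitigation for each vulnerability, let them cross-adjust their choices,
 * then apply the final selections before alternatives are patched.
 */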
void __init cpu_select_mitigations(void)
237 {
238 /*
239 * Read the SPEC_CTRL MSR to account for reserved bits which may
240 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
241 * init code as it is not enumerated and depends on the family.
242 */
243 if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
244 rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
245
/*
 * A previously running kernel (kexec) may have some controls turned
 * ON. Clear them and let the mitigations setup below rediscover them
 * based on configuration.
 */
251 x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
252 }
253
254 x86_arch_cap_msr = x86_read_arch_cap_msr();
255
256 cpu_print_attack_vectors();
257
258 /* Select the proper CPU mitigations before patching alternatives: */
259 spectre_v1_select_mitigation();
260 spectre_v2_select_mitigation();
261 retbleed_select_mitigation();
262 spectre_v2_user_select_mitigation();
263 ssb_select_mitigation();
264 l1tf_select_mitigation();
265 mds_select_mitigation();
266 taa_select_mitigation();
267 mmio_select_mitigation();
268 rfds_select_mitigation();
269 srbds_select_mitigation();
270 l1d_flush_select_mitigation();
271 srso_select_mitigation();
272 gds_select_mitigation();
273 its_select_mitigation();
274 bhi_select_mitigation();
275 tsa_select_mitigation();
276 vmscape_select_mitigation();
277
278 /*
279 * After mitigations are selected, some may need to update their
280 * choices.
281 */
282 spectre_v2_update_mitigation();
283 /*
284 * retbleed_update_mitigation() relies on the state set by
285 * spectre_v2_update_mitigation(); specifically it wants to know about
286 * spectre_v2=ibrs.
287 */
288 retbleed_update_mitigation();
289 /*
290 * its_update_mitigation() depends on spectre_v2_update_mitigation()
291 * and retbleed_update_mitigation().
292 */
293 its_update_mitigation();
294
295 /*
296 * spectre_v2_user_update_mitigation() depends on
297 * retbleed_update_mitigation(), specifically the STIBP
298 * selection is forced for UNRET or IBPB.
299 */
300 spectre_v2_user_update_mitigation();
301 mds_update_mitigation();
302 taa_update_mitigation();
303 mmio_update_mitigation();
304 rfds_update_mitigation();
305 bhi_update_mitigation();
306 /* srso_update_mitigation() depends on retbleed_update_mitigation(). */
307 srso_update_mitigation();
308 vmscape_update_mitigation();
309
310 spectre_v1_apply_mitigation();
311 spectre_v2_apply_mitigation();
312 retbleed_apply_mitigation();
313 spectre_v2_user_apply_mitigation();
314 ssb_apply_mitigation();
315 l1tf_apply_mitigation();
316 mds_apply_mitigation();
317 taa_apply_mitigation();
318 mmio_apply_mitigation();
319 rfds_apply_mitigation();
320 srbds_apply_mitigation();
321 srso_apply_mitigation();
322 gds_apply_mitigation();
323 its_apply_mitigation();
324 bhi_apply_mitigation();
325 tsa_apply_mitigation();
326 vmscape_apply_mitigation();
327 }
328
329 /*
330 * NOTE: This function is *only* called for SVM, since Intel uses
331 * MSR_IA32_SPEC_CTRL for SSBD.
332 */
333 void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
335 {
336 u64 guestval, hostval;
337 struct thread_info *ti = current_thread_info();
338
/*
 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
 */
343 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
344 !static_cpu_has(X86_FEATURE_VIRT_SSBD))
345 return;
346
/*
 * If the host has SSBD mitigation enabled, force it in the host's
 * virtual MSR value. If it's not permanently enabled, evaluate
 * current's TIF_SSBD thread flag.
 */
352 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
353 hostval = SPEC_CTRL_SSBD;
354 else
355 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
356
357 /* Sanitize the guest value */
358 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
359
360 if (hostval != guestval) {
361 unsigned long tif;
362
363 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
364 ssbd_spec_ctrl_to_tif(hostval);
365
366 speculation_ctrl_update(tif);
367 }
368 }
369 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
370
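/*
 * Enable SSBD on AMD CPUs that lack SPEC_CTRL-based SSBD, using either the
 * virtualized SPEC_CTRL MSR or the LS_CFG MSR.
 */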
static void x86_amd_ssb_disable(void)
372 {
373 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
374
375 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
376 wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
377 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
378 wrmsrq(MSR_AMD64_LS_CFG, msrval);
379 }
380
381 #undef pr_fmt
382 #define pr_fmt(fmt) "MDS: " fmt
383
384 /*
385 * Returns true if vulnerability should be mitigated based on the
386 * selected attack vector controls.
387 *
388 * See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
389 */
static bool __init should_mitigate_vuln(unsigned int bug)
391 {
392 switch (bug) {
393 /*
394 * The only runtime-selected spectre_v1 mitigations in the kernel are
395 * related to SWAPGS protection on kernel entry. Therefore, protection
396 * is only required for the user->kernel attack vector.
397 */
398 case X86_BUG_SPECTRE_V1:
399 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL);
400
401 case X86_BUG_SPECTRE_V2:
402 case X86_BUG_RETBLEED:
403 case X86_BUG_L1TF:
404 case X86_BUG_ITS:
405 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
406 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
407
408 case X86_BUG_SPECTRE_V2_USER:
409 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
410 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
411
412 /*
413 * All the vulnerabilities below allow potentially leaking data
414 * across address spaces. Therefore, mitigation is required for
415 * any of these 4 attack vectors.
416 */
417 case X86_BUG_MDS:
418 case X86_BUG_TAA:
419 case X86_BUG_MMIO_STALE_DATA:
420 case X86_BUG_RFDS:
421 case X86_BUG_SRBDS:
422 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
423 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
424 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
425 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
426
427 case X86_BUG_GDS:
428 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
429 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
430 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
431 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
432 (smt_mitigations != SMT_MITIGATIONS_OFF);
433
434 case X86_BUG_SPEC_STORE_BYPASS:
435 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER);
436
437 default:
438 WARN(1, "Unknown bug %x\n", bug);
439 return false;
440 }
441 }
442
443 /* Default mitigation for MDS-affected CPUs */
444 static enum mds_mitigations mds_mitigation __ro_after_init =
445 IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
446 static bool mds_nosmt __ro_after_init = false;
447
448 static const char * const mds_strings[] = {
449 [MDS_MITIGATION_OFF] = "Vulnerable",
450 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
451 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
452 };
453
454 enum taa_mitigations {
455 TAA_MITIGATION_OFF,
456 TAA_MITIGATION_AUTO,
457 TAA_MITIGATION_UCODE_NEEDED,
458 TAA_MITIGATION_VERW,
459 TAA_MITIGATION_TSX_DISABLED,
460 };
461
462 /* Default mitigation for TAA-affected CPUs */
463 static enum taa_mitigations taa_mitigation __ro_after_init =
464 IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;
465
466 enum mmio_mitigations {
467 MMIO_MITIGATION_OFF,
468 MMIO_MITIGATION_AUTO,
469 MMIO_MITIGATION_UCODE_NEEDED,
470 MMIO_MITIGATION_VERW,
471 };
472
473 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
474 static enum mmio_mitigations mmio_mitigation __ro_after_init =
475 IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;
476
477 enum rfds_mitigations {
478 RFDS_MITIGATION_OFF,
479 RFDS_MITIGATION_AUTO,
480 RFDS_MITIGATION_VERW,
481 RFDS_MITIGATION_UCODE_NEEDED,
482 };
483
484 /* Default mitigation for Register File Data Sampling */
485 static enum rfds_mitigations rfds_mitigation __ro_after_init =
486 IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
487
488 /*
489 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
490 * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
491 */
492 static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
493
static void __init mds_select_mitigation(void)
495 {
496 if (!boot_cpu_has_bug(X86_BUG_MDS)) {
497 mds_mitigation = MDS_MITIGATION_OFF;
498 return;
499 }
500
501 if (mds_mitigation == MDS_MITIGATION_AUTO) {
502 if (should_mitigate_vuln(X86_BUG_MDS))
503 mds_mitigation = MDS_MITIGATION_FULL;
504 else
505 mds_mitigation = MDS_MITIGATION_OFF;
506 }
507
508 if (mds_mitigation == MDS_MITIGATION_OFF)
509 return;
510
511 verw_clear_cpu_buf_mitigation_selected = true;
512 }
513
static void __init mds_update_mitigation(void)
515 {
516 if (!boot_cpu_has_bug(X86_BUG_MDS))
517 return;
518
519 /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
520 if (verw_clear_cpu_buf_mitigation_selected)
521 mds_mitigation = MDS_MITIGATION_FULL;
522
523 if (mds_mitigation == MDS_MITIGATION_FULL) {
524 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
525 mds_mitigation = MDS_MITIGATION_VMWERV;
526 }
527
528 pr_info("%s\n", mds_strings[mds_mitigation]);
529 }
530
static void __init mds_apply_mitigation(void)
532 {
533 if (mds_mitigation == MDS_MITIGATION_FULL ||
534 mds_mitigation == MDS_MITIGATION_VMWERV) {
535 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
536 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
537 (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
538 cpu_smt_disable(false);
539 }
540 }
541
static int __init mds_cmdline(char *str)
543 {
544 if (!boot_cpu_has_bug(X86_BUG_MDS))
545 return 0;
546
547 if (!str)
548 return -EINVAL;
549
550 if (!strcmp(str, "off"))
551 mds_mitigation = MDS_MITIGATION_OFF;
552 else if (!strcmp(str, "full"))
553 mds_mitigation = MDS_MITIGATION_FULL;
554 else if (!strcmp(str, "full,nosmt")) {
555 mds_mitigation = MDS_MITIGATION_FULL;
556 mds_nosmt = true;
557 }
558
559 return 0;
560 }
561 early_param("mds", mds_cmdline);
562
563 #undef pr_fmt
564 #define pr_fmt(fmt) "TAA: " fmt
565
566 static bool taa_nosmt __ro_after_init;
567
568 static const char * const taa_strings[] = {
569 [TAA_MITIGATION_OFF] = "Vulnerable",
570 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
571 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
572 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
573 };
574
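/* TAA is only a concern when the CPU is affected and TSX (RTM) is enabled. */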
static bool __init taa_vulnerable(void)
576 {
577 return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
578 }
579
static void __init taa_select_mitigation(void)
581 {
582 if (!boot_cpu_has_bug(X86_BUG_TAA)) {
583 taa_mitigation = TAA_MITIGATION_OFF;
584 return;
585 }
586
587 /* TSX previously disabled by tsx=off */
588 if (!boot_cpu_has(X86_FEATURE_RTM)) {
589 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
590 return;
591 }
592
593 /* Microcode will be checked in taa_update_mitigation(). */
594 if (taa_mitigation == TAA_MITIGATION_AUTO) {
595 if (should_mitigate_vuln(X86_BUG_TAA))
596 taa_mitigation = TAA_MITIGATION_VERW;
597 else
598 taa_mitigation = TAA_MITIGATION_OFF;
599 }
600
601 if (taa_mitigation != TAA_MITIGATION_OFF)
602 verw_clear_cpu_buf_mitigation_selected = true;
603 }
604
static void __init taa_update_mitigation(void)
606 {
607 if (!taa_vulnerable())
608 return;
609
610 if (verw_clear_cpu_buf_mitigation_selected)
611 taa_mitigation = TAA_MITIGATION_VERW;
612
613 if (taa_mitigation == TAA_MITIGATION_VERW) {
614 /* Check if the requisite ucode is available. */
615 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
616 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
617
618 /*
619 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
620 * A microcode update fixes this behavior to clear CPU buffers. It also
621 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
622 * ARCH_CAP_TSX_CTRL_MSR bit.
623 *
624 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
625 * update is required.
626 */
627 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
628 !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
629 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
630 }
631
632 pr_info("%s\n", taa_strings[taa_mitigation]);
633 }
634
static void __init taa_apply_mitigation(void)
636 {
637 if (taa_mitigation == TAA_MITIGATION_VERW ||
638 taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
639 /*
640 * TSX is enabled, select alternate mitigation for TAA which is
641 * the same as MDS. Enable MDS static branch to clear CPU buffers.
642 *
643 * For guests that can't determine whether the correct microcode is
644 * present on host, enable the mitigation for UCODE_NEEDED as well.
645 */
646 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
647
648 if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
649 cpu_smt_disable(false);
650 }
651 }
652
static int __init tsx_async_abort_parse_cmdline(char *str)
654 {
655 if (!boot_cpu_has_bug(X86_BUG_TAA))
656 return 0;
657
658 if (!str)
659 return -EINVAL;
660
661 if (!strcmp(str, "off")) {
662 taa_mitigation = TAA_MITIGATION_OFF;
663 } else if (!strcmp(str, "full")) {
664 taa_mitigation = TAA_MITIGATION_VERW;
665 } else if (!strcmp(str, "full,nosmt")) {
666 taa_mitigation = TAA_MITIGATION_VERW;
667 taa_nosmt = true;
668 }
669
670 return 0;
671 }
672 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
673
674 #undef pr_fmt
675 #define pr_fmt(fmt) "MMIO Stale Data: " fmt
676
677 static bool mmio_nosmt __ro_after_init = false;
678
679 static const char * const mmio_strings[] = {
680 [MMIO_MITIGATION_OFF] = "Vulnerable",
681 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
682 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
683 };
684
static void __init mmio_select_mitigation(void)
686 {
687 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
688 cpu_mitigations_off()) {
689 mmio_mitigation = MMIO_MITIGATION_OFF;
690 return;
691 }
692
693 /* Microcode will be checked in mmio_update_mitigation(). */
694 if (mmio_mitigation == MMIO_MITIGATION_AUTO) {
695 if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA))
696 mmio_mitigation = MMIO_MITIGATION_VERW;
697 else
698 mmio_mitigation = MMIO_MITIGATION_OFF;
699 }
700
701 if (mmio_mitigation == MMIO_MITIGATION_OFF)
702 return;
703
704 /*
705 * Enable CPU buffer clear mitigation for host and VMM, if also affected
706 * by MDS or TAA.
707 */
708 if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
709 verw_clear_cpu_buf_mitigation_selected = true;
710 }
711
static void __init mmio_update_mitigation(void)
713 {
714 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
715 return;
716
717 if (verw_clear_cpu_buf_mitigation_selected)
718 mmio_mitigation = MMIO_MITIGATION_VERW;
719
720 if (mmio_mitigation == MMIO_MITIGATION_VERW) {
721 /*
722 * Check if the system has the right microcode.
723 *
724 * CPU Fill buffer clear mitigation is enumerated by either an explicit
725 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
726 * affected systems.
727 */
728 if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
729 (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
730 boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
731 !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
732 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
733 }
734
735 pr_info("%s\n", mmio_strings[mmio_mitigation]);
736 }
737
static void __init mmio_apply_mitigation(void)
739 {
740 if (mmio_mitigation == MMIO_MITIGATION_OFF)
741 return;
742
743 /*
744 * Only enable the VMM mitigation if the CPU buffer clear mitigation is
745 * not being used.
746 */
747 if (verw_clear_cpu_buf_mitigation_selected) {
748 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
749 static_branch_disable(&cpu_buf_vm_clear);
750 } else {
751 static_branch_enable(&cpu_buf_vm_clear);
752 }
753
754 /*
755 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
756 * be propagated to uncore buffers, clearing the Fill buffers on idle
757 * is required irrespective of SMT state.
758 */
759 if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
760 static_branch_enable(&cpu_buf_idle_clear);
761
762 if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
763 cpu_smt_disable(false);
764 }
765
static int __init mmio_stale_data_parse_cmdline(char *str)
767 {
768 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
769 return 0;
770
771 if (!str)
772 return -EINVAL;
773
774 if (!strcmp(str, "off")) {
775 mmio_mitigation = MMIO_MITIGATION_OFF;
776 } else if (!strcmp(str, "full")) {
777 mmio_mitigation = MMIO_MITIGATION_VERW;
778 } else if (!strcmp(str, "full,nosmt")) {
779 mmio_mitigation = MMIO_MITIGATION_VERW;
780 mmio_nosmt = true;
781 }
782
783 return 0;
784 }
785 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
786
787 #undef pr_fmt
788 #define pr_fmt(fmt) "Register File Data Sampling: " fmt
789
790 static const char * const rfds_strings[] = {
791 [RFDS_MITIGATION_OFF] = "Vulnerable",
792 [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
793 [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
794 };
795
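/*
 * The RFDS mitigation requires microcode where VERW also clears the register
 * file, enumerated by ARCH_CAP_RFDS_CLEAR.
 */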
static inline bool __init verw_clears_cpu_reg_file(void)
797 {
798 return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
799 }
800
static void __init rfds_select_mitigation(void)
802 {
803 if (!boot_cpu_has_bug(X86_BUG_RFDS)) {
804 rfds_mitigation = RFDS_MITIGATION_OFF;
805 return;
806 }
807
808 if (rfds_mitigation == RFDS_MITIGATION_AUTO) {
809 if (should_mitigate_vuln(X86_BUG_RFDS))
810 rfds_mitigation = RFDS_MITIGATION_VERW;
811 else
812 rfds_mitigation = RFDS_MITIGATION_OFF;
813 }
814
815 if (rfds_mitigation == RFDS_MITIGATION_OFF)
816 return;
817
818 if (verw_clears_cpu_reg_file())
819 verw_clear_cpu_buf_mitigation_selected = true;
820 }
821
static void __init rfds_update_mitigation(void)
823 {
824 if (!boot_cpu_has_bug(X86_BUG_RFDS))
825 return;
826
827 if (verw_clear_cpu_buf_mitigation_selected)
828 rfds_mitigation = RFDS_MITIGATION_VERW;
829
830 if (rfds_mitigation == RFDS_MITIGATION_VERW) {
831 if (!verw_clears_cpu_reg_file())
832 rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
833 }
834
835 pr_info("%s\n", rfds_strings[rfds_mitigation]);
836 }
837
static void __init rfds_apply_mitigation(void)
839 {
840 if (rfds_mitigation == RFDS_MITIGATION_VERW)
841 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
842 }
843
static __init int rfds_parse_cmdline(char *str)
845 {
846 if (!str)
847 return -EINVAL;
848
849 if (!boot_cpu_has_bug(X86_BUG_RFDS))
850 return 0;
851
852 if (!strcmp(str, "off"))
853 rfds_mitigation = RFDS_MITIGATION_OFF;
854 else if (!strcmp(str, "on"))
855 rfds_mitigation = RFDS_MITIGATION_VERW;
856
857 return 0;
858 }
859 early_param("reg_file_data_sampling", rfds_parse_cmdline);
860
861 #undef pr_fmt
862 #define pr_fmt(fmt) "SRBDS: " fmt
863
864 enum srbds_mitigations {
865 SRBDS_MITIGATION_OFF,
866 SRBDS_MITIGATION_AUTO,
867 SRBDS_MITIGATION_UCODE_NEEDED,
868 SRBDS_MITIGATION_FULL,
869 SRBDS_MITIGATION_TSX_OFF,
870 SRBDS_MITIGATION_HYPERVISOR,
871 };
872
873 static enum srbds_mitigations srbds_mitigation __ro_after_init =
874 IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;
875
876 static const char * const srbds_strings[] = {
877 [SRBDS_MITIGATION_OFF] = "Vulnerable",
878 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
879 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
880 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
881 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
882 };
883
884 static bool srbds_off;
885
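/*
 * Program RNGDS_MITG_DIS in MSR_IA32_MCU_OPT_CTRL according to the selected
 * SRBDS mitigation.
 */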
void update_srbds_msr(void)
887 {
888 u64 mcu_ctrl;
889
890 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
891 return;
892
893 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
894 return;
895
896 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
897 return;
898
/*
 * An MDS_NO CPU for which the SRBDS mitigation is not needed (TSX is
 * disabled) may not have received the SRBDS control microcode.
 */
903 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
904 return;
905
906 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
907
908 switch (srbds_mitigation) {
909 case SRBDS_MITIGATION_OFF:
910 case SRBDS_MITIGATION_TSX_OFF:
911 mcu_ctrl |= RNGDS_MITG_DIS;
912 break;
913 case SRBDS_MITIGATION_FULL:
914 mcu_ctrl &= ~RNGDS_MITG_DIS;
915 break;
916 default:
917 break;
918 }
919
920 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
921 }
922
static void __init srbds_select_mitigation(void)
924 {
925 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) {
926 srbds_mitigation = SRBDS_MITIGATION_OFF;
927 return;
928 }
929
930 if (srbds_mitigation == SRBDS_MITIGATION_AUTO) {
931 if (should_mitigate_vuln(X86_BUG_SRBDS))
932 srbds_mitigation = SRBDS_MITIGATION_FULL;
933 else {
934 srbds_mitigation = SRBDS_MITIGATION_OFF;
935 return;
936 }
937 }
938
/*
 * Check to see if this is one of the MDS_NO systems supporting TSX that
 * are only exposed to SRBDS when TSX is enabled or when the CPU is
 * affected by the Processor MMIO Stale Data vulnerability.
 */
944 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
945 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
946 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
947 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
948 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
949 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
950 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
951 else if (srbds_off)
952 srbds_mitigation = SRBDS_MITIGATION_OFF;
953
954 pr_info("%s\n", srbds_strings[srbds_mitigation]);
955 }
956
static void __init srbds_apply_mitigation(void)
958 {
959 update_srbds_msr();
960 }
961
static int __init srbds_parse_cmdline(char *str)
963 {
964 if (!str)
965 return -EINVAL;
966
967 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
968 return 0;
969
970 srbds_off = !strcmp(str, "off");
971 return 0;
972 }
973 early_param("srbds", srbds_parse_cmdline);
974
975 #undef pr_fmt
976 #define pr_fmt(fmt) "L1D Flush : " fmt
977
978 enum l1d_flush_mitigations {
979 L1D_FLUSH_OFF = 0,
980 L1D_FLUSH_ON,
981 };
982
983 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
984
static void __init l1d_flush_select_mitigation(void)
986 {
987 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
988 return;
989
990 static_branch_enable(&switch_mm_cond_l1d_flush);
991 pr_info("Conditional flush on switch_mm() enabled\n");
992 }
993
static int __init l1d_flush_parse_cmdline(char *str)
995 {
996 if (!strcmp(str, "on"))
997 l1d_flush_mitigation = L1D_FLUSH_ON;
998
999 return 0;
1000 }
1001 early_param("l1d_flush", l1d_flush_parse_cmdline);
1002
1003 #undef pr_fmt
1004 #define pr_fmt(fmt) "GDS: " fmt
1005
1006 enum gds_mitigations {
1007 GDS_MITIGATION_OFF,
1008 GDS_MITIGATION_AUTO,
1009 GDS_MITIGATION_UCODE_NEEDED,
1010 GDS_MITIGATION_FORCE,
1011 GDS_MITIGATION_FULL,
1012 GDS_MITIGATION_FULL_LOCKED,
1013 GDS_MITIGATION_HYPERVISOR,
1014 };
1015
1016 static enum gds_mitigations gds_mitigation __ro_after_init =
1017 IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;
1018
1019 static const char * const gds_strings[] = {
1020 [GDS_MITIGATION_OFF] = "Vulnerable",
1021 [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
1022 [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode",
1023 [GDS_MITIGATION_FULL] = "Mitigation: Microcode",
1024 [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)",
1025 [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
1026 };
1027
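/* Report whether the microcode-based GDS mitigation is in effect. */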
bool gds_ucode_mitigated(void)
1029 {
1030 return (gds_mitigation == GDS_MITIGATION_FULL ||
1031 gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
1032 }
1033 EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
1034
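/*
 * Program GDS_MITG_DIS in MSR_IA32_MCU_OPT_CTRL to match the selected GDS
 * mitigation and verify that the write was not ignored by a locked CPU.
 */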
void update_gds_msr(void)
1036 {
1037 u64 mcu_ctrl_after;
1038 u64 mcu_ctrl;
1039
1040 switch (gds_mitigation) {
1041 case GDS_MITIGATION_OFF:
1042 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1043 mcu_ctrl |= GDS_MITG_DIS;
1044 break;
1045 case GDS_MITIGATION_FULL_LOCKED:
1046 /*
1047 * The LOCKED state comes from the boot CPU. APs might not have
1048 * the same state. Make sure the mitigation is enabled on all
1049 * CPUs.
1050 */
1051 case GDS_MITIGATION_FULL:
1052 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1053 mcu_ctrl &= ~GDS_MITG_DIS;
1054 break;
1055 case GDS_MITIGATION_FORCE:
1056 case GDS_MITIGATION_UCODE_NEEDED:
1057 case GDS_MITIGATION_HYPERVISOR:
1058 case GDS_MITIGATION_AUTO:
1059 return;
1060 }
1061
1062 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1063
1064 /*
1065 * Check to make sure that the WRMSR value was not ignored. Writes to
1066 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
1067 * processor was not.
1068 */
1069 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
1070 WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
1071 }
1072
static void __init gds_select_mitigation(void)
1074 {
1075 u64 mcu_ctrl;
1076
1077 if (!boot_cpu_has_bug(X86_BUG_GDS))
1078 return;
1079
1080 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1081 gds_mitigation = GDS_MITIGATION_HYPERVISOR;
1082 return;
1083 }
1084
1085 /* Will verify below that mitigation _can_ be disabled */
1086 if (gds_mitigation == GDS_MITIGATION_AUTO) {
1087 if (should_mitigate_vuln(X86_BUG_GDS))
1088 gds_mitigation = GDS_MITIGATION_FULL;
1089 else
1090 gds_mitigation = GDS_MITIGATION_OFF;
1091 }
1092
1093 /* No microcode */
1094 if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
1095 if (gds_mitigation != GDS_MITIGATION_FORCE)
1096 gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
1097 return;
1098 }
1099
1100 /* Microcode has mitigation, use it */
1101 if (gds_mitigation == GDS_MITIGATION_FORCE)
1102 gds_mitigation = GDS_MITIGATION_FULL;
1103
1104 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1105 if (mcu_ctrl & GDS_MITG_LOCKED) {
1106 if (gds_mitigation == GDS_MITIGATION_OFF)
1107 pr_warn("Mitigation locked. Disable failed.\n");
1108
1109 /*
1110 * The mitigation is selected from the boot CPU. All other CPUs
1111 * _should_ have the same state. If the boot CPU isn't locked
1112 * but others are then update_gds_msr() will WARN() of the state
1113 * mismatch. If the boot CPU is locked update_gds_msr() will
1114 * ensure the other CPUs have the mitigation enabled.
1115 */
1116 gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
1117 }
1118 }
1119
static void __init gds_apply_mitigation(void)
1121 {
1122 if (!boot_cpu_has_bug(X86_BUG_GDS))
1123 return;
1124
1125 /* Microcode is present */
1126 if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
1127 update_gds_msr();
1128 else if (gds_mitigation == GDS_MITIGATION_FORCE) {
1129 /*
1130 * This only needs to be done on the boot CPU so do it
1131 * here rather than in update_gds_msr()
1132 */
1133 setup_clear_cpu_cap(X86_FEATURE_AVX);
1134 pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
1135 }
1136
1137 pr_info("%s\n", gds_strings[gds_mitigation]);
1138 }
1139
static int __init gds_parse_cmdline(char *str)
1141 {
1142 if (!str)
1143 return -EINVAL;
1144
1145 if (!boot_cpu_has_bug(X86_BUG_GDS))
1146 return 0;
1147
1148 if (!strcmp(str, "off"))
1149 gds_mitigation = GDS_MITIGATION_OFF;
1150 else if (!strcmp(str, "force"))
1151 gds_mitigation = GDS_MITIGATION_FORCE;
1152
1153 return 0;
1154 }
1155 early_param("gather_data_sampling", gds_parse_cmdline);
1156
1157 #undef pr_fmt
1158 #define pr_fmt(fmt) "Spectre V1 : " fmt
1159
1160 enum spectre_v1_mitigation {
1161 SPECTRE_V1_MITIGATION_NONE,
1162 SPECTRE_V1_MITIGATION_AUTO,
1163 };
1164
1165 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
1166 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
1167 SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;
1168
1169 static const char * const spectre_v1_strings[] = {
1170 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
1171 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
1172 };
1173
1174 /*
1175 * Does SMAP provide full mitigation against speculative kernel access to
1176 * userspace?
1177 */
static bool smap_works_speculatively(void)
1179 {
1180 if (!boot_cpu_has(X86_FEATURE_SMAP))
1181 return false;
1182
1183 /*
1184 * On CPUs which are vulnerable to Meltdown, SMAP does not
1185 * prevent speculative access to user data in the L1 cache.
1186 * Consider SMAP to be non-functional as a mitigation on these
1187 * CPUs.
1188 */
1189 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
1190 return false;
1191
1192 return true;
1193 }
1194
static void __init spectre_v1_select_mitigation(void)
1196 {
1197 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1198 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1199
1200 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1))
1201 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1202 }
1203
static void __init spectre_v1_apply_mitigation(void)
1205 {
1206 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1207 return;
1208
1209 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
1210 /*
1211 * With Spectre v1, a user can speculatively control either
1212 * path of a conditional swapgs with a user-controlled GS
1213 * value. The mitigation is to add lfences to both code paths.
1214 *
1215 * If FSGSBASE is enabled, the user can put a kernel address in
1216 * GS, in which case SMAP provides no protection.
1217 *
1218 * If FSGSBASE is disabled, the user can only put a user space
1219 * address in GS. That makes an attack harder, but still
1220 * possible if there's no SMAP protection.
1221 */
1222 if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
1223 !smap_works_speculatively()) {
1224 /*
1225 * Mitigation can be provided from SWAPGS itself or
1226 * PTI as the CR3 write in the Meltdown mitigation
1227 * is serializing.
1228 *
1229 * If neither is there, mitigate with an LFENCE to
1230 * stop speculation through swapgs.
1231 */
1232 if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
1233 !boot_cpu_has(X86_FEATURE_PTI))
1234 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
1235
1236 /*
1237 * Enable lfences in the kernel entry (non-swapgs)
1238 * paths, to prevent user entry from speculatively
1239 * skipping swapgs.
1240 */
1241 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
1242 }
1243 }
1244
1245 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1246 }
1247
static int __init nospectre_v1_cmdline(char *str)
1249 {
1250 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1251 return 0;
1252 }
1253 early_param("nospectre_v1", nospectre_v1_cmdline);
1254
1255 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
1256
1257 /* Depends on spectre_v2 mitigation selected already */
static inline bool cdt_possible(enum spectre_v2_mitigation mode)
1259 {
1260 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) ||
1261 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE))
1262 return false;
1263
1264 if (mode == SPECTRE_V2_RETPOLINE ||
1265 mode == SPECTRE_V2_EIBRS_RETPOLINE)
1266 return true;
1267
1268 return false;
1269 }
1270
1271 #undef pr_fmt
1272 #define pr_fmt(fmt) "RETBleed: " fmt
1273
1274 enum its_mitigation {
1275 ITS_MITIGATION_OFF,
1276 ITS_MITIGATION_AUTO,
1277 ITS_MITIGATION_VMEXIT_ONLY,
1278 ITS_MITIGATION_ALIGNED_THUNKS,
1279 ITS_MITIGATION_RETPOLINE_STUFF,
1280 };
1281
1282 static enum its_mitigation its_mitigation __ro_after_init =
1283 IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF;
1284
1285 enum retbleed_mitigation {
1286 RETBLEED_MITIGATION_NONE,
1287 RETBLEED_MITIGATION_AUTO,
1288 RETBLEED_MITIGATION_UNRET,
1289 RETBLEED_MITIGATION_IBPB,
1290 RETBLEED_MITIGATION_IBRS,
1291 RETBLEED_MITIGATION_EIBRS,
1292 RETBLEED_MITIGATION_STUFF,
1293 };
1294
1295 static const char * const retbleed_strings[] = {
1296 [RETBLEED_MITIGATION_NONE] = "Vulnerable",
1297 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
1298 [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
1299 [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
1300 [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
1301 [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
1302 };
1303
1304 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
1305 IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;
1306
1307 static int __ro_after_init retbleed_nosmt = false;
1308
1309 enum srso_mitigation {
1310 SRSO_MITIGATION_NONE,
1311 SRSO_MITIGATION_AUTO,
1312 SRSO_MITIGATION_UCODE_NEEDED,
1313 SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
1314 SRSO_MITIGATION_MICROCODE,
1315 SRSO_MITIGATION_NOSMT,
1316 SRSO_MITIGATION_SAFE_RET,
1317 SRSO_MITIGATION_IBPB,
1318 SRSO_MITIGATION_IBPB_ON_VMEXIT,
1319 SRSO_MITIGATION_BP_SPEC_REDUCE,
1320 };
1321
1322 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
1323
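/*
 * Parse the comma-separated "retbleed=" options: off, auto, unret, ibpb,
 * stuff, nosmt and force.
 */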
static int __init retbleed_parse_cmdline(char *str)
1325 {
1326 if (!str)
1327 return -EINVAL;
1328
1329 while (str) {
1330 char *next = strchr(str, ',');
1331 if (next) {
1332 *next = 0;
1333 next++;
1334 }
1335
1336 if (!strcmp(str, "off")) {
1337 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1338 } else if (!strcmp(str, "auto")) {
1339 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1340 } else if (!strcmp(str, "unret")) {
1341 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1342 } else if (!strcmp(str, "ibpb")) {
1343 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1344 } else if (!strcmp(str, "stuff")) {
1345 retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1346 } else if (!strcmp(str, "nosmt")) {
1347 retbleed_nosmt = true;
1348 } else if (!strcmp(str, "force")) {
1349 setup_force_cpu_bug(X86_BUG_RETBLEED);
1350 } else {
1351 pr_err("Ignoring unknown retbleed option (%s).", str);
1352 }
1353
1354 str = next;
1355 }
1356
1357 return 0;
1358 }
1359 early_param("retbleed", retbleed_parse_cmdline);
1360
1361 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
1362 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
1363
static void __init retbleed_select_mitigation(void)
1365 {
1366 if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) {
1367 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1368 return;
1369 }
1370
1371 switch (retbleed_mitigation) {
1372 case RETBLEED_MITIGATION_UNRET:
1373 if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
1374 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1375 pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
1376 }
1377 break;
1378 case RETBLEED_MITIGATION_IBPB:
1379 if (!boot_cpu_has(X86_FEATURE_IBPB)) {
1380 pr_err("WARNING: CPU does not support IBPB.\n");
1381 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1382 } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
1383 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
1384 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1385 }
1386 break;
1387 case RETBLEED_MITIGATION_STUFF:
1388 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1389 pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
1390 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1391 } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1392 pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
1393 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1394 }
1395 break;
1396 default:
1397 break;
1398 }
1399
1400 if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
1401 return;
1402
1403 if (!should_mitigate_vuln(X86_BUG_RETBLEED)) {
1404 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1405 return;
1406 }
1407
1408 /* Intel mitigation selected in retbleed_update_mitigation() */
1409 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1410 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
1411 if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
1412 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1413 else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
1414 boot_cpu_has(X86_FEATURE_IBPB))
1415 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1416 else
1417 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1418 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1419 /* Final mitigation depends on spectre-v2 selection */
1420 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
1421 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1422 else if (boot_cpu_has(X86_FEATURE_IBRS))
1423 retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1424 else
1425 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1426 }
1427 }
1428
static void __init retbleed_update_mitigation(void)
1430 {
1431 if (!boot_cpu_has_bug(X86_BUG_RETBLEED))
1432 return;
1433
1434 /* ITS can also enable stuffing */
1435 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF)
1436 retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1437
1438 /* If SRSO is using IBPB, that works for retbleed too */
1439 if (srso_mitigation == SRSO_MITIGATION_IBPB)
1440 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1441
1442 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF &&
1443 !cdt_possible(spectre_v2_enabled)) {
1444 pr_err("WARNING: retbleed=stuff depends on retpoline\n");
1445 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1446 }
1447
1448 /*
1449 * Let IBRS trump all on Intel without affecting the effects of the
1450 * retbleed= cmdline option except for call depth based stuffing
1451 */
1452 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1453 switch (spectre_v2_enabled) {
1454 case SPECTRE_V2_IBRS:
1455 retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1456 break;
1457 case SPECTRE_V2_EIBRS:
1458 case SPECTRE_V2_EIBRS_RETPOLINE:
1459 case SPECTRE_V2_EIBRS_LFENCE:
1460 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1461 break;
1462 default:
1463 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
1464 pr_err(RETBLEED_INTEL_MSG);
1465 }
1466 }
1467
1468 pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
1469 }
1470
static void __init retbleed_apply_mitigation(void)
1472 {
1473 bool mitigate_smt = false;
1474
1475 switch (retbleed_mitigation) {
1476 case RETBLEED_MITIGATION_NONE:
1477 return;
1478
1479 case RETBLEED_MITIGATION_UNRET:
1480 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1481 setup_force_cpu_cap(X86_FEATURE_UNRET);
1482
1483 set_return_thunk(retbleed_return_thunk);
1484
1485 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
1486 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
1487 pr_err(RETBLEED_UNTRAIN_MSG);
1488
1489 mitigate_smt = true;
1490 break;
1491
1492 case RETBLEED_MITIGATION_IBPB:
1493 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1494 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
1495 mitigate_smt = true;
1496
1497 /*
1498 * IBPB on entry already obviates the need for
1499 * software-based untraining so clear those in case some
1500 * other mitigation like SRSO has selected them.
1501 */
1502 setup_clear_cpu_cap(X86_FEATURE_UNRET);
1503 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
1504
1505 /*
1506 * There is no need for RSB filling: write_ibpb() ensures
1507 * all predictions, including the RSB, are invalidated,
1508 * regardless of IBPB implementation.
1509 */
1510 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1511
1512 break;
1513
1514 case RETBLEED_MITIGATION_STUFF:
1515 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1516 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1517
1518 set_return_thunk(call_depth_return_thunk);
1519 break;
1520
1521 default:
1522 break;
1523 }
1524
1525 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
1526 (retbleed_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
1527 cpu_smt_disable(false);
1528 }
1529
1530 #undef pr_fmt
1531 #define pr_fmt(fmt) "ITS: " fmt
1532
1533 static const char * const its_strings[] = {
1534 [ITS_MITIGATION_OFF] = "Vulnerable",
1535 [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
1536 [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
1537 [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
1538 };
1539
static int __init its_parse_cmdline(char *str)
1541 {
1542 if (!str)
1543 return -EINVAL;
1544
1545 if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
1546 pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
1547 return 0;
1548 }
1549
1550 if (!strcmp(str, "off")) {
1551 its_mitigation = ITS_MITIGATION_OFF;
1552 } else if (!strcmp(str, "on")) {
1553 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1554 } else if (!strcmp(str, "force")) {
1555 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1556 setup_force_cpu_bug(X86_BUG_ITS);
1557 } else if (!strcmp(str, "vmexit")) {
1558 its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
1559 } else if (!strcmp(str, "stuff")) {
1560 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1561 } else {
1562 pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
1563 }
1564
1565 return 0;
1566 }
1567 early_param("indirect_target_selection", its_parse_cmdline);
1568
static void __init its_select_mitigation(void)
1570 {
1571 if (!boot_cpu_has_bug(X86_BUG_ITS)) {
1572 its_mitigation = ITS_MITIGATION_OFF;
1573 return;
1574 }
1575
1576 if (its_mitigation == ITS_MITIGATION_AUTO) {
1577 if (should_mitigate_vuln(X86_BUG_ITS))
1578 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1579 else
1580 its_mitigation = ITS_MITIGATION_OFF;
1581 }
1582
1583 if (its_mitigation == ITS_MITIGATION_OFF)
1584 return;
1585
1586 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
1587 !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
1588 pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
1589 its_mitigation = ITS_MITIGATION_OFF;
1590 return;
1591 }
1592
1593 if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
1594 pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
1595 its_mitigation = ITS_MITIGATION_OFF;
1596 return;
1597 }
1598
1599 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1600 !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1601 pr_err("RSB stuff mitigation not supported, using default\n");
1602 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1603 }
1604
1605 if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY &&
1606 !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
1607 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1608 }
1609
static void __init its_update_mitigation(void)
1611 {
1612 if (!boot_cpu_has_bug(X86_BUG_ITS))
1613 return;
1614
1615 switch (spectre_v2_enabled) {
1616 case SPECTRE_V2_NONE:
1617 if (its_mitigation != ITS_MITIGATION_OFF)
1618 pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
1619 its_mitigation = ITS_MITIGATION_OFF;
1620 break;
1621 case SPECTRE_V2_RETPOLINE:
1622 case SPECTRE_V2_EIBRS_RETPOLINE:
1623 /* Retpoline+CDT mitigates ITS */
1624 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
1625 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1626 break;
1627 case SPECTRE_V2_LFENCE:
1628 case SPECTRE_V2_EIBRS_LFENCE:
1629 pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
1630 its_mitigation = ITS_MITIGATION_OFF;
1631 break;
1632 default:
1633 break;
1634 }
1635
1636 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1637 !cdt_possible(spectre_v2_enabled))
1638 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1639
1640 pr_info("%s\n", its_strings[its_mitigation]);
1641 }
1642
static void __init its_apply_mitigation(void)
1644 {
1645 switch (its_mitigation) {
1646 case ITS_MITIGATION_OFF:
1647 case ITS_MITIGATION_AUTO:
1648 case ITS_MITIGATION_VMEXIT_ONLY:
1649 break;
1650 case ITS_MITIGATION_ALIGNED_THUNKS:
1651 if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
1652 setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
1653
1654 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1655 set_return_thunk(its_return_thunk);
1656 break;
1657 case ITS_MITIGATION_RETPOLINE_STUFF:
1658 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1659 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1660 set_return_thunk(call_depth_return_thunk);
1661 break;
1662 }
1663 }
1664
1665 #undef pr_fmt
1666 #define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt
1667
1668 enum tsa_mitigations {
1669 TSA_MITIGATION_NONE,
1670 TSA_MITIGATION_AUTO,
1671 TSA_MITIGATION_UCODE_NEEDED,
1672 TSA_MITIGATION_USER_KERNEL,
1673 TSA_MITIGATION_VM,
1674 TSA_MITIGATION_FULL,
1675 };
1676
1677 static const char * const tsa_strings[] = {
1678 [TSA_MITIGATION_NONE] = "Vulnerable",
1679 [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
1680 [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
1681 [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
1682 [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
1683 };
1684
1685 static enum tsa_mitigations tsa_mitigation __ro_after_init =
1686 IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;
1687
static int __init tsa_parse_cmdline(char *str)
1689 {
1690 if (!str)
1691 return -EINVAL;
1692
1693 if (!strcmp(str, "off"))
1694 tsa_mitigation = TSA_MITIGATION_NONE;
1695 else if (!strcmp(str, "on"))
1696 tsa_mitigation = TSA_MITIGATION_FULL;
1697 else if (!strcmp(str, "user"))
1698 tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1699 else if (!strcmp(str, "vm"))
1700 tsa_mitigation = TSA_MITIGATION_VM;
1701 else
1702 pr_err("Ignoring unknown tsa=%s option.\n", str);
1703
1704 return 0;
1705 }
1706 early_param("tsa", tsa_parse_cmdline);
1707
static void __init tsa_select_mitigation(void)
1709 {
1710 if (!boot_cpu_has_bug(X86_BUG_TSA)) {
1711 tsa_mitigation = TSA_MITIGATION_NONE;
1712 return;
1713 }
1714
1715 if (tsa_mitigation == TSA_MITIGATION_AUTO) {
1716 bool vm = false, uk = false;
1717
1718 tsa_mitigation = TSA_MITIGATION_NONE;
1719
1720 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
1721 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) {
1722 tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1723 uk = true;
1724 }
1725
1726 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
1727 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
1728 tsa_mitigation = TSA_MITIGATION_VM;
1729 vm = true;
1730 }
1731
1732 if (uk && vm)
1733 tsa_mitigation = TSA_MITIGATION_FULL;
1734 }
1735
1736 if (tsa_mitigation == TSA_MITIGATION_NONE)
1737 return;
1738
1739 if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
1740 tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
1741
1742 /*
1743 * No need to set verw_clear_cpu_buf_mitigation_selected - it
1744 * doesn't fit all cases here and it is not needed because this
1745 * is the only VERW-based mitigation on AMD.
1746 */
1747 pr_info("%s\n", tsa_strings[tsa_mitigation]);
1748 }
1749
static void __init tsa_apply_mitigation(void)
1751 {
1752 switch (tsa_mitigation) {
1753 case TSA_MITIGATION_USER_KERNEL:
1754 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1755 break;
1756 case TSA_MITIGATION_VM:
1757 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1758 break;
1759 case TSA_MITIGATION_FULL:
1760 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1761 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1762 break;
1763 default:
1764 break;
1765 }
1766 }
1767
1768 #undef pr_fmt
1769 #define pr_fmt(fmt) "Spectre V2 : " fmt
1770
1771 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
1772 SPECTRE_V2_USER_NONE;
1773 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
1774 SPECTRE_V2_USER_NONE;
1775
1776 #ifdef CONFIG_MITIGATION_RETPOLINE
1777 static bool spectre_v2_bad_module;
1778
1779 bool retpoline_module_ok(bool has_retpoline)
1780 {
1781 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1782 return true;
1783
1784 pr_err("System may be vulnerable to spectre v2\n");
1785 spectre_v2_bad_module = true;
1786 return false;
1787 }
1788
1789 static inline const char *spectre_v2_module_string(void)
1790 {
1791 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1792 }
1793 #else
1794 static inline const char *spectre_v2_module_string(void) { return ""; }
1795 #endif
1796
1797 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1798 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1799 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1800 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1801
1802 #ifdef CONFIG_BPF_SYSCALL
1803 void unpriv_ebpf_notify(int new_state)
1804 {
1805 if (new_state)
1806 return;
1807
1808 /* Unprivileged eBPF is enabled */
1809
1810 switch (spectre_v2_enabled) {
1811 case SPECTRE_V2_EIBRS:
1812 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1813 break;
1814 case SPECTRE_V2_EIBRS_LFENCE:
1815 if (sched_smt_active())
1816 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1817 break;
1818 default:
1819 break;
1820 }
1821 }
1822 #endif
1823
1824 static inline bool match_option(const char *arg, int arglen, const char *opt)
1825 {
1826 int len = strlen(opt);
1827
1828 return len == arglen && !strncmp(arg, opt, len);
1829 }
1830
1831 /* The kernel command line selection for spectre v2 */
1832 enum spectre_v2_mitigation_cmd {
1833 SPECTRE_V2_CMD_NONE,
1834 SPECTRE_V2_CMD_AUTO,
1835 SPECTRE_V2_CMD_FORCE,
1836 SPECTRE_V2_CMD_RETPOLINE,
1837 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1838 SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1839 SPECTRE_V2_CMD_EIBRS,
1840 SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1841 SPECTRE_V2_CMD_EIBRS_LFENCE,
1842 SPECTRE_V2_CMD_IBRS,
1843 };
1844
1845 static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO;
1846
1847 enum spectre_v2_user_cmd {
1848 SPECTRE_V2_USER_CMD_NONE,
1849 SPECTRE_V2_USER_CMD_AUTO,
1850 SPECTRE_V2_USER_CMD_FORCE,
1851 SPECTRE_V2_USER_CMD_PRCTL,
1852 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1853 SPECTRE_V2_USER_CMD_SECCOMP,
1854 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1855 };
1856
1857 static const char * const spectre_v2_user_strings[] = {
1858 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
1859 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
1860 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
1861 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
1862 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
1863 };
1864
1865 static const struct {
1866 const char *option;
1867 enum spectre_v2_user_cmd cmd;
1868 bool secure;
1869 } v2_user_options[] __initconst = {
1870 { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
1871 { "off", SPECTRE_V2_USER_CMD_NONE, false },
1872 { "on", SPECTRE_V2_USER_CMD_FORCE, true },
1873 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
1874 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
1875 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
1876 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
1877 };
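
/*
 * For illustration only (not part of the upstream file): per the option
 * table above and the SPECTRE_V2_USER_CMD_* handling below,
 * "spectre_v2_user=prctl,ibpb" selects always-on IBPB with per-task (prctl)
 * STIBP, while "spectre_v2_user=seccomp" extends the prctl mode so that
 * seccomp-sandboxed tasks are mitigated implicitly.
 */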
1878
1879 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
1880 {
1881 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1882 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
1883 }
1884
1885 static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void)
1886 {
1887 char arg[20];
1888 int ret, i;
1889
1890 if (!IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2))
1891 return SPECTRE_V2_USER_CMD_NONE;
1892
1893 ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
1894 arg, sizeof(arg));
1895 if (ret < 0)
1896 return SPECTRE_V2_USER_CMD_AUTO;
1897
1898 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
1899 if (match_option(arg, ret, v2_user_options[i].option)) {
1900 spec_v2_user_print_cond(v2_user_options[i].option,
1901 v2_user_options[i].secure);
1902 return v2_user_options[i].cmd;
1903 }
1904 }
1905
1906 pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
1907 return SPECTRE_V2_USER_CMD_AUTO;
1908 }
1909
1910 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1911 {
1912 return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1913 }
1914
1915 static void __init spectre_v2_user_select_mitigation(void)
1916 {
1917 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1918 return;
1919
1920 switch (spectre_v2_parse_user_cmdline()) {
1921 case SPECTRE_V2_USER_CMD_NONE:
1922 return;
1923 case SPECTRE_V2_USER_CMD_FORCE:
1924 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1925 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1926 break;
1927 case SPECTRE_V2_USER_CMD_AUTO:
1928 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER))
1929 break;
1930 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1931 if (smt_mitigations == SMT_MITIGATIONS_OFF)
1932 break;
1933 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1934 break;
1935 case SPECTRE_V2_USER_CMD_PRCTL:
1936 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1937 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1938 break;
1939 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1940 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1941 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1942 break;
1943 case SPECTRE_V2_USER_CMD_SECCOMP:
1944 if (IS_ENABLED(CONFIG_SECCOMP))
1945 spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
1946 else
1947 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1948 spectre_v2_user_stibp = spectre_v2_user_ibpb;
1949 break;
1950 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1951 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1952 if (IS_ENABLED(CONFIG_SECCOMP))
1953 spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
1954 else
1955 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1956 break;
1957 }
1958
1959 /*
1960 * At this point, an STIBP mode other than "off" has been set.
1961 * If STIBP support is not being forced, check if STIBP always-on
1962 * is preferred.
1963 */
1964 if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1965 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
1966 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1967 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1968
1969 if (!boot_cpu_has(X86_FEATURE_IBPB))
1970 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1971
1972 if (!boot_cpu_has(X86_FEATURE_STIBP))
1973 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1974 }
1975
1976 static void __init spectre_v2_user_update_mitigation(void)
1977 {
1978 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1979 return;
1980
1981 /* The spectre_v2 cmd line can override spectre_v2_user options */
1982 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
1983 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1984 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1985 } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
1986 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1987 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1988 }
1989
1990 /*
1991 * STIBP is not required if the CPU has no STIBP support, if Intel
1992 * enhanced IBRS is enabled, or if SMT is not possible.
1993 *
1994 * Intel's Enhanced IBRS also protects against cross-thread branch target
1995 * injection in user-mode as the IBRS bit remains always set which
1996 * implicitly enables cross-thread protections. However, in legacy IBRS
1997 * mode, the IBRS bit is set only on kernel entry and cleared on return
1998 * to userspace. AMD Automatic IBRS also does not protect userspace.
1999 * These modes therefore disable the implicit cross-thread protection,
2000 * so allow for STIBP to be selected in those cases.
2001 */
2002 if (!boot_cpu_has(X86_FEATURE_STIBP) ||
2003 !cpu_smt_possible() ||
2004 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
2005 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
2006 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
2007 return;
2008 }
2009
2010 if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE &&
2011 (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
2012 retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) {
2013 if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT &&
2014 spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED)
2015 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
2016 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
2017 }
2018 pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]);
2019 }
2020
2021 static void __init spectre_v2_user_apply_mitigation(void)
2022 {
2023 /* Initialize Indirect Branch Prediction Barrier */
2024 if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) {
2025 static_branch_enable(&switch_vcpu_ibpb);
2026
2027 switch (spectre_v2_user_ibpb) {
2028 case SPECTRE_V2_USER_STRICT:
2029 static_branch_enable(&switch_mm_always_ibpb);
2030 break;
2031 case SPECTRE_V2_USER_PRCTL:
2032 case SPECTRE_V2_USER_SECCOMP:
2033 static_branch_enable(&switch_mm_cond_ibpb);
2034 break;
2035 default:
2036 break;
2037 }
2038
2039 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
2040 static_key_enabled(&switch_mm_always_ibpb) ?
2041 "always-on" : "conditional");
2042 }
2043 }
2044
2045 static const char * const spectre_v2_strings[] = {
2046 [SPECTRE_V2_NONE] = "Vulnerable",
2047 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
2048 [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
2049 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
2050 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
2051 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
2052 [SPECTRE_V2_IBRS] = "Mitigation: IBRS",
2053 };
2054
2055 static const struct {
2056 const char *option;
2057 enum spectre_v2_mitigation_cmd cmd;
2058 bool secure;
2059 } mitigation_options[] __initconst = {
2060 { "off", SPECTRE_V2_CMD_NONE, false },
2061 { "on", SPECTRE_V2_CMD_FORCE, true },
2062 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
2063 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
2064 { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
2065 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
2066 { "eibrs", SPECTRE_V2_CMD_EIBRS, false },
2067 { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false },
2068 { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false },
2069 { "auto", SPECTRE_V2_CMD_AUTO, false },
2070 { "ibrs", SPECTRE_V2_CMD_IBRS, false },
2071 };
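
/*
 * For illustration only (not part of the upstream file):
 * "spectre_v2=retpoline,lfence" requests the LFENCE-based retpoline flavour
 * and is rejected further down in spectre_v2_parse_cmdline() if the CPU
 * lacks a serializing LFENCE, while "spectre_v2=eibrs,retpoline" combines
 * Enhanced IBRS for the kernel with retpolines and requires
 * X86_FEATURE_IBRS_ENHANCED plus a retpoline-enabled build.
 */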
2072
2073 static void __init spec_v2_print_cond(const char *reason, bool secure)
2074 {
2075 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
2076 pr_info("%s selected on command line.\n", reason);
2077 }
2078
2079 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
2080 {
2081 enum spectre_v2_mitigation_cmd cmd;
2082 char arg[20];
2083 int ret, i;
2084
2085 cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE;
2086 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
2087 return SPECTRE_V2_CMD_NONE;
2088
2089 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
2090 if (ret < 0)
2091 return cmd;
2092
2093 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
2094 if (!match_option(arg, ret, mitigation_options[i].option))
2095 continue;
2096 cmd = mitigation_options[i].cmd;
2097 break;
2098 }
2099
2100 if (i >= ARRAY_SIZE(mitigation_options)) {
2101 pr_err("unknown option (%s). Switching to default mode\n", arg);
2102 return cmd;
2103 }
2104
2105 if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
2106 cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2107 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
2108 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2109 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2110 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
2111 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
2112 mitigation_options[i].option);
2113 return SPECTRE_V2_CMD_AUTO;
2114 }
2115
2116 if ((cmd == SPECTRE_V2_CMD_EIBRS ||
2117 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2118 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2119 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2120 pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
2121 mitigation_options[i].option);
2122 return SPECTRE_V2_CMD_AUTO;
2123 }
2124
2125 if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2126 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
2127 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
2128 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
2129 mitigation_options[i].option);
2130 return SPECTRE_V2_CMD_AUTO;
2131 }
2132
2133 if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
2134 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
2135 mitigation_options[i].option);
2136 return SPECTRE_V2_CMD_AUTO;
2137 }
2138
2139 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
2140 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
2141 mitigation_options[i].option);
2142 return SPECTRE_V2_CMD_AUTO;
2143 }
2144
2145 if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
2146 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
2147 mitigation_options[i].option);
2148 return SPECTRE_V2_CMD_AUTO;
2149 }
2150
2151 if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
2152 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
2153 mitigation_options[i].option);
2154 return SPECTRE_V2_CMD_AUTO;
2155 }
2156
2157 spec_v2_print_cond(mitigation_options[i].option,
2158 mitigation_options[i].secure);
2159 return cmd;
2160 }
2161
2162 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
2163 {
2164 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
2165 pr_err("Kernel not compiled with retpoline; no mitigation available!");
2166 return SPECTRE_V2_NONE;
2167 }
2168
2169 return SPECTRE_V2_RETPOLINE;
2170 }
2171
2172 static bool __ro_after_init rrsba_disabled;
2173
2174 /* Disable in-kernel use of non-RSB RET predictors */
2175 static void __init spec_ctrl_disable_kernel_rrsba(void)
2176 {
2177 if (rrsba_disabled)
2178 return;
2179
2180 if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
2181 rrsba_disabled = true;
2182 return;
2183 }
2184
2185 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
2186 return;
2187
2188 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
2189 update_spec_ctrl(x86_spec_ctrl_base);
2190 rrsba_disabled = true;
2191 }
2192
2193 static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
2194 {
2195 /*
2196 * WARNING! There are many subtleties to consider when changing *any*
2197 * code related to RSB-related mitigations. Before doing so, carefully
2198 * read the following document, and update if necessary:
2199 *
2200 * Documentation/admin-guide/hw-vuln/rsb.rst
2201 *
2202 * In an overly simplified nutshell:
2203 *
2204 * - User->user RSB attacks are conditionally mitigated during
2205 * context switches by cond_mitigation -> write_ibpb().
2206 *
2207 * - User->kernel and guest->host attacks are mitigated by eIBRS or
2208 * RSB filling.
2209 *
2210 * Note that, depending on the config, other alternative mitigations
2211 * may end up being used instead, e.g., IBPB on entry/vmexit, call
2212 * depth tracking, or return thunks.
2213 */
2214
2215 switch (mode) {
2216 case SPECTRE_V2_NONE:
2217 break;
2218
2219 case SPECTRE_V2_EIBRS:
2220 case SPECTRE_V2_EIBRS_LFENCE:
2221 case SPECTRE_V2_EIBRS_RETPOLINE:
2222 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2223 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
2224 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
2225 }
2226 break;
2227
2228 case SPECTRE_V2_RETPOLINE:
2229 case SPECTRE_V2_LFENCE:
2230 case SPECTRE_V2_IBRS:
2231 pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
2232 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
2233 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
2234 break;
2235
2236 default:
2237 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
2238 dump_stack();
2239 break;
2240 }
2241 }
2242
2243 /*
2244 * Set BHI_DIS_S to prevent indirect branches in the kernel from being
2245 * influenced by branch history in userspace. Not needed if BHI_NO is set.
2246 */
2247 static bool __init spec_ctrl_bhi_dis(void)
2248 {
2249 if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
2250 return false;
2251
2252 x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
2253 update_spec_ctrl(x86_spec_ctrl_base);
2254 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
2255
2256 return true;
2257 }
2258
2259 enum bhi_mitigations {
2260 BHI_MITIGATION_OFF,
2261 BHI_MITIGATION_AUTO,
2262 BHI_MITIGATION_ON,
2263 BHI_MITIGATION_VMEXIT_ONLY,
2264 };
2265
2266 static enum bhi_mitigations bhi_mitigation __ro_after_init =
2267 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF;
2268
2269 static int __init spectre_bhi_parse_cmdline(char *str)
2270 {
2271 if (!str)
2272 return -EINVAL;
2273
2274 if (!strcmp(str, "off"))
2275 bhi_mitigation = BHI_MITIGATION_OFF;
2276 else if (!strcmp(str, "on"))
2277 bhi_mitigation = BHI_MITIGATION_ON;
2278 else if (!strcmp(str, "vmexit"))
2279 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2280 else
2281 pr_err("Ignoring unknown spectre_bhi option (%s)", str);
2282
2283 return 0;
2284 }
2285 early_param("spectre_bhi", spectre_bhi_parse_cmdline);
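
/*
 * For illustration only (not part of the upstream file): when the software
 * clearing sequence is used rather than BHI_DIS_S, "spectre_bhi=vmexit"
 * limits BHB clearing to the VM exit path (BHI_MITIGATION_VMEXIT_ONLY,
 * applied in bhi_apply_mitigation()), whereas "spectre_bhi=on" also clears
 * the BHB on syscall entry.
 */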
2286
2287 static void __init bhi_select_mitigation(void)
2288 {
2289 if (!boot_cpu_has(X86_BUG_BHI))
2290 bhi_mitigation = BHI_MITIGATION_OFF;
2291
2292 if (bhi_mitigation != BHI_MITIGATION_AUTO)
2293 return;
2294
2295 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) {
2296 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
2297 bhi_mitigation = BHI_MITIGATION_ON;
2298 else
2299 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2300 } else {
2301 bhi_mitigation = BHI_MITIGATION_OFF;
2302 }
2303 }
2304
2305 static void __init bhi_update_mitigation(void)
2306 {
2307 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE)
2308 bhi_mitigation = BHI_MITIGATION_OFF;
2309
2310 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
2311 spectre_v2_cmd == SPECTRE_V2_CMD_AUTO)
2312 bhi_mitigation = BHI_MITIGATION_OFF;
2313 }
2314
2315 static void __init bhi_apply_mitigation(void)
2316 {
2317 if (bhi_mitigation == BHI_MITIGATION_OFF)
2318 return;
2319
2320 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
2321 if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
2322 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
2323 spec_ctrl_disable_kernel_rrsba();
2324 if (rrsba_disabled)
2325 return;
2326 }
2327
2328 if (!IS_ENABLED(CONFIG_X86_64))
2329 return;
2330
2331 /* Mitigate in hardware if supported */
2332 if (spec_ctrl_bhi_dis())
2333 return;
2334
2335 if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
2336 pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
2337 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2338 return;
2339 }
2340
2341 pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
2342 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
2343 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2344 }
2345
2346 static void __init spectre_v2_select_mitigation(void)
2347 {
2348 spectre_v2_cmd = spectre_v2_parse_cmdline();
2349
2350 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
2351 (spectre_v2_cmd == SPECTRE_V2_CMD_NONE || spectre_v2_cmd == SPECTRE_V2_CMD_AUTO))
2352 return;
2353
2354 switch (spectre_v2_cmd) {
2355 case SPECTRE_V2_CMD_NONE:
2356 return;
2357
2358 case SPECTRE_V2_CMD_AUTO:
2359 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2))
2360 break;
2361 fallthrough;
2362 case SPECTRE_V2_CMD_FORCE:
2363 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2364 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2365 break;
2366 }
2367
2368 spectre_v2_enabled = spectre_v2_select_retpoline();
2369 break;
2370
2371 case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
2372 pr_err(SPECTRE_V2_LFENCE_MSG);
2373 spectre_v2_enabled = SPECTRE_V2_LFENCE;
2374 break;
2375
2376 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
2377 spectre_v2_enabled = SPECTRE_V2_RETPOLINE;
2378 break;
2379
2380 case SPECTRE_V2_CMD_RETPOLINE:
2381 spectre_v2_enabled = spectre_v2_select_retpoline();
2382 break;
2383
2384 case SPECTRE_V2_CMD_IBRS:
2385 spectre_v2_enabled = SPECTRE_V2_IBRS;
2386 break;
2387
2388 case SPECTRE_V2_CMD_EIBRS:
2389 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2390 break;
2391
2392 case SPECTRE_V2_CMD_EIBRS_LFENCE:
2393 spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE;
2394 break;
2395
2396 case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
2397 spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE;
2398 break;
2399 }
2400 }
2401
2402 static void __init spectre_v2_update_mitigation(void)
2403 {
2404 if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO &&
2405 !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) {
2406 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
2407 boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2408 retbleed_mitigation != RETBLEED_MITIGATION_NONE &&
2409 retbleed_mitigation != RETBLEED_MITIGATION_STUFF &&
2410 boot_cpu_has(X86_FEATURE_IBRS) &&
2411 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
2412 spectre_v2_enabled = SPECTRE_V2_IBRS;
2413 }
2414 }
2415
2416 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2417 pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]);
2418 }
2419
2420 static void __init spectre_v2_apply_mitigation(void)
2421 {
2422 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2423 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
2424
2425 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2426 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
2427 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
2428 } else {
2429 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
2430 update_spec_ctrl(x86_spec_ctrl_base);
2431 }
2432 }
2433
2434 switch (spectre_v2_enabled) {
2435 case SPECTRE_V2_NONE:
2436 return;
2437
2438 case SPECTRE_V2_EIBRS:
2439 break;
2440
2441 case SPECTRE_V2_IBRS:
2442 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
2443 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
2444 pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
2445 break;
2446
2447 case SPECTRE_V2_LFENCE:
2448 case SPECTRE_V2_EIBRS_LFENCE:
2449 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
2450 fallthrough;
2451
2452 case SPECTRE_V2_RETPOLINE:
2453 case SPECTRE_V2_EIBRS_RETPOLINE:
2454 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
2455 break;
2456 }
2457
2458 /*
2459 * Disable alternate RSB predictions in the kernel when indirect CALLs and
2460 * JMPs get protection against BHI and Intramode-BTI, but RET
2461 * prediction from a non-RSB predictor is still a risk.
2462 */
2463 if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE ||
2464 spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE ||
2465 spectre_v2_enabled == SPECTRE_V2_RETPOLINE)
2466 spec_ctrl_disable_kernel_rrsba();
2467
2468 spectre_v2_select_rsb_mitigation(spectre_v2_enabled);
2469
2470 /*
2471 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
2472 * and Enhanced IBRS protect firmware too, so enable IBRS around
2473 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
2474 * otherwise enabled.
2475 *
2476 * Use "spectre_v2_enabled" to check Enhanced IBRS instead of
2477 * boot_cpu_has(), because the user might select retpoline on the kernel
2478 * command line; if the CPU supports Enhanced IBRS, the kernel might then
2479 * unintentionally not enable IBRS around firmware calls.
2480 */
2481 if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2482 boot_cpu_has(X86_FEATURE_IBPB) &&
2483 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2484 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
2485
2486 if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) {
2487 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
2488 pr_info("Enabling Speculation Barrier for firmware calls\n");
2489 }
2490
2491 } else if (boot_cpu_has(X86_FEATURE_IBRS) &&
2492 !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2493 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
2494 pr_info("Enabling Restricted Speculation for firmware calls\n");
2495 }
2496 }
2497
2498 static void update_stibp_msr(void * __unused)
2499 {
2500 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
2501 update_spec_ctrl(val);
2502 }
2503
2504 /* Update x86_spec_ctrl_base in case SMT state changed. */
2505 static void update_stibp_strict(void)
2506 {
2507 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
2508
2509 if (sched_smt_active())
2510 mask |= SPEC_CTRL_STIBP;
2511
2512 if (mask == x86_spec_ctrl_base)
2513 return;
2514
2515 pr_info("Update user space SMT mitigation: STIBP %s\n",
2516 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
2517 x86_spec_ctrl_base = mask;
2518 on_each_cpu(update_stibp_msr, NULL, 1);
2519 }
2520
2521 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
2522 static void update_indir_branch_cond(void)
2523 {
2524 if (sched_smt_active())
2525 static_branch_enable(&switch_to_cond_stibp);
2526 else
2527 static_branch_disable(&switch_to_cond_stibp);
2528 }
2529
2530 #undef pr_fmt
2531 #define pr_fmt(fmt) fmt
2532
2533 /* Update the static key controlling the MDS CPU buffer clear in idle */
2534 static void update_mds_branch_idle(void)
2535 {
2536 /*
2537 * Enable the idle clearing if SMT is active on CPUs which are
2538 * affected only by MSBDS and not any other MDS variant.
2539 *
2540 * The other variants cannot be mitigated when SMT is enabled, so
2541 * clearing the buffers on idle just to prevent the Store Buffer
2542 * repartitioning leak would be a window dressing exercise.
2543 */
2544 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
2545 return;
2546
2547 if (sched_smt_active()) {
2548 static_branch_enable(&cpu_buf_idle_clear);
2549 } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
2550 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
2551 static_branch_disable(&cpu_buf_idle_clear);
2552 }
2553 }
2554
2555 #undef pr_fmt
2556 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
2557
2558 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
2559
2560 /* The kernel command line selection */
2561 enum ssb_mitigation_cmd {
2562 SPEC_STORE_BYPASS_CMD_NONE,
2563 SPEC_STORE_BYPASS_CMD_AUTO,
2564 SPEC_STORE_BYPASS_CMD_ON,
2565 SPEC_STORE_BYPASS_CMD_PRCTL,
2566 SPEC_STORE_BYPASS_CMD_SECCOMP,
2567 };
2568
2569 static const char * const ssb_strings[] = {
2570 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
2571 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
2572 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
2573 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
2574 };
2575
2576 static const struct {
2577 const char *option;
2578 enum ssb_mitigation_cmd cmd;
2579 } ssb_mitigation_options[] __initconst = {
2580 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
2581 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
2582 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
2583 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
2584 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
2585 };
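
/*
 * For illustration only (not part of the upstream file): booting with
 * "spec_store_bypass_disable=prctl" leaves speculative stores enabled by
 * default and lets individual tasks opt out via
 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, ...), while
 * "spec_store_bypass_disable=on" disables Speculative Store Bypass
 * globally (SPEC_STORE_BYPASS_DISABLE).
 */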
2586
2587 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
2588 {
2589 enum ssb_mitigation_cmd cmd;
2590 char arg[20];
2591 int ret, i;
2592
2593 cmd = IS_ENABLED(CONFIG_MITIGATION_SSB) ?
2594 SPEC_STORE_BYPASS_CMD_AUTO : SPEC_STORE_BYPASS_CMD_NONE;
2595 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
2596 cpu_mitigations_off()) {
2597 return SPEC_STORE_BYPASS_CMD_NONE;
2598 } else {
2599 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
2600 arg, sizeof(arg));
2601 if (ret < 0)
2602 return cmd;
2603
2604 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
2605 if (!match_option(arg, ret, ssb_mitigation_options[i].option))
2606 continue;
2607
2608 cmd = ssb_mitigation_options[i].cmd;
2609 break;
2610 }
2611
2612 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
2613 pr_err("unknown option (%s). Switching to default mode\n", arg);
2614 return cmd;
2615 }
2616 }
2617
2618 return cmd;
2619 }
2620
2621 static void __init ssb_select_mitigation(void)
2622 {
2623 enum ssb_mitigation_cmd cmd;
2624
2625 if (!boot_cpu_has(X86_FEATURE_SSBD))
2626 goto out;
2627
2628 cmd = ssb_parse_cmdline();
2629 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
2630 (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
2631 cmd == SPEC_STORE_BYPASS_CMD_AUTO))
2632 return;
2633
2634 switch (cmd) {
2635 case SPEC_STORE_BYPASS_CMD_SECCOMP:
2636 /*
2637 * Choose prctl+seccomp as the default mode if seccomp is
2638 * enabled.
2639 */
2640 if (IS_ENABLED(CONFIG_SECCOMP))
2641 ssb_mode = SPEC_STORE_BYPASS_SECCOMP;
2642 else
2643 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2644 break;
2645 case SPEC_STORE_BYPASS_CMD_ON:
2646 ssb_mode = SPEC_STORE_BYPASS_DISABLE;
2647 break;
2648 case SPEC_STORE_BYPASS_CMD_AUTO:
2649 if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS))
2650 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2651 else
2652 ssb_mode = SPEC_STORE_BYPASS_NONE;
2653 break;
2654 case SPEC_STORE_BYPASS_CMD_PRCTL:
2655 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2656 break;
2657 case SPEC_STORE_BYPASS_CMD_NONE:
2658 break;
2659 }
2660
2661 out:
2662 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2663 pr_info("%s\n", ssb_strings[ssb_mode]);
2664 }
2665
2666 static void __init ssb_apply_mitigation(void)
2667 {
2668 /*
2669 * We have three CPU feature flags that are in play here:
2670 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
2671 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
2672 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
2673 */
2674 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
2675 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
2676 /*
2677 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
2678 * use a completely different MSR and bit dependent on family.
2679 */
2680 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
2681 !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
2682 x86_amd_ssb_disable();
2683 } else {
2684 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
2685 update_spec_ctrl(x86_spec_ctrl_base);
2686 }
2687 }
2688 }
2689
2690 #undef pr_fmt
2691 #define pr_fmt(fmt) "Speculation prctl: " fmt
2692
2693 static void task_update_spec_tif(struct task_struct *tsk)
2694 {
2695 /* Force the update of the real TIF bits */
2696 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
2697
2698 /*
2699 * Immediately update the speculation control MSRs for the current
2700 * task, but for a non-current task delay setting the CPU
2701 * mitigation until it is scheduled next.
2702 *
2703 * This can only happen for SECCOMP mitigation. For PRCTL it's
2704 * always the current task.
2705 */
2706 if (tsk == current)
2707 speculation_ctrl_update_current();
2708 }
2709
2710 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
2711 {
2712
2713 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2714 return -EPERM;
2715
2716 switch (ctrl) {
2717 case PR_SPEC_ENABLE:
2718 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2719 return 0;
2720 case PR_SPEC_DISABLE:
2721 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2722 return 0;
2723 default:
2724 return -ERANGE;
2725 }
2726 }
2727
2728 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
2729 {
2730 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
2731 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
2732 return -ENXIO;
2733
2734 switch (ctrl) {
2735 case PR_SPEC_ENABLE:
2736 /* If speculation is force disabled, enable is not allowed */
2737 if (task_spec_ssb_force_disable(task))
2738 return -EPERM;
2739 task_clear_spec_ssb_disable(task);
2740 task_clear_spec_ssb_noexec(task);
2741 task_update_spec_tif(task);
2742 break;
2743 case PR_SPEC_DISABLE:
2744 task_set_spec_ssb_disable(task);
2745 task_clear_spec_ssb_noexec(task);
2746 task_update_spec_tif(task);
2747 break;
2748 case PR_SPEC_FORCE_DISABLE:
2749 task_set_spec_ssb_disable(task);
2750 task_set_spec_ssb_force_disable(task);
2751 task_clear_spec_ssb_noexec(task);
2752 task_update_spec_tif(task);
2753 break;
2754 case PR_SPEC_DISABLE_NOEXEC:
2755 if (task_spec_ssb_force_disable(task))
2756 return -EPERM;
2757 task_set_spec_ssb_disable(task);
2758 task_set_spec_ssb_noexec(task);
2759 task_update_spec_tif(task);
2760 break;
2761 default:
2762 return -ERANGE;
2763 }
2764 return 0;
2765 }
2766
2767 static bool is_spec_ib_user_controlled(void)
2768 {
2769 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2770 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2771 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2772 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2773 }
2774
2775 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2776 {
2777 switch (ctrl) {
2778 case PR_SPEC_ENABLE:
2779 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2780 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2781 return 0;
2782
2783 /*
2784 * With strict mode for both IBPB and STIBP, the instruction
2785 * code paths avoid checking this task flag and instead
2786 * unconditionally run the instruction. However, STIBP and IBPB
2787 * are independent and either can be set to conditional mode
2788 * regardless of the mode of the other.
2789 *
2790 * If either is set to conditional, allow the task flag to be
2791 * updated, unless it was force-disabled by a previous prctl
2792 * call. Currently, this is possible on an AMD CPU which has the
2793 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2794 * kernel is booted with 'spectre_v2_user=seccomp', then
2795 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2796 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2797 */
2798 if (!is_spec_ib_user_controlled() ||
2799 task_spec_ib_force_disable(task))
2800 return -EPERM;
2801
2802 task_clear_spec_ib_disable(task);
2803 task_update_spec_tif(task);
2804 break;
2805 case PR_SPEC_DISABLE:
2806 case PR_SPEC_FORCE_DISABLE:
2807 /*
2808 * Indirect branch speculation is always allowed when
2809 * mitigation is force disabled.
2810 */
2811 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2812 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2813 return -EPERM;
2814
2815 if (!is_spec_ib_user_controlled())
2816 return 0;
2817
2818 task_set_spec_ib_disable(task);
2819 if (ctrl == PR_SPEC_FORCE_DISABLE)
2820 task_set_spec_ib_force_disable(task);
2821 task_update_spec_tif(task);
2822 if (task == current)
2823 indirect_branch_prediction_barrier();
2824 break;
2825 default:
2826 return -ERANGE;
2827 }
2828 return 0;
2829 }
2830
2831 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2832 unsigned long ctrl)
2833 {
2834 switch (which) {
2835 case PR_SPEC_STORE_BYPASS:
2836 return ssb_prctl_set(task, ctrl);
2837 case PR_SPEC_INDIRECT_BRANCH:
2838 return ib_prctl_set(task, ctrl);
2839 case PR_SPEC_L1D_FLUSH:
2840 return l1d_flush_prctl_set(task, ctrl);
2841 default:
2842 return -ENODEV;
2843 }
2844 }
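
/*
 * Illustrative userspace sketch (not part of this file): the prctl()
 * interface wired up above is typically exercised like this, assuming the
 * corresponding *_prctl_set() handler reports the control as available:
 *
 *   #include <sys/prctl.h>
 *   #include <linux/prctl.h>
 *
 *   // Opt the calling task out of Speculative Store Bypass:
 *   prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *         PR_SPEC_DISABLE, 0, 0);
 *
 *   // Query the indirect branch speculation state for the calling task:
 *   int ibs = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
 *                   0, 0, 0);
 */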
2845
2846 #ifdef CONFIG_SECCOMP
2847 void arch_seccomp_spec_mitigate(struct task_struct *task)
2848 {
2849 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2850 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2851 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2852 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2853 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2854 }
2855 #endif
2856
2857 static int l1d_flush_prctl_get(struct task_struct *task)
2858 {
2859 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2860 return PR_SPEC_FORCE_DISABLE;
2861
2862 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2863 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2864 else
2865 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2866 }
2867
2868 static int ssb_prctl_get(struct task_struct *task)
2869 {
2870 switch (ssb_mode) {
2871 case SPEC_STORE_BYPASS_NONE:
2872 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2873 return PR_SPEC_ENABLE;
2874 return PR_SPEC_NOT_AFFECTED;
2875 case SPEC_STORE_BYPASS_DISABLE:
2876 return PR_SPEC_DISABLE;
2877 case SPEC_STORE_BYPASS_SECCOMP:
2878 case SPEC_STORE_BYPASS_PRCTL:
2879 if (task_spec_ssb_force_disable(task))
2880 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2881 if (task_spec_ssb_noexec(task))
2882 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2883 if (task_spec_ssb_disable(task))
2884 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2885 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2886 }
2887 BUG();
2888 }
2889
2890 static int ib_prctl_get(struct task_struct *task)
2891 {
2892 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2893 return PR_SPEC_NOT_AFFECTED;
2894
2895 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2896 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2897 return PR_SPEC_ENABLE;
2898 else if (is_spec_ib_user_controlled()) {
2899 if (task_spec_ib_force_disable(task))
2900 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2901 if (task_spec_ib_disable(task))
2902 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2903 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2904 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2905 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2906 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2907 return PR_SPEC_DISABLE;
2908 else
2909 return PR_SPEC_NOT_AFFECTED;
2910 }
2911
2912 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2913 {
2914 switch (which) {
2915 case PR_SPEC_STORE_BYPASS:
2916 return ssb_prctl_get(task);
2917 case PR_SPEC_INDIRECT_BRANCH:
2918 return ib_prctl_get(task);
2919 case PR_SPEC_L1D_FLUSH:
2920 return l1d_flush_prctl_get(task);
2921 default:
2922 return -ENODEV;
2923 }
2924 }
2925
2926 void x86_spec_ctrl_setup_ap(void)
2927 {
2928 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2929 update_spec_ctrl(x86_spec_ctrl_base);
2930
2931 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2932 x86_amd_ssb_disable();
2933 }
2934
2935 bool itlb_multihit_kvm_mitigation;
2936 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
2937
2938 #undef pr_fmt
2939 #define pr_fmt(fmt) "L1TF: " fmt
2940
2941 /* Default mitigation for L1TF-affected CPUs */
2942 enum l1tf_mitigations l1tf_mitigation __ro_after_init =
2943 IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF;
2944 #if IS_ENABLED(CONFIG_KVM_INTEL)
2945 EXPORT_SYMBOL_GPL(l1tf_mitigation);
2946 #endif
2947 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2948 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
2949
2950 /*
2951 * These CPUs all support 44 bits of physical address space internally in
2952 * the cache, but CPUID can report a smaller number of physical address bits.
2953 *
2954 * The L1TF mitigation uses the topmost address bit for the inversion of
2955 * non-present PTEs. When the installed memory reaches into that topmost
2956 * address bit because of memory holes (observed on machines that report
2957 * 36 physical address bits and have 32G of RAM installed), the mitigation
2958 * range check in l1tf_select_mitigation() triggers. This is a false
2959 * positive because the mitigation is still possible: the cache uses 44
2960 * bits internally. Use the cache bits instead of the reported physical
2961 * bits, and adjust them to 44 on the affected machines if the reported
2962 * bits are less than 44.
2963 */
2964 static void override_cache_bits(struct cpuinfo_x86 *c)
2965 {
2966 if (c->x86 != 6)
2967 return;
2968
2969 switch (c->x86_vfm) {
2970 case INTEL_NEHALEM:
2971 case INTEL_WESTMERE:
2972 case INTEL_SANDYBRIDGE:
2973 case INTEL_IVYBRIDGE:
2974 case INTEL_HASWELL:
2975 case INTEL_HASWELL_L:
2976 case INTEL_HASWELL_G:
2977 case INTEL_BROADWELL:
2978 case INTEL_BROADWELL_G:
2979 case INTEL_SKYLAKE_L:
2980 case INTEL_SKYLAKE:
2981 case INTEL_KABYLAKE_L:
2982 case INTEL_KABYLAKE:
2983 if (c->x86_cache_bits < 44)
2984 c->x86_cache_bits = 44;
2985 break;
2986 }
2987 }
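
/*
 * Worked example (illustrative, not part of the upstream comments): with 36
 * reported address bits the MAX_PA/2 cutoff checked in l1tf_apply_mitigation()
 * is 2^35 = 32G, so a 32G machine with memory holes already has RAM mapped at
 * or above it and the warning would fire. Raising x86_cache_bits to 44 moves
 * the cutoff to 2^43 = 8T and avoids the false positive.
 */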
2988
2989 static void __init l1tf_select_mitigation(void)
2990 {
2991 if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
2992 l1tf_mitigation = L1TF_MITIGATION_OFF;
2993 return;
2994 }
2995
2996 if (l1tf_mitigation != L1TF_MITIGATION_AUTO)
2997 return;
2998
2999 if (!should_mitigate_vuln(X86_BUG_L1TF)) {
3000 l1tf_mitigation = L1TF_MITIGATION_OFF;
3001 return;
3002 }
3003
3004 if (smt_mitigations == SMT_MITIGATIONS_ON)
3005 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
3006 else
3007 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
3008 }
3009
3010 static void __init l1tf_apply_mitigation(void)
3011 {
3012 u64 half_pa;
3013
3014 if (!boot_cpu_has_bug(X86_BUG_L1TF))
3015 return;
3016
3017 override_cache_bits(&boot_cpu_data);
3018
3019 switch (l1tf_mitigation) {
3020 case L1TF_MITIGATION_OFF:
3021 case L1TF_MITIGATION_FLUSH_NOWARN:
3022 case L1TF_MITIGATION_FLUSH:
3023 case L1TF_MITIGATION_AUTO:
3024 break;
3025 case L1TF_MITIGATION_FLUSH_NOSMT:
3026 case L1TF_MITIGATION_FULL:
3027 cpu_smt_disable(false);
3028 break;
3029 case L1TF_MITIGATION_FULL_FORCE:
3030 cpu_smt_disable(true);
3031 break;
3032 }
3033
3034 #if CONFIG_PGTABLE_LEVELS == 2
3035 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
3036 return;
3037 #endif
3038
3039 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
3040 if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
3041 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
3042 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
3043 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
3044 half_pa);
3045 pr_info("However, doing so will make a part of your RAM unusable.\n");
3046 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
3047 return;
3048 }
3049
3050 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
3051 }
3052
3053 static int __init l1tf_cmdline(char *str)
3054 {
3055 if (!boot_cpu_has_bug(X86_BUG_L1TF))
3056 return 0;
3057
3058 if (!str)
3059 return -EINVAL;
3060
3061 if (!strcmp(str, "off"))
3062 l1tf_mitigation = L1TF_MITIGATION_OFF;
3063 else if (!strcmp(str, "flush,nowarn"))
3064 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
3065 else if (!strcmp(str, "flush"))
3066 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
3067 else if (!strcmp(str, "flush,nosmt"))
3068 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
3069 else if (!strcmp(str, "full"))
3070 l1tf_mitigation = L1TF_MITIGATION_FULL;
3071 else if (!strcmp(str, "full,force"))
3072 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
3073
3074 return 0;
3075 }
3076 early_param("l1tf", l1tf_cmdline);
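
/*
 * For illustration only (not part of the upstream file): "l1tf=flush,nosmt"
 * selects L1TF_MITIGATION_FLUSH_NOSMT, which l1tf_apply_mitigation() turns
 * into cpu_smt_disable(false) (SMT off, re-enabling allowed), while
 * "l1tf=full,force" maps to cpu_smt_disable(true) and keeps SMT off
 * permanently.
 */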
3077
3078 #undef pr_fmt
3079 #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt
3080
3081 static const char * const srso_strings[] = {
3082 [SRSO_MITIGATION_NONE] = "Vulnerable",
3083 [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
3084 [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
3085 [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
3086 [SRSO_MITIGATION_NOSMT] = "Mitigation: SMT disabled",
3087 [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
3088 [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
3089 [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
3090 [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
3091 };
3092
3093 static int __init srso_parse_cmdline(char *str)
3094 {
3095 if (!str)
3096 return -EINVAL;
3097
3098 if (!strcmp(str, "off"))
3099 srso_mitigation = SRSO_MITIGATION_NONE;
3100 else if (!strcmp(str, "microcode"))
3101 srso_mitigation = SRSO_MITIGATION_MICROCODE;
3102 else if (!strcmp(str, "safe-ret"))
3103 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
3104 else if (!strcmp(str, "ibpb"))
3105 srso_mitigation = SRSO_MITIGATION_IBPB;
3106 else if (!strcmp(str, "ibpb-vmexit"))
3107 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
3108 else
3109 pr_err("Ignoring unknown SRSO option (%s).", str);
3110
3111 return 0;
3112 }
3113 early_param("spec_rstack_overflow", srso_parse_cmdline);
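
/*
 * For illustration only (not part of the upstream file):
 * "spec_rstack_overflow=ibpb-vmexit" confines the mitigation to the VM exit
 * path (SRSO_MITIGATION_IBPB_ON_VMEXIT), while
 * "spec_rstack_overflow=safe-ret" selects the return-thunk based Safe RET
 * mitigation applied in srso_apply_mitigation().
 */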
3114
3115 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
3116
3117 static void __init srso_select_mitigation(void)
3118 {
3119 if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
3120 srso_mitigation = SRSO_MITIGATION_NONE;
3121 return;
3122 }
3123
3124 if (srso_mitigation == SRSO_MITIGATION_AUTO) {
3125 /*
3126 * Use safe-RET if user->kernel or guest->host protection is
3127 * required. Otherwise the 'microcode' mitigation is sufficient
3128 * to protect the user->user and guest->guest vectors.
3129 */
3130 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
3131 (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) &&
3132 !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) {
3133 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
3134 } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
3135 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
3136 srso_mitigation = SRSO_MITIGATION_MICROCODE;
3137 } else {
3138 srso_mitigation = SRSO_MITIGATION_NONE;
3139 return;
3140 }
3141 }
3142
3143 /* Zen1/2 with SMT off aren't vulnerable to SRSO. */
3144 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
3145 srso_mitigation = SRSO_MITIGATION_NOSMT;
3146 return;
3147 }
3148
3149 if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) {
3150 pr_warn("IBPB-extending microcode not applied!\n");
3151 pr_warn(SRSO_NOTICE);
3152
3153 /*
3154 * Safe-RET provides partial mitigation even without the updated
3155 * microcode, but the other options require the microcode to
3156 * provide any protection at all.
3157 */
3158 if (srso_mitigation == SRSO_MITIGATION_SAFE_RET)
3159 srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
3160 else
3161 srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
3162 }
3163
3164 switch (srso_mitigation) {
3165 case SRSO_MITIGATION_SAFE_RET:
3166 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3167 if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
3168 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
3169 goto ibpb_on_vmexit;
3170 }
3171
3172 if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
3173 pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
3174 srso_mitigation = SRSO_MITIGATION_NONE;
3175 }
3176 break;
3177 ibpb_on_vmexit:
3178 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3179 if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
3180 pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
3181 srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
3182 break;
3183 }
3184 fallthrough;
3185 case SRSO_MITIGATION_IBPB:
3186 if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
3187 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
3188 srso_mitigation = SRSO_MITIGATION_NONE;
3189 }
3190 break;
3191 default:
3192 break;
3193 }
3194 }
3195
3196 static void __init srso_update_mitigation(void)
3197 {
3198 /* If retbleed is using IBPB, that works for SRSO as well */
3199 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB &&
3200 boot_cpu_has(X86_FEATURE_IBPB_BRTYPE))
3201 srso_mitigation = SRSO_MITIGATION_IBPB;
3202
3203 if (boot_cpu_has_bug(X86_BUG_SRSO) &&
3204 !cpu_mitigations_off())
3205 pr_info("%s\n", srso_strings[srso_mitigation]);
3206 }
3207
3208 static void __init srso_apply_mitigation(void)
3209 {
3210 /*
3211 * Clear the feature flag if this mitigation is not selected, because that
3212 * feature flag controls the BpSpecReduce MSR bit toggling in KVM.
3213 */
3214 if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
3215 setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
3216
3217 if (srso_mitigation == SRSO_MITIGATION_NONE) {
3218 if (boot_cpu_has(X86_FEATURE_SBPB))
3219 x86_pred_cmd = PRED_CMD_SBPB;
3220 return;
3221 }
3222
3223 switch (srso_mitigation) {
3224 case SRSO_MITIGATION_SAFE_RET:
3225 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3226 /*
3227 * Enable the return thunk for generated code
3228 * like ftrace, static_call, etc.
3229 */
3230 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
3231 setup_force_cpu_cap(X86_FEATURE_UNRET);
3232
3233 if (boot_cpu_data.x86 == 0x19) {
3234 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
3235 set_return_thunk(srso_alias_return_thunk);
3236 } else {
3237 setup_force_cpu_cap(X86_FEATURE_SRSO);
3238 set_return_thunk(srso_return_thunk);
3239 }
3240 break;
3241 case SRSO_MITIGATION_IBPB:
3242 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
3243 /*
3244 * IBPB on entry already obviates the need for
3245 * software-based untraining so clear those in case some
3246 * other mitigation like Retbleed has selected them.
3247 */
3248 setup_clear_cpu_cap(X86_FEATURE_UNRET);
3249 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
3250 fallthrough;
3251 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3252 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
3253 /*
3254 * There is no need for RSB filling: entry_ibpb() ensures
3255 * all predictions, including the RSB, are invalidated,
3256 * regardless of IBPB implementation.
3257 */
3258 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
3259 break;
3260 default:
3261 break;
3262 }
3263 }
3264
3265 #undef pr_fmt
3266 #define pr_fmt(fmt) "VMSCAPE: " fmt
3267
3268 enum vmscape_mitigations {
3269 VMSCAPE_MITIGATION_NONE,
3270 VMSCAPE_MITIGATION_AUTO,
3271 VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
3272 VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
3273 };
3274
3275 static const char * const vmscape_strings[] = {
3276 [VMSCAPE_MITIGATION_NONE] = "Vulnerable",
3277 /* [VMSCAPE_MITIGATION_AUTO] */
3278 [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER] = "Mitigation: IBPB before exit to userspace",
3279 [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT",
3280 };
3281
3282 static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
3283 IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;
3284
3285 static int __init vmscape_parse_cmdline(char *str)
3286 {
3287 if (!str)
3288 return -EINVAL;
3289
3290 if (!strcmp(str, "off")) {
3291 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3292 } else if (!strcmp(str, "ibpb")) {
3293 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
3294 } else if (!strcmp(str, "force")) {
3295 setup_force_cpu_bug(X86_BUG_VMSCAPE);
3296 vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
3297 } else {
3298 pr_err("Ignoring unknown vmscape=%s option.\n", str);
3299 }
3300
3301 return 0;
3302 }
3303 early_param("vmscape", vmscape_parse_cmdline);
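
/*
 * For illustration only (not part of the upstream file): "vmscape=ibpb"
 * requests an IBPB before exiting to userspace
 * (VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER), and "vmscape=force" marks the CPU
 * as affected even if it is not enumerated as such, then lets the AUTO
 * selection below pick the mitigation.
 */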
3304
3305 static void __init vmscape_select_mitigation(void)
3306 {
3307 if (cpu_mitigations_off() ||
3308 !boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
3309 !boot_cpu_has(X86_FEATURE_IBPB)) {
3310 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3311 return;
3312 }
3313
3314 if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO)
3315 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
3316 }
3317
3318 static void __init vmscape_update_mitigation(void)
3319 {
3320 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE))
3321 return;
3322
3323 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
3324 srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
3325 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;
3326
3327 pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
3328 }
3329
3330 static void __init vmscape_apply_mitigation(void)
3331 {
3332 if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
3333 setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
3334 }
3335
3336 #undef pr_fmt
3337 #define pr_fmt(fmt) fmt
3338
3339 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
3340 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
3341 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
3342 #define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n"
3343
3344 void cpu_bugs_smt_update(void)
3345 {
3346 mutex_lock(&spec_ctrl_mutex);
3347
3348 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
3349 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
3350 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
3351
3352 switch (spectre_v2_user_stibp) {
3353 case SPECTRE_V2_USER_NONE:
3354 break;
3355 case SPECTRE_V2_USER_STRICT:
3356 case SPECTRE_V2_USER_STRICT_PREFERRED:
3357 update_stibp_strict();
3358 break;
3359 case SPECTRE_V2_USER_PRCTL:
3360 case SPECTRE_V2_USER_SECCOMP:
3361 update_indir_branch_cond();
3362 break;
3363 }
3364
3365 switch (mds_mitigation) {
3366 case MDS_MITIGATION_FULL:
3367 case MDS_MITIGATION_AUTO:
3368 case MDS_MITIGATION_VMWERV:
3369 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
3370 pr_warn_once(MDS_MSG_SMT);
3371 update_mds_branch_idle();
3372 break;
3373 case MDS_MITIGATION_OFF:
3374 break;
3375 }
3376
3377 switch (taa_mitigation) {
3378 case TAA_MITIGATION_VERW:
3379 case TAA_MITIGATION_AUTO:
3380 case TAA_MITIGATION_UCODE_NEEDED:
3381 if (sched_smt_active())
3382 pr_warn_once(TAA_MSG_SMT);
3383 break;
3384 case TAA_MITIGATION_TSX_DISABLED:
3385 case TAA_MITIGATION_OFF:
3386 break;
3387 }
3388
3389 switch (mmio_mitigation) {
3390 case MMIO_MITIGATION_VERW:
3391 case MMIO_MITIGATION_AUTO:
3392 case MMIO_MITIGATION_UCODE_NEEDED:
3393 if (sched_smt_active())
3394 pr_warn_once(MMIO_MSG_SMT);
3395 break;
3396 case MMIO_MITIGATION_OFF:
3397 break;
3398 }
3399
3400 switch (tsa_mitigation) {
3401 case TSA_MITIGATION_USER_KERNEL:
3402 case TSA_MITIGATION_VM:
3403 case TSA_MITIGATION_AUTO:
3404 case TSA_MITIGATION_FULL:
3405 /*
3406 * TSA-SQ can potentially lead to info leakage between
3407 * SMT threads.
3408 */
3409 if (sched_smt_active())
3410 static_branch_enable(&cpu_buf_idle_clear);
3411 else
3412 static_branch_disable(&cpu_buf_idle_clear);
3413 break;
3414 case TSA_MITIGATION_NONE:
3415 case TSA_MITIGATION_UCODE_NEEDED:
3416 break;
3417 }
3418
3419 switch (vmscape_mitigation) {
3420 case VMSCAPE_MITIGATION_NONE:
3421 case VMSCAPE_MITIGATION_AUTO:
3422 break;
3423 case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT:
3424 case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER:
3425 /*
3426 * Hypervisors can be attacked across SMT threads; warn when SMT is
3427 * active and STIBP is not already enabled system-wide.
3428 *
3429 * Intel eIBRS (!AUTOIBRS) implies STIBP on.
3430 */
3431 if (!sched_smt_active() ||
3432 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
3433 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
3434 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
3435 !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
3436 break;
3437 pr_warn_once(VMSCAPE_MSG_SMT);
3438 break;
3439 }
3440
3441 mutex_unlock(&spec_ctrl_mutex);
3442 }
3443
3444 #ifdef CONFIG_SYSFS
3445
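/*
 * Reporting helpers for /sys/devices/system/cpu/vulnerabilities/*:
 * each one emits the human-readable mitigation state for a single bug.
 */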
3446 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
3447
3448 #if IS_ENABLED(CONFIG_KVM_INTEL)
3449 static const char * const l1tf_vmx_states[] = {
3450 [VMENTER_L1D_FLUSH_AUTO] = "auto",
3451 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
3452 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
3453 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
3454 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
3455 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
3456 };
3457
3458 static ssize_t l1tf_show_state(char *buf)
3459 {
3460 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
3461 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3462
3463 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
3464 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
3465 sched_smt_active())) {
3466 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
3467 l1tf_vmx_states[l1tf_vmx_mitigation]);
3468 }
3469
3470 return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
3471 l1tf_vmx_states[l1tf_vmx_mitigation],
3472 sched_smt_active() ? "vulnerable" : "disabled");
3473 }
3474
3475 static ssize_t itlb_multihit_show_state(char *buf)
3476 {
3477 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
3478 !boot_cpu_has(X86_FEATURE_VMX))
3479 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
3480 else if (!(cr4_read_shadow() & X86_CR4_VMXE))
3481 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
3482 else if (itlb_multihit_kvm_mitigation)
3483 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
3484 else
3485 return sysfs_emit(buf, "KVM: Vulnerable\n");
3486 }
3487 #else
3488 static ssize_t l1tf_show_state(char *buf)
3489 {
3490 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3491 }
3492
3493 static ssize_t itlb_multihit_show_state(char *buf)
3494 {
3495 return sysfs_emit(buf, "Processor vulnerable\n");
3496 }
3497 #endif
3498
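/*
 * When running as a guest the host's SMT configuration is not visible,
 * so MDS is reported with "SMT Host state unknown".
 */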
3499 static ssize_t mds_show_state(char *buf)
3500 {
3501 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3502 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3503 mds_strings[mds_mitigation]);
3504 }
3505
3506 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
3507 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3508 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
3509 sched_smt_active() ? "mitigated" : "disabled"));
3510 }
3511
3512 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3513 sched_smt_active() ? "vulnerable" : "disabled");
3514 }
3515
3516 static ssize_t tsx_async_abort_show_state(char *buf)
3517 {
3518 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
3519 (taa_mitigation == TAA_MITIGATION_OFF))
3520 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
3521
3522 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3523 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3524 taa_strings[taa_mitigation]);
3525 }
3526
3527 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
3528 sched_smt_active() ? "vulnerable" : "disabled");
3529 }
3530
3531 static ssize_t mmio_stale_data_show_state(char *buf)
3532 {
3533 if (mmio_mitigation == MMIO_MITIGATION_OFF)
3534 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
3535
3536 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3537 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3538 mmio_strings[mmio_mitigation]);
3539 }
3540
3541 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
3542 sched_smt_active() ? "vulnerable" : "disabled");
3543 }
3544
3545 static ssize_t rfds_show_state(char *buf)
3546 {
3547 return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
3548 }
3549
3550 static ssize_t old_microcode_show_state(char *buf)
3551 {
3552 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
3553 return sysfs_emit(buf, "Unknown: running under hypervisor");
3554
3555 return sysfs_emit(buf, "Vulnerable\n");
3556 }
3557
3558 static ssize_t its_show_state(char *buf)
3559 {
3560 return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
3561 }
3562
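/*
 * STIBP fragment of the spectre_v2 sysfs string. Empty when eIBRS
 * without AUTOIBRS is in use, since eIBRS already implies STIBP.
 */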
3563 static char *stibp_state(void)
3564 {
3565 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
3566 !boot_cpu_has(X86_FEATURE_AUTOIBRS))
3567 return "";
3568
3569 switch (spectre_v2_user_stibp) {
3570 case SPECTRE_V2_USER_NONE:
3571 return "; STIBP: disabled";
3572 case SPECTRE_V2_USER_STRICT:
3573 return "; STIBP: forced";
3574 case SPECTRE_V2_USER_STRICT_PREFERRED:
3575 return "; STIBP: always-on";
3576 case SPECTRE_V2_USER_PRCTL:
3577 case SPECTRE_V2_USER_SECCOMP:
3578 if (static_key_enabled(&switch_to_cond_stibp))
3579 return "; STIBP: conditional";
3580 }
3581 return "";
3582 }
3583
3584 static char *ibpb_state(void)
3585 {
3586 if (boot_cpu_has(X86_FEATURE_IBPB)) {
3587 if (static_key_enabled(&switch_mm_always_ibpb))
3588 return "; IBPB: always-on";
3589 if (static_key_enabled(&switch_mm_cond_ibpb))
3590 return "; IBPB: conditional";
3591 return "; IBPB: disabled";
3592 }
3593 return "";
3594 }
3595
3596 static char *pbrsb_eibrs_state(void)
3597 {
3598 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
3599 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
3600 boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
3601 return "; PBRSB-eIBRS: SW sequence";
3602 else
3603 return "; PBRSB-eIBRS: Vulnerable";
3604 } else {
3605 return "; PBRSB-eIBRS: Not affected";
3606 }
3607 }
3608
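/*
 * BHI fragment of the spectre_v2 sysfs string: hardware BHI_DIS_S
 * takes precedence, then the software clearing loop, then retpoline
 * (sufficient only with RRSBA disabled), then the VMEXIT-only loop.
 */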
3609 static const char *spectre_bhi_state(void)
3610 {
3611 if (!boot_cpu_has_bug(X86_BUG_BHI))
3612 return "; BHI: Not affected";
3613 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
3614 return "; BHI: BHI_DIS_S";
3615 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
3616 return "; BHI: SW loop, KVM: SW loop";
3617 else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
3618 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
3619 rrsba_disabled)
3620 return "; BHI: Retpoline";
3621 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT))
3622 return "; BHI: Vulnerable, KVM: SW loop";
3623
3624 return "; BHI: Vulnerable";
3625 }
3626
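/*
 * Assemble the spectre_v2 sysfs string from the base mitigation plus
 * the IBPB, IBRS_FW, STIBP, RSB, PBRSB and BHI status fragments.
 */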
3627 static ssize_t spectre_v2_show_state(char *buf)
3628 {
3629 if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
3630 return sysfs_emit(buf, "Vulnerable: LFENCE\n");
3631
3632 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
3633 return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
3634
3635 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
3636 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
3637 return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
3638
3639 return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
3640 spectre_v2_strings[spectre_v2_enabled],
3641 ibpb_state(),
3642 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
3643 stibp_state(),
3644 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
3645 pbrsb_eibrs_state(),
3646 spectre_bhi_state(),
3647 /* this should always be at the end */
3648 spectre_v2_module_string());
3649 }
3650
3651 static ssize_t srbds_show_state(char *buf)
3652 {
3653 return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
3654 }
3655
3656 static ssize_t retbleed_show_state(char *buf)
3657 {
3658 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
3659 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
3660 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
3661 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
3662 return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
3663
3664 return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
3665 !sched_smt_active() ? "disabled" :
3666 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
3667 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
3668 "enabled with STIBP protection" : "vulnerable");
3669 }
3670
3671 return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
3672 }
3673
3674 static ssize_t srso_show_state(char *buf)
3675 {
3676 return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
3677 }
3678
3679 static ssize_t gds_show_state(char *buf)
3680 {
3681 return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
3682 }
3683
3684 static ssize_t tsa_show_state(char *buf)
3685 {
3686 return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
3687 }
3688
3689 static ssize_t vmscape_show_state(char *buf)
3690 {
3691 return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
3692 }
3693
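/*
 * Common sysfs handler: report "Not affected" when the bug bit is not
 * set, otherwise dispatch to the per-bug helper, falling back to a
 * plain "Vulnerable" for bugs without a dedicated report.
 */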
3694 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
3695 char *buf, unsigned int bug)
3696 {
3697 if (!boot_cpu_has_bug(bug))
3698 return sysfs_emit(buf, "Not affected\n");
3699
3700 switch (bug) {
3701 case X86_BUG_CPU_MELTDOWN:
3702 if (boot_cpu_has(X86_FEATURE_PTI))
3703 return sysfs_emit(buf, "Mitigation: PTI\n");
3704
3705 if (hypervisor_is_type(X86_HYPER_XEN_PV))
3706 return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
3707
3708 break;
3709
3710 case X86_BUG_SPECTRE_V1:
3711 return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
3712
3713 case X86_BUG_SPECTRE_V2:
3714 return spectre_v2_show_state(buf);
3715
3716 case X86_BUG_SPEC_STORE_BYPASS:
3717 return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
3718
3719 case X86_BUG_L1TF:
3720 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
3721 return l1tf_show_state(buf);
3722 break;
3723
3724 case X86_BUG_MDS:
3725 return mds_show_state(buf);
3726
3727 case X86_BUG_TAA:
3728 return tsx_async_abort_show_state(buf);
3729
3730 case X86_BUG_ITLB_MULTIHIT:
3731 return itlb_multihit_show_state(buf);
3732
3733 case X86_BUG_SRBDS:
3734 return srbds_show_state(buf);
3735
3736 case X86_BUG_MMIO_STALE_DATA:
3737 return mmio_stale_data_show_state(buf);
3738
3739 case X86_BUG_RETBLEED:
3740 return retbleed_show_state(buf);
3741
3742 case X86_BUG_SRSO:
3743 return srso_show_state(buf);
3744
3745 case X86_BUG_GDS:
3746 return gds_show_state(buf);
3747
3748 case X86_BUG_RFDS:
3749 return rfds_show_state(buf);
3750
3751 case X86_BUG_OLD_MICROCODE:
3752 return old_microcode_show_state(buf);
3753
3754 case X86_BUG_ITS:
3755 return its_show_state(buf);
3756
3757 case X86_BUG_TSA:
3758 return tsa_show_state(buf);
3759
3760 case X86_BUG_VMSCAPE:
3761 return vmscape_show_state(buf);
3762
3763 default:
3764 break;
3765 }
3766
3767 return sysfs_emit(buf, "Vulnerable\n");
3768 }
3769
3770 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
3771 {
3772 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
3773 }
3774
3775 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
3776 {
3777 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
3778 }
3779
3780 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
3781 {
3782 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
3783 }
3784
3785 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
3786 {
3787 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
3788 }
3789
3790 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
3791 {
3792 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
3793 }
3794
3795 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
3796 {
3797 return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
3798 }
3799
3800 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
3801 {
3802 return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
3803 }
3804
3805 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
3806 {
3807 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
3808 }
3809
3810 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
3811 {
3812 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
3813 }
3814
3815 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
3816 {
3817 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
3818 }
3819
3820 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
3821 {
3822 return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
3823 }
3824
3825 ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
3826 {
3827 return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
3828 }
3829
3830 ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
3831 {
3832 return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
3833 }
3834
3835 ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
3836 {
3837 return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
3838 }
3839
3840 ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
3841 {
3842 return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
3843 }
3844
3845 ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
3846 {
3847 return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
3848 }
3849
3850 ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
3851 {
3852 return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
3853 }
3854
3855 ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
3856 {
3857 return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
3858 }
3859 #endif
3860
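/*
 * Reached via the default return thunk; the WARN indicates that the
 * return thunk was never patched with the selected mitigation's thunk.
 */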
3861 void __warn_thunk(void)
3862 {
3863 WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
3864 }
3865