// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 * - Rafael R. Reilova (moved everything from head.S),
 *   <rreilova@ececs.uc.edu>
 * - Channing Corn (tests & fixes),
 * - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/cpu.h>

#include "cpu.h"

/*
 * Speculation Vulnerability Handling
 *
 * Each vulnerability is handled with the following functions:
 *   <vuln>_select_mitigation() -- Selects a mitigation to use. This should
 *                                 take into account all relevant command line
 *                                 options.
 *   <vuln>_update_mitigation() -- This is called after all vulnerabilities have
 *                                 selected a mitigation, in case the selection
 *                                 may want to change based on other choices
 *                                 made. This function is optional.
 *   <vuln>_apply_mitigation()  -- Enable the selected mitigation.
 *
 * The compile-time mitigation in all cases should be AUTO. An explicit
 * command-line option can override AUTO. If no such option is
 * provided, <vuln>_select_mitigation() will override AUTO to the best
 * mitigation option.
 */

static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v1_apply_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init spectre_v2_update_mitigation(void);
static void __init spectre_v2_apply_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init retbleed_update_mitigation(void);
static void __init retbleed_apply_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init spectre_v2_user_update_mitigation(void);
static void __init spectre_v2_user_apply_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init ssb_apply_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init l1tf_apply_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init mds_update_mitigation(void);
static void __init mds_apply_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init taa_update_mitigation(void);
static void __init taa_apply_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init mmio_update_mitigation(void);
static void __init mmio_apply_mitigation(void);
static void __init rfds_select_mitigation(void);
static void __init rfds_update_mitigation(void);
static void __init rfds_apply_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init srbds_apply_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init srso_update_mitigation(void);
static void __init srso_apply_mitigation(void);
static void __init gds_select_mitigation(void);
static void __init gds_apply_mitigation(void);
static void __init bhi_select_mitigation(void);
static void __init bhi_update_mitigation(void);
static void __init bhi_apply_mitigation(void);
static void __init its_select_mitigation(void);
static void __init its_update_mitigation(void);
static void __init its_apply_mitigation(void);
static void __init tsa_select_mitigation(void);
static void __init tsa_apply_mitigation(void);

/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;

static u64 __ro_after_init x86_arch_cap_msr;

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;

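/*
 * Install the return thunk that 'ret' sites are patched to use. Later
 * mitigation selections may install a more specific thunk; the last
 * caller wins.
 */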
static void __init set_return_thunk(void *thunk)
{
	x86_return_thunk = thunk;

	pr_info("active return thunk: %ps\n", thunk);
}

/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
	this_cpu_write(x86_spec_ctrl_current, val);
	wrmsrq(MSR_IA32_SPEC_CTRL, val);
}

/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void update_spec_ctrl_cond(u64 val)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS is enabled this MSR is written on return-to-user,
	 * so unless forced the update can be delayed until that time.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrq(MSR_IA32_SPEC_CTRL, val);
}

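/* Read the cached per-CPU SPEC_CTRL value without touching the MSR. */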
noinstr u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control IBPB on vCPU load */
DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);

/* Control CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);

/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter
 * defaults to false
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/*
 * Controls CPU Fill buffer clear before VMenter. This is a subset of
 * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
 * mitigation is required.
 */
DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);

#undef pr_fmt
#define pr_fmt(fmt) "mitigations: " fmt

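/*
 * Summarize the attack vectors the administrator asked to mitigate (see
 * Documentation/admin-guide/hw-vuln/attack_vector_controls.rst) and the
 * global SMT mitigation policy.
 */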
static void __init cpu_print_attack_vectors(void)
{
	pr_info("Enabled attack vectors: ");

	if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
		pr_cont("user_kernel, ");

	if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER))
		pr_cont("user_user, ");

	if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
		pr_cont("guest_host, ");

	if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST))
		pr_cont("guest_guest, ");

	pr_cont("SMT mitigations: ");

	switch (smt_mitigations) {
	case SMT_MITIGATIONS_OFF:
		pr_cont("off\n");
		break;
	case SMT_MITIGATIONS_AUTO:
		pr_cont("auto\n");
		break;
	case SMT_MITIGATIONS_ON:
		pr_cont("on\n");
	}
}

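/*
 * Boot-time entry point: snapshot the relevant MSR state, then run the
 * select/update/apply sequence described above for each vulnerability.
 */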
void __init cpu_select_mitigations(void)
{
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
		rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

		/*
		 * Previously running kernel (kexec), may have some controls
		 * turned ON. Clear them and let the mitigations setup below
		 * rediscover them based on configuration.
		 */
		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
	}

	x86_arch_cap_msr = x86_read_arch_cap_msr();

	cpu_print_attack_vectors();

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	retbleed_select_mitigation();
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();
	rfds_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();
	srso_select_mitigation();
	gds_select_mitigation();
	its_select_mitigation();
	bhi_select_mitigation();
	tsa_select_mitigation();

	/*
	 * After mitigations are selected, some may need to update their
	 * choices.
	 */
	spectre_v2_update_mitigation();
	/*
	 * retbleed_update_mitigation() relies on the state set by
	 * spectre_v2_update_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_update_mitigation();
	/*
	 * its_update_mitigation() depends on spectre_v2_update_mitigation()
	 * and retbleed_update_mitigation().
	 */
	its_update_mitigation();

	/*
	 * spectre_v2_user_update_mitigation() depends on
	 * retbleed_update_mitigation(), specifically the STIBP
	 * selection is forced for UNRET or IBPB.
	 */
	spectre_v2_user_update_mitigation();
	mds_update_mitigation();
	taa_update_mitigation();
	mmio_update_mitigation();
	rfds_update_mitigation();
	bhi_update_mitigation();
	/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
	srso_update_mitigation();

	spectre_v1_apply_mitigation();
	spectre_v2_apply_mitigation();
	retbleed_apply_mitigation();
	spectre_v2_user_apply_mitigation();
	ssb_apply_mitigation();
	l1tf_apply_mitigation();
	mds_apply_mitigation();
	taa_apply_mitigation();
	mmio_apply_mitigation();
	rfds_apply_mitigation();
	srbds_apply_mitigation();
	srso_apply_mitigation();
	gds_apply_mitigation();
	its_apply_mitigation();
	bhi_apply_mitigation();
	tsa_apply_mitigation();
}

/*
 * NOTE: This function is *only* called for SVM, since Intel uses
 * MSR_IA32_SPEC_CTRL for SSBD.
 */
void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 guestval, hostval;
	struct thread_info *ti = current_thread_info();

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it is not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

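/*
 * Enable SSBD on AMD parts without SPEC_CTRL-based SSBD, preferring the
 * virtualized interface over the family-specific LS_CFG MSR.
 */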
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrq(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt) "MDS: " fmt

/*
 * Returns true if vulnerability should be mitigated based on the
 * selected attack vector controls.
 *
 * See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
 */
static bool __init should_mitigate_vuln(unsigned int bug)
{
	switch (bug) {
	/*
	 * The only runtime-selected spectre_v1 mitigations in the kernel are
	 * related to SWAPGS protection on kernel entry. Therefore, protection
	 * is only required for the user->kernel attack vector.
	 */
	case X86_BUG_SPECTRE_V1:
		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL);

	case X86_BUG_SPECTRE_V2:
	case X86_BUG_RETBLEED:
	case X86_BUG_SRSO:
	case X86_BUG_L1TF:
	case X86_BUG_ITS:
		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);

	case X86_BUG_SPECTRE_V2_USER:
		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);

	/*
	 * All the vulnerabilities below allow potentially leaking data
	 * across address spaces. Therefore, mitigation is required for
	 * any of these 4 attack vectors.
	 */
	case X86_BUG_MDS:
	case X86_BUG_TAA:
	case X86_BUG_MMIO_STALE_DATA:
	case X86_BUG_RFDS:
	case X86_BUG_SRBDS:
		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);

	case X86_BUG_GDS:
		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
		       (smt_mitigations != SMT_MITIGATIONS_OFF);
	default:
		WARN(1, "Unknown bug %x\n", bug);
		return false;
	}
}

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF] = "Vulnerable",
	[MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
};

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_AUTO,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_AUTO,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;

enum rfds_mitigations {
	RFDS_MITIGATION_OFF,
	RFDS_MITIGATION_AUTO,
	RFDS_MITIGATION_VERW,
	RFDS_MITIGATION_UCODE_NEEDED,
};

/* Default mitigation for Register File Data Sampling */
static enum rfds_mitigations rfds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;

/*
 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
 * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
 */
static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_MDS))
			mds_mitigation = MDS_MITIGATION_FULL;
		else
			mds_mitigation = MDS_MITIGATION_OFF;
	}

	if (mds_mitigation == MDS_MITIGATION_OFF)
		return;

	verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init mds_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return;

	/* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
	if (verw_clear_cpu_buf_mitigation_selected)
		mds_mitigation = MDS_MITIGATION_FULL;

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;
	}

	pr_info("%s\n", mds_strings[mds_mitigation]);
}

static void __init mds_apply_mitigation(void)
{
	if (mds_mitigation == MDS_MITIGATION_FULL ||
	    mds_mitigation == MDS_MITIGATION_VMWERV) {
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
			cpu_smt_disable(false);
	}
}

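/* Parse the "mds=" command line option: off, full or full,nosmt. */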
static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "TAA: " fmt

static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF] = "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
};

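/* TAA is only a concern when TSX (RTM) is actually enabled. */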
static bool __init taa_vulnerable(void)
{
	return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
}

static void __init taa_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	/* Microcode will be checked in taa_update_mitigation(). */
	if (taa_mitigation == TAA_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_TAA))
			taa_mitigation = TAA_MITIGATION_VERW;
		else
			taa_mitigation = TAA_MITIGATION_OFF;
	}

	if (taa_mitigation != TAA_MITIGATION_OFF)
		verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init taa_update_mitigation(void)
{
	if (!taa_vulnerable())
		return;

	if (verw_clear_cpu_buf_mitigation_selected)
		taa_mitigation = TAA_MITIGATION_VERW;

	if (taa_mitigation == TAA_MITIGATION_VERW) {
		/* Check if the requisite ucode is available. */
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

		/*
		 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
		 * A microcode update fixes this behavior to clear CPU buffers. It also
		 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
		 * ARCH_CAP_TSX_CTRL_MSR bit.
		 *
		 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
		 * update is required.
		 */
		if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
		    !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
			taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", taa_strings[taa_mitigation]);
}

static void __init taa_apply_mitigation(void)
{
	if (taa_mitigation == TAA_MITIGATION_VERW ||
	    taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
		/*
		 * TSX is enabled, select alternate mitigation for TAA which is
		 * the same as MDS. Enable MDS static branch to clear CPU buffers.
		 *
		 * For guests that can't determine whether the correct microcode is
		 * present on host, enable the mitigation for UCODE_NEEDED as well.
		 */
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

		if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
			cpu_smt_disable(false);
	}
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "MMIO Stale Data: " fmt

static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF] = "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	    cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	/* Microcode will be checked in mmio_update_mitigation(). */
	if (mmio_mitigation == MMIO_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA))
			mmio_mitigation = MMIO_MITIGATION_VERW;
		else
			mmio_mitigation = MMIO_MITIGATION_OFF;
	}

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
		verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init mmio_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return;

	if (verw_clear_cpu_buf_mitigation_selected)
		mmio_mitigation = MMIO_MITIGATION_VERW;

	if (mmio_mitigation == MMIO_MITIGATION_VERW) {
		/*
		 * Check if the system has the right microcode.
		 *
		 * CPU Fill buffer clear mitigation is enumerated by either an explicit
		 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
		 * affected systems.
		 */
		if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
		      (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
		       boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
		       !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
			mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", mmio_strings[mmio_mitigation]);
}

static void __init mmio_apply_mitigation(void)
{
	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	/*
	 * Only enable the VMM mitigation if the CPU buffer clear mitigation is
	 * not being used.
	 */
	if (verw_clear_cpu_buf_mitigation_selected) {
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		static_branch_disable(&cpu_buf_vm_clear);
	} else {
		static_branch_enable(&cpu_buf_vm_clear);
	}

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&cpu_buf_idle_clear);

	if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
		cpu_smt_disable(false);
}

static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "Register File Data Sampling: " fmt

static const char * const rfds_strings[] = {
	[RFDS_MITIGATION_OFF] = "Vulnerable",
	[RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
	[RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
};

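/*
 * RFDS_CLEAR in IA32_ARCH_CAPABILITIES indicates that VERW also clears
 * the CPU register file.
 */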
static inline bool __init verw_clears_cpu_reg_file(void)
{
	return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
}

static void __init rfds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RFDS)) {
		rfds_mitigation = RFDS_MITIGATION_OFF;
		return;
	}

	if (rfds_mitigation == RFDS_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_RFDS))
			rfds_mitigation = RFDS_MITIGATION_VERW;
		else
			rfds_mitigation = RFDS_MITIGATION_OFF;
	}

	if (rfds_mitigation == RFDS_MITIGATION_OFF)
		return;

	if (verw_clears_cpu_reg_file())
		verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init rfds_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RFDS))
		return;

	if (verw_clear_cpu_buf_mitigation_selected)
		rfds_mitigation = RFDS_MITIGATION_VERW;

	if (rfds_mitigation == RFDS_MITIGATION_VERW) {
		if (!verw_clears_cpu_reg_file())
			rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", rfds_strings[rfds_mitigation]);
}

static void __init rfds_apply_mitigation(void)
{
	if (rfds_mitigation == RFDS_MITIGATION_VERW)
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
}

static __init int rfds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_RFDS))
		return 0;

	if (!strcmp(str, "off"))
		rfds_mitigation = RFDS_MITIGATION_OFF;
	else if (!strcmp(str, "on"))
		rfds_mitigation = RFDS_MITIGATION_VERW;

	return 0;
}
early_param("reg_file_data_sampling", rfds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_AUTO,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF] = "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;

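/*
 * Program RNGDS_MITG_DIS in MSR_IA32_MCU_OPT_CTRL according to the
 * selected SRBDS mitigation.
 */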
void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	/*
	 * An MDS_NO CPU for which the SRBDS mitigation is not needed due to
	 * TSX being disabled may not have received the SRBDS MSR microcode;
	 * there is nothing to program in that case.
	 */
	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		return;

	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

static void __init srbds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SRBDS)) {
		srbds_mitigation = SRBDS_MITIGATION_OFF;
		return;
	}

	if (srbds_mitigation == SRBDS_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_SRBDS))
			srbds_mitigation = SRBDS_MITIGATION_FULL;
		else {
			srbds_mitigation = SRBDS_MITIGATION_OFF;
			return;
		}
	}

	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
	 * by Processor MMIO Stale Data vulnerability.
	 */
	if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static void __init srbds_apply_mitigation(void)
{
	update_srbds_msr();
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "GDS: " fmt

enum gds_mitigations {
	GDS_MITIGATION_OFF,
	GDS_MITIGATION_AUTO,
	GDS_MITIGATION_UCODE_NEEDED,
	GDS_MITIGATION_FORCE,
	GDS_MITIGATION_FULL,
	GDS_MITIGATION_FULL_LOCKED,
	GDS_MITIGATION_HYPERVISOR,
};

static enum gds_mitigations gds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;

static const char * const gds_strings[] = {
	[GDS_MITIGATION_OFF] = "Vulnerable",
	[GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
	[GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode",
	[GDS_MITIGATION_FULL] = "Mitigation: Microcode",
	[GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)",
	[GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
};

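/* True if the microcode-based GDS mitigation is in effect. */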
bool gds_ucode_mitigated(void)
{
	return (gds_mitigation == GDS_MITIGATION_FULL ||
		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
}
EXPORT_SYMBOL_GPL(gds_ucode_mitigated);

void update_gds_msr(void)
{
	u64 mcu_ctrl_after;
	u64 mcu_ctrl;

	switch (gds_mitigation) {
	case GDS_MITIGATION_OFF:
		rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl |= GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FULL_LOCKED:
		/*
		 * The LOCKED state comes from the boot CPU. APs might not have
		 * the same state. Make sure the mitigation is enabled on all
		 * CPUs.
		 */
	case GDS_MITIGATION_FULL:
		rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl &= ~GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FORCE:
	case GDS_MITIGATION_UCODE_NEEDED:
	case GDS_MITIGATION_HYPERVISOR:
	case GDS_MITIGATION_AUTO:
		return;
	}

	wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	/*
	 * Check to make sure that the WRMSR value was not ignored. Writes to
	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
	 * processor was not.
	 */
	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
}

static void __init gds_select_mitigation(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
		return;
	}

	/* Will verify below that mitigation _can_ be disabled */
	if (gds_mitigation == GDS_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_GDS))
			gds_mitigation = GDS_MITIGATION_FULL;
		else {
			gds_mitigation = GDS_MITIGATION_OFF;
			return;
		}
	}

	/* No microcode */
	if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
		if (gds_mitigation != GDS_MITIGATION_FORCE)
			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
		return;
	}

	/* Microcode has mitigation, use it */
	if (gds_mitigation == GDS_MITIGATION_FORCE)
		gds_mitigation = GDS_MITIGATION_FULL;

	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
	if (mcu_ctrl & GDS_MITG_LOCKED) {
		if (gds_mitigation == GDS_MITIGATION_OFF)
			pr_warn("Mitigation locked. Disable failed.\n");

		/*
		 * The mitigation is selected from the boot CPU. All other CPUs
		 * _should_ have the same state. If the boot CPU isn't locked
		 * but others are then update_gds_msr() will WARN() of the state
		 * mismatch. If the boot CPU is locked update_gds_msr() will
		 * ensure the other CPUs have the mitigation enabled.
		 */
		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
	}
}

static void __init gds_apply_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	/* Microcode is present */
	if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
		update_gds_msr();
	else if (gds_mitigation == GDS_MITIGATION_FORCE) {
		/*
		 * This only needs to be done on the boot CPU so do it
		 * here rather than in update_gds_msr()
		 */
		setup_clear_cpu_cap(X86_FEATURE_AVX);
		pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
	}

	pr_info("%s\n", gds_strings[gds_mitigation]);
}

static int __init gds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return 0;

	if (!strcmp(str, "off"))
		gds_mitigation = GDS_MITIGATION_OFF;
	else if (!strcmp(str, "force"))
		gds_mitigation = GDS_MITIGATION_FORCE;

	return 0;
}
early_param("gather_data_sampling", gds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
	SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;

	if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1))
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
}

static void __init spectre_v1_apply_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
		return;

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;

/* Depends on spectre_v2 mitigation selected already */
static inline bool cdt_possible(enum spectre_v2_mitigation mode)
{
	if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) ||
	    !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE))
		return false;

	if (mode == SPECTRE_V2_RETPOLINE ||
	    mode == SPECTRE_V2_EIBRS_RETPOLINE)
		return true;

	return false;
}

#undef pr_fmt
#define pr_fmt(fmt) "RETBleed: " fmt

enum its_mitigation {
	ITS_MITIGATION_OFF,
	ITS_MITIGATION_AUTO,
	ITS_MITIGATION_VMEXIT_ONLY,
	ITS_MITIGATION_ALIGNED_THUNKS,
	ITS_MITIGATION_RETPOLINE_STUFF,
};

static enum its_mitigation its_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF;

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_AUTO,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBPB,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
	RETBLEED_MITIGATION_STUFF,
};

static const char * const retbleed_strings[] = {
	[RETBLEED_MITIGATION_NONE] = "Vulnerable",
	[RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
	[RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
	[RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
	[RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
	[RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;

static int __ro_after_init retbleed_nosmt = false;

enum srso_mitigation {
	SRSO_MITIGATION_NONE,
	SRSO_MITIGATION_AUTO,
	SRSO_MITIGATION_UCODE_NEEDED,
	SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
	SRSO_MITIGATION_MICROCODE,
	SRSO_MITIGATION_NOSMT,
	SRSO_MITIGATION_SAFE_RET,
	SRSO_MITIGATION_IBPB,
	SRSO_MITIGATION_IBPB_ON_VMEXIT,
	SRSO_MITIGATION_BP_SPEC_REDUCE,
};

static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;

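/*
 * "retbleed=" accepts a comma-separated list of options, e.g.
 * "retbleed=ibpb,nosmt".
 */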
static int __init retbleed_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');
		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "off")) {
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
		} else if (!strcmp(str, "auto")) {
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else if (!strcmp(str, "unret")) {
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		} else if (!strcmp(str, "ibpb")) {
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		} else if (!strcmp(str, "stuff")) {
			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
		} else if (!strcmp(str, "nosmt")) {
			retbleed_nosmt = true;
		} else if (!strcmp(str, "force")) {
			setup_force_cpu_bug(X86_BUG_RETBLEED);
		} else {
			pr_err("Ignoring unknown retbleed option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("retbleed", retbleed_parse_cmdline);

#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"

static void __init retbleed_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) {
		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
		return;
	}

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_UNRET:
		if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
			pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
		}
		break;
	case RETBLEED_MITIGATION_IBPB:
		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
			pr_err("WARNING: CPU does not support IBPB.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		}
		break;
	case RETBLEED_MITIGATION_STUFF:
		if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
			pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
			pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		}
		break;
	default:
		break;
	}

	if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
		return;

	if (!should_mitigate_vuln(X86_BUG_RETBLEED)) {
		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
		return;
	}

	/* Intel mitigation selected in retbleed_update_mitigation() */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
			 boot_cpu_has(X86_FEATURE_IBPB))
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		else
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		/* Final mitigation depends on spectre-v2 selection */
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
		else if (boot_cpu_has(X86_FEATURE_IBRS))
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
		else
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
	}
}

static void __init retbleed_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RETBLEED))
		return;

	/* ITS can also enable stuffing */
	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF)
		retbleed_mitigation = RETBLEED_MITIGATION_STUFF;

	/* If SRSO is using IBPB, that works for retbleed too */
	if (srso_mitigation == SRSO_MITIGATION_IBPB)
		retbleed_mitigation = RETBLEED_MITIGATION_IBPB;

	if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF &&
	    !cdt_possible(spectre_v2_enabled)) {
		pr_err("WARNING: retbleed=stuff depends on retpoline\n");
		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
	}

	/*
	 * Let IBRS trump all on Intel without affecting the effects of the
	 * retbleed= cmdline option except for call depth based stuffing
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		switch (spectre_v2_enabled) {
		case SPECTRE_V2_IBRS:
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
			break;
		case SPECTRE_V2_EIBRS:
		case SPECTRE_V2_EIBRS_RETPOLINE:
		case SPECTRE_V2_EIBRS_LFENCE:
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
			break;
		default:
			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
				pr_err(RETBLEED_INTEL_MSG);
		}
	}

	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}

static void __init retbleed_apply_mitigation(void)
{
	bool mitigate_smt = false;

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_NONE:
		return;

	case RETBLEED_MITIGATION_UNRET:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		set_return_thunk(retbleed_return_thunk);

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			pr_err(RETBLEED_UNTRAIN_MSG);

		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_IBPB:
		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
		mitigate_smt = true;

		/*
		 * IBPB on entry already obviates the need for
		 * software-based untraining so clear those in case some
		 * other mitigation like SRSO has selected them.
		 */
		setup_clear_cpu_cap(X86_FEATURE_UNRET);
		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);

		/*
		 * There is no need for RSB filling: write_ibpb() ensures
		 * all predictions, including the RSB, are invalidated,
		 * regardless of IBPB implementation.
		 */
		setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);

		break;

	case RETBLEED_MITIGATION_STUFF:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);

		set_return_thunk(call_depth_return_thunk);
		break;

	default:
		break;
	}

	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
	    (retbleed_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
		cpu_smt_disable(false);
}

#undef pr_fmt
#define pr_fmt(fmt) "ITS: " fmt

static const char * const its_strings[] = {
	[ITS_MITIGATION_OFF] = "Vulnerable",
	[ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
	[ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
	[ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
};

static int __init its_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
		pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
		return 0;
	}

	if (!strcmp(str, "off")) {
		its_mitigation = ITS_MITIGATION_OFF;
	} else if (!strcmp(str, "on")) {
		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
	} else if (!strcmp(str, "force")) {
		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
		setup_force_cpu_bug(X86_BUG_ITS);
	} else if (!strcmp(str, "vmexit")) {
		its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
	} else if (!strcmp(str, "stuff")) {
		its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
	} else {
		pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
	}

	return 0;
}
early_param("indirect_target_selection", its_parse_cmdline);

static void __init its_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_ITS)) {
		its_mitigation = ITS_MITIGATION_OFF;
		return;
	}

	if (its_mitigation == ITS_MITIGATION_AUTO) {
		if (should_mitigate_vuln(X86_BUG_ITS))
			its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
		else
			its_mitigation = ITS_MITIGATION_OFF;
	}

	if (its_mitigation == ITS_MITIGATION_OFF)
		return;

	if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
	    !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
		pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
		its_mitigation = ITS_MITIGATION_OFF;
		return;
	}

	if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
		pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
		its_mitigation = ITS_MITIGATION_OFF;
		return;
	}

	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
	    !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
		pr_err("RSB stuff mitigation not supported, using default\n");
		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
	}

	if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY &&
	    !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
}

static void __init its_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_ITS))
		return;

	switch (spectre_v2_enabled) {
	case SPECTRE_V2_NONE:
		if (its_mitigation != ITS_MITIGATION_OFF)
			pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
		its_mitigation = ITS_MITIGATION_OFF;
		break;
	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		/* Retpoline+CDT mitigates ITS */
		if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
			its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
		break;
	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
		its_mitigation = ITS_MITIGATION_OFF;
		break;
	default:
		break;
	}

	if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
	    !cdt_possible(spectre_v2_enabled))
		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;

	pr_info("%s\n", its_strings[its_mitigation]);
}

static void __init its_apply_mitigation(void)
{
	switch (its_mitigation) {
	case ITS_MITIGATION_OFF:
	case ITS_MITIGATION_AUTO:
	case ITS_MITIGATION_VMEXIT_ONLY:
		break;
	case ITS_MITIGATION_ALIGNED_THUNKS:
		if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
			setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);

		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		set_return_thunk(its_return_thunk);
		break;
	case ITS_MITIGATION_RETPOLINE_STUFF:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
		set_return_thunk(call_depth_return_thunk);
		break;
	}
}

#undef pr_fmt
#define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt

enum tsa_mitigations {
	TSA_MITIGATION_NONE,
	TSA_MITIGATION_AUTO,
	TSA_MITIGATION_UCODE_NEEDED,
	TSA_MITIGATION_USER_KERNEL,
	TSA_MITIGATION_VM,
	TSA_MITIGATION_FULL,
};

static const char * const tsa_strings[] = {
	[TSA_MITIGATION_NONE] = "Vulnerable",
	[TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
	[TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
	[TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
	[TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
};

static enum tsa_mitigations tsa_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;

static int __init tsa_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		tsa_mitigation = TSA_MITIGATION_NONE;
	else if (!strcmp(str, "on"))
		tsa_mitigation = TSA_MITIGATION_FULL;
	else if (!strcmp(str, "user"))
		tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
	else if (!strcmp(str, "vm"))
		tsa_mitigation = TSA_MITIGATION_VM;
	else
		pr_err("Ignoring unknown tsa=%s option.\n", str);

	return 0;
}
early_param("tsa", tsa_parse_cmdline);

static void __init tsa_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_TSA)) {
		tsa_mitigation = TSA_MITIGATION_NONE;
		return;
	}

	if (tsa_mitigation == TSA_MITIGATION_AUTO) {
		bool vm = false, uk = false;

		tsa_mitigation = TSA_MITIGATION_NONE;

		if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
		    cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) {
			tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
			uk = true;
		}

		if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
		    cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
			tsa_mitigation = TSA_MITIGATION_VM;
			vm = true;
		}

		if (uk && vm)
			tsa_mitigation = TSA_MITIGATION_FULL;
	}

	if (tsa_mitigation == TSA_MITIGATION_NONE)
		return;

	if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
		tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;

	/*
	 * No need to set verw_clear_cpu_buf_mitigation_selected - it
	 * doesn't fit all cases here and it is not needed because this
	 * is the only VERW-based mitigation on AMD.
	 */
	pr_info("%s\n", tsa_strings[tsa_mitigation]);
}

static void __init tsa_apply_mitigation(void)
{
	switch (tsa_mitigation) {
	case TSA_MITIGATION_USER_KERNEL:
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		break;
	case TSA_MITIGATION_VM:
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
		break;
	case TSA_MITIGATION_FULL:
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
		break;
	default:
		break;
	}
}

#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_MITIGATION_RETPOLINE
static bool spectre_v2_bad_module;

1764 bool retpoline_module_ok(bool has_retpoline)
1765 {
1766 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1767 return true;
1768
1769 pr_err("System may be vulnerable to spectre v2\n");
1770 spectre_v2_bad_module = true;
1771 return false;
1772 }
1773
1774 static inline const char *spectre_v2_module_string(void)
1775 {
1776 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1777 }
1778 #else
1779 static inline const char *spectre_v2_module_string(void) { return ""; }
1780 #endif
1781
1782 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1783 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1784 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1785 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1786
1787 #ifdef CONFIG_BPF_SYSCALL
1788 void unpriv_ebpf_notify(int new_state)
1789 {
1790 if (new_state)
1791 return;
1792
1793 /* Unprivileged eBPF is enabled */
1794
1795 switch (spectre_v2_enabled) {
1796 case SPECTRE_V2_EIBRS:
1797 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1798 break;
1799 case SPECTRE_V2_EIBRS_LFENCE:
1800 if (sched_smt_active())
1801 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1802 break;
1803 default:
1804 break;
1805 }
1806 }
1807 #endif
1808
1809 static inline bool match_option(const char *arg, int arglen, const char *opt)
1810 {
1811 int len = strlen(opt);
1812
1813 return len == arglen && !strncmp(arg, opt, len);
1814 }
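
/*
 * Illustrative sketch (not part of the original source): match_option() pairs
 * with cmdline_find_option(), which copies the value of "<option>=<value>"
 * into the supplied buffer and returns that value's length. For a boot
 * command line containing "spectre_v2=retpoline,lfence" the flow is roughly:
 *
 *	char arg[20];
 *	int ret = cmdline_find_option(boot_command_line, "spectre_v2",
 *				      arg, sizeof(arg));
 *	// ret == 16, arg == "retpoline,lfence"
 *	match_option(arg, ret, "retpoline");		// false: lengths differ
 *	match_option(arg, ret, "retpoline,lfence");	// true: exact match
 */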
1815
1816 /* The kernel command line selection for spectre v2 */
1817 enum spectre_v2_mitigation_cmd {
1818 SPECTRE_V2_CMD_NONE,
1819 SPECTRE_V2_CMD_AUTO,
1820 SPECTRE_V2_CMD_FORCE,
1821 SPECTRE_V2_CMD_RETPOLINE,
1822 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1823 SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1824 SPECTRE_V2_CMD_EIBRS,
1825 SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1826 SPECTRE_V2_CMD_EIBRS_LFENCE,
1827 SPECTRE_V2_CMD_IBRS,
1828 };
1829
1830 static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO;
1831
1832 enum spectre_v2_user_cmd {
1833 SPECTRE_V2_USER_CMD_NONE,
1834 SPECTRE_V2_USER_CMD_AUTO,
1835 SPECTRE_V2_USER_CMD_FORCE,
1836 SPECTRE_V2_USER_CMD_PRCTL,
1837 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1838 SPECTRE_V2_USER_CMD_SECCOMP,
1839 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1840 };
1841
1842 static const char * const spectre_v2_user_strings[] = {
1843 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
1844 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
1845 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
1846 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
1847 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
1848 };
1849
1850 static const struct {
1851 const char *option;
1852 enum spectre_v2_user_cmd cmd;
1853 bool secure;
1854 } v2_user_options[] __initconst = {
1855 { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
1856 { "off", SPECTRE_V2_USER_CMD_NONE, false },
1857 { "on", SPECTRE_V2_USER_CMD_FORCE, true },
1858 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
1859 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
1860 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
1861 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
1862 };
1863
1864 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
1865 {
1866 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1867 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
1868 }
1869
1870 static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void)
1871 {
1872 char arg[20];
1873 int ret, i;
1874
1875 if (!IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2))
1876 return SPECTRE_V2_USER_CMD_NONE;
1877
1878 ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
1879 arg, sizeof(arg));
1880 if (ret < 0)
1881 return SPECTRE_V2_USER_CMD_AUTO;
1882
1883 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
1884 if (match_option(arg, ret, v2_user_options[i].option)) {
1885 spec_v2_user_print_cond(v2_user_options[i].option,
1886 v2_user_options[i].secure);
1887 return v2_user_options[i].cmd;
1888 }
1889 }
1890
1891 pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
1892 return SPECTRE_V2_USER_CMD_AUTO;
1893 }
1894
1895 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1896 {
1897 return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1898 }
1899
1900 static void __init spectre_v2_user_select_mitigation(void)
1901 {
1902 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1903 return;
1904
1905 switch (spectre_v2_parse_user_cmdline()) {
1906 case SPECTRE_V2_USER_CMD_NONE:
1907 return;
1908 case SPECTRE_V2_USER_CMD_FORCE:
1909 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1910 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1911 break;
1912 case SPECTRE_V2_USER_CMD_AUTO:
1913 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER))
1914 break;
1915 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1916 if (smt_mitigations == SMT_MITIGATIONS_OFF)
1917 break;
1918 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1919 break;
1920 case SPECTRE_V2_USER_CMD_PRCTL:
1921 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1922 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1923 break;
1924 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1925 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1926 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1927 break;
1928 case SPECTRE_V2_USER_CMD_SECCOMP:
1929 if (IS_ENABLED(CONFIG_SECCOMP))
1930 spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
1931 else
1932 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1933 spectre_v2_user_stibp = spectre_v2_user_ibpb;
1934 break;
1935 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1936 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1937 if (IS_ENABLED(CONFIG_SECCOMP))
1938 spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
1939 else
1940 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1941 break;
1942 }
1943
1944 /*
1945 * At this point, an STIBP mode other than "off" has been set.
1946 * If STIBP support is not being forced, check if STIBP always-on
1947 * is preferred.
1948 */
1949 if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1950 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
1951 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1952 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1953
1954 if (!boot_cpu_has(X86_FEATURE_IBPB))
1955 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1956
1957 if (!boot_cpu_has(X86_FEATURE_STIBP))
1958 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1959 }
1960
1961 static void __init spectre_v2_user_update_mitigation(void)
1962 {
1963 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1964 return;
1965
1966 /* The spectre_v2 cmd line can override spectre_v2_user options */
1967 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
1968 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1969 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1970 } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
1971 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1972 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1973 }
1974
1975 /*
1976 * If STIBP is not available, Intel enhanced IBRS is enabled, or SMT is
1977 * impossible, STIBP is not required.
1978 *
1979 * Intel's Enhanced IBRS also protects against cross-thread branch target
1980 * injection in user-mode as the IBRS bit remains always set which
1981 * implicitly enables cross-thread protections. However, in legacy IBRS
1982 * mode, the IBRS bit is set only on kernel entry and cleared on return
1983 * to userspace. AMD Automatic IBRS also does not protect userspace.
1984 * These modes therefore disable the implicit cross-thread protection,
1985 * so allow for STIBP to be selected in those cases.
1986 */
1987 if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1988 !cpu_smt_possible() ||
1989 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1990 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
1991 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1992 return;
1993 }
1994
1995 if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE &&
1996 (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
1997 retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) {
1998 if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT &&
1999 spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED)
2000 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
2001 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
2002 }
2003 pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]);
2004 }
2005
2006 static void __init spectre_v2_user_apply_mitigation(void)
2007 {
2008 /* Initialize Indirect Branch Prediction Barrier */
2009 if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) {
2010 static_branch_enable(&switch_vcpu_ibpb);
2011
2012 switch (spectre_v2_user_ibpb) {
2013 case SPECTRE_V2_USER_STRICT:
2014 static_branch_enable(&switch_mm_always_ibpb);
2015 break;
2016 case SPECTRE_V2_USER_PRCTL:
2017 case SPECTRE_V2_USER_SECCOMP:
2018 static_branch_enable(&switch_mm_cond_ibpb);
2019 break;
2020 default:
2021 break;
2022 }
2023
2024 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
2025 static_key_enabled(&switch_mm_always_ibpb) ?
2026 "always-on" : "conditional");
2027 }
2028 }
2029
2030 static const char * const spectre_v2_strings[] = {
2031 [SPECTRE_V2_NONE] = "Vulnerable",
2032 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
2033 [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
2034 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
2035 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
2036 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
2037 [SPECTRE_V2_IBRS] = "Mitigation: IBRS",
2038 };
2039
2040 static const struct {
2041 const char *option;
2042 enum spectre_v2_mitigation_cmd cmd;
2043 bool secure;
2044 } mitigation_options[] __initconst = {
2045 { "off", SPECTRE_V2_CMD_NONE, false },
2046 { "on", SPECTRE_V2_CMD_FORCE, true },
2047 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
2048 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
2049 { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
2050 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
2051 { "eibrs", SPECTRE_V2_CMD_EIBRS, false },
2052 { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false },
2053 { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false },
2054 { "auto", SPECTRE_V2_CMD_AUTO, false },
2055 { "ibrs", SPECTRE_V2_CMD_IBRS, false },
2056 };
2057
2058 static void __init spec_v2_print_cond(const char *reason, bool secure)
2059 {
2060 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
2061 pr_info("%s selected on command line.\n", reason);
2062 }
2063
2064 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
2065 {
2066 enum spectre_v2_mitigation_cmd cmd;
2067 char arg[20];
2068 int ret, i;
2069
2070 cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE;
2071 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
2072 return SPECTRE_V2_CMD_NONE;
2073
2074 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
2075 if (ret < 0)
2076 return cmd;
2077
2078 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
2079 if (!match_option(arg, ret, mitigation_options[i].option))
2080 continue;
2081 cmd = mitigation_options[i].cmd;
2082 break;
2083 }
2084
2085 if (i >= ARRAY_SIZE(mitigation_options)) {
2086 pr_err("unknown option (%s). Switching to default mode\n", arg);
2087 return cmd;
2088 }
2089
2090 if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
2091 cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2092 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
2093 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2094 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2095 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
2096 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
2097 mitigation_options[i].option);
2098 return SPECTRE_V2_CMD_AUTO;
2099 }
2100
2101 if ((cmd == SPECTRE_V2_CMD_EIBRS ||
2102 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2103 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2104 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2105 pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
2106 mitigation_options[i].option);
2107 return SPECTRE_V2_CMD_AUTO;
2108 }
2109
2110 if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2111 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
2112 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
2113 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
2114 mitigation_options[i].option);
2115 return SPECTRE_V2_CMD_AUTO;
2116 }
2117
2118 if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
2119 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
2120 mitigation_options[i].option);
2121 return SPECTRE_V2_CMD_AUTO;
2122 }
2123
2124 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
2125 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
2126 mitigation_options[i].option);
2127 return SPECTRE_V2_CMD_AUTO;
2128 }
2129
2130 if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
2131 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
2132 mitigation_options[i].option);
2133 return SPECTRE_V2_CMD_AUTO;
2134 }
2135
2136 if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
2137 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
2138 mitigation_options[i].option);
2139 return SPECTRE_V2_CMD_AUTO;
2140 }
2141
2142 spec_v2_print_cond(mitigation_options[i].option,
2143 mitigation_options[i].secure);
2144 return cmd;
2145 }
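
/*
 * Illustrative examples (not part of the original source) of how the parser
 * above resolves a few boot command lines on a Spectre v2 affected CPU:
 *
 *	spectre_v2=off			-> SPECTRE_V2_CMD_NONE
 *	spectre_v2=retpoline,lfence	-> SPECTRE_V2_CMD_RETPOLINE_LFENCE, or
 *					   AUTO if retpolines are not compiled
 *					   in or LFENCE is not serializing
 *	spectre_v2=eibrs		-> SPECTRE_V2_CMD_EIBRS, or AUTO if the
 *					   CPU lacks X86_FEATURE_IBRS_ENHANCED
 *	spectre_v2=ibrs			-> SPECTRE_V2_CMD_IBRS, or AUTO on
 *					   non-Intel CPUs, CPUs without IBRS,
 *					   or when running as a XenPV guest
 */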
2146
2147 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
2148 {
2149 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
2150 pr_err("Kernel not compiled with retpoline; no mitigation available!");
2151 return SPECTRE_V2_NONE;
2152 }
2153
2154 return SPECTRE_V2_RETPOLINE;
2155 }
2156
2157 static bool __ro_after_init rrsba_disabled;
2158
2159 /* Disable in-kernel use of non-RSB RET predictors */
2160 static void __init spec_ctrl_disable_kernel_rrsba(void)
2161 {
2162 if (rrsba_disabled)
2163 return;
2164
2165 if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
2166 rrsba_disabled = true;
2167 return;
2168 }
2169
2170 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
2171 return;
2172
2173 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
2174 update_spec_ctrl(x86_spec_ctrl_base);
2175 rrsba_disabled = true;
2176 }
2177
2178 static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
2179 {
2180 /*
2181 * WARNING! There are many subtleties to consider when changing *any*
2182 * code related to RSB-related mitigations. Before doing so, carefully
2183 * read the following document, and update if necessary:
2184 *
2185 * Documentation/admin-guide/hw-vuln/rsb.rst
2186 *
2187 * In an overly simplified nutshell:
2188 *
2189 * - User->user RSB attacks are conditionally mitigated during
2190 * context switches by cond_mitigation -> write_ibpb().
2191 *
2192 * - User->kernel and guest->host attacks are mitigated by eIBRS or
2193 * RSB filling.
2194 *
2195 * Though, depending on config, note that other alternative
2196 * mitigations may end up getting used instead, e.g., IBPB on
2197 * entry/vmexit, call depth tracking, or return thunks.
2198 */
2199
2200 switch (mode) {
2201 case SPECTRE_V2_NONE:
2202 break;
2203
2204 case SPECTRE_V2_EIBRS:
2205 case SPECTRE_V2_EIBRS_LFENCE:
2206 case SPECTRE_V2_EIBRS_RETPOLINE:
2207 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2208 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
2209 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
2210 }
2211 break;
2212
2213 case SPECTRE_V2_RETPOLINE:
2214 case SPECTRE_V2_LFENCE:
2215 case SPECTRE_V2_IBRS:
2216 pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
2217 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
2218 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
2219 break;
2220
2221 default:
2222 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
2223 dump_stack();
2224 break;
2225 }
2226 }
2227
2228 /*
2229 * Set BHI_DIS_S to prevent indirect branches in the kernel from being
2230 * influenced by branch history in userspace. Not needed if BHI_NO is set.
2231 */
2232 static bool __init spec_ctrl_bhi_dis(void)
2233 {
2234 if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
2235 return false;
2236
2237 x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
2238 update_spec_ctrl(x86_spec_ctrl_base);
2239 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
2240
2241 return true;
2242 }
2243
2244 enum bhi_mitigations {
2245 BHI_MITIGATION_OFF,
2246 BHI_MITIGATION_AUTO,
2247 BHI_MITIGATION_ON,
2248 BHI_MITIGATION_VMEXIT_ONLY,
2249 };
2250
2251 static enum bhi_mitigations bhi_mitigation __ro_after_init =
2252 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF;
2253
2254 static int __init spectre_bhi_parse_cmdline(char *str)
2255 {
2256 if (!str)
2257 return -EINVAL;
2258
2259 if (!strcmp(str, "off"))
2260 bhi_mitigation = BHI_MITIGATION_OFF;
2261 else if (!strcmp(str, "on"))
2262 bhi_mitigation = BHI_MITIGATION_ON;
2263 else if (!strcmp(str, "vmexit"))
2264 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2265 else
2266 pr_err("Ignoring unknown spectre_bhi option (%s)", str);
2267
2268 return 0;
2269 }
2270 early_param("spectre_bhi", spectre_bhi_parse_cmdline);
2271
2272 static void __init bhi_select_mitigation(void)
2273 {
2274 if (!boot_cpu_has(X86_BUG_BHI))
2275 bhi_mitigation = BHI_MITIGATION_OFF;
2276
2277 if (bhi_mitigation != BHI_MITIGATION_AUTO)
2278 return;
2279
2280 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) {
2281 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
2282 bhi_mitigation = BHI_MITIGATION_ON;
2283 else
2284 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2285 } else {
2286 bhi_mitigation = BHI_MITIGATION_OFF;
2287 }
2288 }
2289
2290 static void __init bhi_update_mitigation(void)
2291 {
2292 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE)
2293 bhi_mitigation = BHI_MITIGATION_OFF;
2294
2295 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
2296 spectre_v2_cmd == SPECTRE_V2_CMD_AUTO)
2297 bhi_mitigation = BHI_MITIGATION_OFF;
2298 }
2299
2300 static void __init bhi_apply_mitigation(void)
2301 {
2302 if (bhi_mitigation == BHI_MITIGATION_OFF)
2303 return;
2304
2305 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
2306 if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
2307 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
2308 spec_ctrl_disable_kernel_rrsba();
2309 if (rrsba_disabled)
2310 return;
2311 }
2312
2313 if (!IS_ENABLED(CONFIG_X86_64))
2314 return;
2315
2316 /* Mitigate in hardware if supported */
2317 if (spec_ctrl_bhi_dis())
2318 return;
2319
2320 if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
2321 pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
2322 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2323 return;
2324 }
2325
2326 pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
2327 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
2328 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2329 }
2330
2331 static void __init spectre_v2_select_mitigation(void)
2332 {
2333 spectre_v2_cmd = spectre_v2_parse_cmdline();
2334
2335 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
2336 (spectre_v2_cmd == SPECTRE_V2_CMD_NONE || spectre_v2_cmd == SPECTRE_V2_CMD_AUTO))
2337 return;
2338
2339 switch (spectre_v2_cmd) {
2340 case SPECTRE_V2_CMD_NONE:
2341 return;
2342
2343 case SPECTRE_V2_CMD_AUTO:
2344 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2))
2345 break;
2346 fallthrough;
2347 case SPECTRE_V2_CMD_FORCE:
2348 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2349 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2350 break;
2351 }
2352
2353 spectre_v2_enabled = spectre_v2_select_retpoline();
2354 break;
2355
2356 case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
2357 pr_err(SPECTRE_V2_LFENCE_MSG);
2358 spectre_v2_enabled = SPECTRE_V2_LFENCE;
2359 break;
2360
2361 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
2362 spectre_v2_enabled = SPECTRE_V2_RETPOLINE;
2363 break;
2364
2365 case SPECTRE_V2_CMD_RETPOLINE:
2366 spectre_v2_enabled = spectre_v2_select_retpoline();
2367 break;
2368
2369 case SPECTRE_V2_CMD_IBRS:
2370 spectre_v2_enabled = SPECTRE_V2_IBRS;
2371 break;
2372
2373 case SPECTRE_V2_CMD_EIBRS:
2374 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2375 break;
2376
2377 case SPECTRE_V2_CMD_EIBRS_LFENCE:
2378 spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE;
2379 break;
2380
2381 case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
2382 spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE;
2383 break;
2384 }
2385 }
2386
2387 static void __init spectre_v2_update_mitigation(void)
2388 {
2389 if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO &&
2390 !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) {
2391 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
2392 boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2393 retbleed_mitigation != RETBLEED_MITIGATION_NONE &&
2394 retbleed_mitigation != RETBLEED_MITIGATION_STUFF &&
2395 boot_cpu_has(X86_FEATURE_IBRS) &&
2396 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
2397 spectre_v2_enabled = SPECTRE_V2_IBRS;
2398 }
2399 }
2400
2401 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2402 pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]);
2403 }
2404
2405 static void __init spectre_v2_apply_mitigation(void)
2406 {
2407 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2408 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
2409
2410 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2411 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
2412 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
2413 } else {
2414 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
2415 update_spec_ctrl(x86_spec_ctrl_base);
2416 }
2417 }
2418
2419 switch (spectre_v2_enabled) {
2420 case SPECTRE_V2_NONE:
2421 return;
2422
2423 case SPECTRE_V2_EIBRS:
2424 break;
2425
2426 case SPECTRE_V2_IBRS:
2427 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
2428 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
2429 pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
2430 break;
2431
2432 case SPECTRE_V2_LFENCE:
2433 case SPECTRE_V2_EIBRS_LFENCE:
2434 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
2435 fallthrough;
2436
2437 case SPECTRE_V2_RETPOLINE:
2438 case SPECTRE_V2_EIBRS_RETPOLINE:
2439 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
2440 break;
2441 }
2442
2443 /*
2444 * Disable alternate RSB predictions in kernel when indirect CALLs and
2445 * JMPs get protection against BHI and Intramode-BTI, but RET
2446 * prediction from a non-RSB predictor is still a risk.
2447 */
2448 if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE ||
2449 spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE ||
2450 spectre_v2_enabled == SPECTRE_V2_RETPOLINE)
2451 spec_ctrl_disable_kernel_rrsba();
2452
2453 spectre_v2_select_rsb_mitigation(spectre_v2_enabled);
2454
2455 /*
2456 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
2457 * and Enhanced IBRS protect firmware too, so enable IBRS around
2458 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
2459 * otherwise enabled.
2460 *
2461 * Use "spectre_v2_enabled" to check Enhanced IBRS instead of
2462 * boot_cpu_has(), because the user might select retpoline on the kernel
2463 * command line and, if the CPU supports Enhanced IBRS, the kernel might
2464 * unintentionally not enable IBRS around firmware calls.
2465 */
2466 if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2467 boot_cpu_has(X86_FEATURE_IBPB) &&
2468 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2469 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
2470
2471 if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) {
2472 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
2473 pr_info("Enabling Speculation Barrier for firmware calls\n");
2474 }
2475
2476 } else if (boot_cpu_has(X86_FEATURE_IBRS) &&
2477 !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2478 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
2479 pr_info("Enabling Restricted Speculation for firmware calls\n");
2480 }
2481 }
2482
2483 static void update_stibp_msr(void * __unused)
2484 {
2485 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
2486 update_spec_ctrl(val);
2487 }
2488
2489 /* Update x86_spec_ctrl_base in case SMT state changed. */
2490 static void update_stibp_strict(void)
2491 {
2492 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
2493
2494 if (sched_smt_active())
2495 mask |= SPEC_CTRL_STIBP;
2496
2497 if (mask == x86_spec_ctrl_base)
2498 return;
2499
2500 pr_info("Update user space SMT mitigation: STIBP %s\n",
2501 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
2502 x86_spec_ctrl_base = mask;
2503 on_each_cpu(update_stibp_msr, NULL, 1);
2504 }
2505
2506 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
2507 static void update_indir_branch_cond(void)
2508 {
2509 if (sched_smt_active())
2510 static_branch_enable(&switch_to_cond_stibp);
2511 else
2512 static_branch_disable(&switch_to_cond_stibp);
2513 }
2514
2515 #undef pr_fmt
2516 #define pr_fmt(fmt) fmt
2517
2518 /* Update the static key controlling the MDS CPU buffer clear in idle */
2519 static void update_mds_branch_idle(void)
2520 {
2521 /*
2522 * Enable the idle clearing if SMT is active on CPUs which are
2523 * affected only by MSBDS and not any other MDS variant.
2524 *
2525 * The other variants cannot be mitigated when SMT is enabled, so
2526 * clearing the buffers on idle just to prevent the Store Buffer
2527 * repartitioning leak would be a window dressing exercise.
2528 */
2529 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
2530 return;
2531
2532 if (sched_smt_active()) {
2533 static_branch_enable(&cpu_buf_idle_clear);
2534 } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
2535 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
2536 static_branch_disable(&cpu_buf_idle_clear);
2537 }
2538 }
2539
2540 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
2541 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
2542 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
2543
2544 void cpu_bugs_smt_update(void)
2545 {
2546 mutex_lock(&spec_ctrl_mutex);
2547
2548 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
2549 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
2550 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
2551
2552 switch (spectre_v2_user_stibp) {
2553 case SPECTRE_V2_USER_NONE:
2554 break;
2555 case SPECTRE_V2_USER_STRICT:
2556 case SPECTRE_V2_USER_STRICT_PREFERRED:
2557 update_stibp_strict();
2558 break;
2559 case SPECTRE_V2_USER_PRCTL:
2560 case SPECTRE_V2_USER_SECCOMP:
2561 update_indir_branch_cond();
2562 break;
2563 }
2564
2565 switch (mds_mitigation) {
2566 case MDS_MITIGATION_FULL:
2567 case MDS_MITIGATION_AUTO:
2568 case MDS_MITIGATION_VMWERV:
2569 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
2570 pr_warn_once(MDS_MSG_SMT);
2571 update_mds_branch_idle();
2572 break;
2573 case MDS_MITIGATION_OFF:
2574 break;
2575 }
2576
2577 switch (taa_mitigation) {
2578 case TAA_MITIGATION_VERW:
2579 case TAA_MITIGATION_AUTO:
2580 case TAA_MITIGATION_UCODE_NEEDED:
2581 if (sched_smt_active())
2582 pr_warn_once(TAA_MSG_SMT);
2583 break;
2584 case TAA_MITIGATION_TSX_DISABLED:
2585 case TAA_MITIGATION_OFF:
2586 break;
2587 }
2588
2589 switch (mmio_mitigation) {
2590 case MMIO_MITIGATION_VERW:
2591 case MMIO_MITIGATION_AUTO:
2592 case MMIO_MITIGATION_UCODE_NEEDED:
2593 if (sched_smt_active())
2594 pr_warn_once(MMIO_MSG_SMT);
2595 break;
2596 case MMIO_MITIGATION_OFF:
2597 break;
2598 }
2599
2600 switch (tsa_mitigation) {
2601 case TSA_MITIGATION_USER_KERNEL:
2602 case TSA_MITIGATION_VM:
2603 case TSA_MITIGATION_AUTO:
2604 case TSA_MITIGATION_FULL:
2605 /*
2606 * TSA-SQ can potentially lead to info leakage between
2607 * SMT threads.
2608 */
2609 if (sched_smt_active())
2610 static_branch_enable(&cpu_buf_idle_clear);
2611 else
2612 static_branch_disable(&cpu_buf_idle_clear);
2613 break;
2614 case TSA_MITIGATION_NONE:
2615 case TSA_MITIGATION_UCODE_NEEDED:
2616 break;
2617 }
2618
2619 mutex_unlock(&spec_ctrl_mutex);
2620 }
2621
2622 #undef pr_fmt
2623 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
2624
2625 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
2626
2627 /* The kernel command line selection */
2628 enum ssb_mitigation_cmd {
2629 SPEC_STORE_BYPASS_CMD_NONE,
2630 SPEC_STORE_BYPASS_CMD_AUTO,
2631 SPEC_STORE_BYPASS_CMD_ON,
2632 SPEC_STORE_BYPASS_CMD_PRCTL,
2633 SPEC_STORE_BYPASS_CMD_SECCOMP,
2634 };
2635
2636 static const char * const ssb_strings[] = {
2637 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
2638 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
2639 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
2640 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
2641 };
2642
2643 static const struct {
2644 const char *option;
2645 enum ssb_mitigation_cmd cmd;
2646 } ssb_mitigation_options[] __initconst = {
2647 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
2648 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
2649 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
2650 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
2651 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
2652 };
2653
2654 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
2655 {
2656 enum ssb_mitigation_cmd cmd;
2657 char arg[20];
2658 int ret, i;
2659
2660 cmd = IS_ENABLED(CONFIG_MITIGATION_SSB) ?
2661 SPEC_STORE_BYPASS_CMD_AUTO : SPEC_STORE_BYPASS_CMD_NONE;
2662 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
2663 cpu_mitigations_off()) {
2664 return SPEC_STORE_BYPASS_CMD_NONE;
2665 } else {
2666 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
2667 arg, sizeof(arg));
2668 if (ret < 0)
2669 return cmd;
2670
2671 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
2672 if (!match_option(arg, ret, ssb_mitigation_options[i].option))
2673 continue;
2674
2675 cmd = ssb_mitigation_options[i].cmd;
2676 break;
2677 }
2678
2679 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
2680 pr_err("unknown option (%s). Switching to default mode\n", arg);
2681 return cmd;
2682 }
2683 }
2684
2685 return cmd;
2686 }
2687
2688 static void __init ssb_select_mitigation(void)
2689 {
2690 enum ssb_mitigation_cmd cmd;
2691
2692 if (!boot_cpu_has(X86_FEATURE_SSBD))
2693 goto out;
2694
2695 cmd = ssb_parse_cmdline();
2696 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
2697 (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
2698 cmd == SPEC_STORE_BYPASS_CMD_AUTO))
2699 return;
2700
2701 switch (cmd) {
2702 case SPEC_STORE_BYPASS_CMD_SECCOMP:
2703 /*
2704 * Choose prctl+seccomp as the default mode if seccomp is
2705 * enabled.
2706 */
2707 if (IS_ENABLED(CONFIG_SECCOMP))
2708 ssb_mode = SPEC_STORE_BYPASS_SECCOMP;
2709 else
2710 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2711 break;
2712 case SPEC_STORE_BYPASS_CMD_ON:
2713 ssb_mode = SPEC_STORE_BYPASS_DISABLE;
2714 break;
2715 case SPEC_STORE_BYPASS_CMD_AUTO:
2716 case SPEC_STORE_BYPASS_CMD_PRCTL:
2717 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2718 break;
2719 case SPEC_STORE_BYPASS_CMD_NONE:
2720 break;
2721 }
2722
2723 out:
2724 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2725 pr_info("%s\n", ssb_strings[ssb_mode]);
2726 }
2727
2728 static void __init ssb_apply_mitigation(void)
2729 {
2730 /*
2731 * We have three CPU feature flags that are in play here:
2732 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
2733 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
2734 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
2735 */
2736 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
2737 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
2738 /*
2739 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
2740 * use a completely different MSR and bit dependent on family.
2741 */
2742 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
2743 !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
2744 x86_amd_ssb_disable();
2745 } else {
2746 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
2747 update_spec_ctrl(x86_spec_ctrl_base);
2748 }
2749 }
2750 }
2751
2752 #undef pr_fmt
2753 #define pr_fmt(fmt) "Speculation prctl: " fmt
2754
2755 static void task_update_spec_tif(struct task_struct *tsk)
2756 {
2757 /* Force the update of the real TIF bits */
2758 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
2759
2760 /*
2761 * Immediately update the speculation control MSRs for the current
2762 * task, but for a non-current task delay setting the CPU
2763 * mitigation until it is scheduled next.
2764 *
2765 * This can only happen for SECCOMP mitigation. For PRCTL it's
2766 * always the current task.
2767 */
2768 if (tsk == current)
2769 speculation_ctrl_update_current();
2770 }
2771
2772 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
2773 {
2774
2775 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2776 return -EPERM;
2777
2778 switch (ctrl) {
2779 case PR_SPEC_ENABLE:
2780 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2781 return 0;
2782 case PR_SPEC_DISABLE:
2783 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2784 return 0;
2785 default:
2786 return -ERANGE;
2787 }
2788 }
2789
2790 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
2791 {
2792 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
2793 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
2794 return -ENXIO;
2795
2796 switch (ctrl) {
2797 case PR_SPEC_ENABLE:
2798 /* If speculation is force disabled, enable is not allowed */
2799 if (task_spec_ssb_force_disable(task))
2800 return -EPERM;
2801 task_clear_spec_ssb_disable(task);
2802 task_clear_spec_ssb_noexec(task);
2803 task_update_spec_tif(task);
2804 break;
2805 case PR_SPEC_DISABLE:
2806 task_set_spec_ssb_disable(task);
2807 task_clear_spec_ssb_noexec(task);
2808 task_update_spec_tif(task);
2809 break;
2810 case PR_SPEC_FORCE_DISABLE:
2811 task_set_spec_ssb_disable(task);
2812 task_set_spec_ssb_force_disable(task);
2813 task_clear_spec_ssb_noexec(task);
2814 task_update_spec_tif(task);
2815 break;
2816 case PR_SPEC_DISABLE_NOEXEC:
2817 if (task_spec_ssb_force_disable(task))
2818 return -EPERM;
2819 task_set_spec_ssb_disable(task);
2820 task_set_spec_ssb_noexec(task);
2821 task_update_spec_tif(task);
2822 break;
2823 default:
2824 return -ERANGE;
2825 }
2826 return 0;
2827 }
2828
2829 static bool is_spec_ib_user_controlled(void)
2830 {
2831 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2832 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2833 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2834 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2835 }
2836
2837 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2838 {
2839 switch (ctrl) {
2840 case PR_SPEC_ENABLE:
2841 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2842 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2843 return 0;
2844
2845 /*
2846 * With strict mode for both IBPB and STIBP, the instruction
2847 * code paths avoid checking this task flag and instead,
2848 * unconditionally run the instruction. However, STIBP and IBPB
2849 * are independent and either can be set to conditionally
2850 * enabled regardless of the mode of the other.
2851 *
2852 * If either is set to conditional, allow the task flag to be
2853 * updated, unless it was force-disabled by a previous prctl
2854 * call. Currently, this is possible on an AMD CPU which has the
2855 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2856 * kernel is booted with 'spectre_v2_user=seccomp', then
2857 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2858 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2859 */
2860 if (!is_spec_ib_user_controlled() ||
2861 task_spec_ib_force_disable(task))
2862 return -EPERM;
2863
2864 task_clear_spec_ib_disable(task);
2865 task_update_spec_tif(task);
2866 break;
2867 case PR_SPEC_DISABLE:
2868 case PR_SPEC_FORCE_DISABLE:
2869 /*
2870 * Indirect branch speculation is always allowed when
2871 * mitigation is force disabled.
2872 */
2873 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2874 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2875 return -EPERM;
2876
2877 if (!is_spec_ib_user_controlled())
2878 return 0;
2879
2880 task_set_spec_ib_disable(task);
2881 if (ctrl == PR_SPEC_FORCE_DISABLE)
2882 task_set_spec_ib_force_disable(task);
2883 task_update_spec_tif(task);
2884 if (task == current)
2885 indirect_branch_prediction_barrier();
2886 break;
2887 default:
2888 return -ERANGE;
2889 }
2890 return 0;
2891 }
2892
2893 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2894 unsigned long ctrl)
2895 {
2896 switch (which) {
2897 case PR_SPEC_STORE_BYPASS:
2898 return ssb_prctl_set(task, ctrl);
2899 case PR_SPEC_INDIRECT_BRANCH:
2900 return ib_prctl_set(task, ctrl);
2901 case PR_SPEC_L1D_FLUSH:
2902 return l1d_flush_prctl_set(task, ctrl);
2903 default:
2904 return -ENODEV;
2905 }
2906 }
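
/*
 * Illustrative userspace sketch (not kernel code): the handlers above back
 * the PR_GET_SPECULATION_CTRL / PR_SET_SPECULATION_CTRL prctl() interface
 * documented in Documentation/userspace-api/spec_ctrl.rst. Assuming
 * <sys/prctl.h> and <linux/prctl.h>, a task can opt itself out of
 * Speculative Store Bypass like this:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	static int disable_ssb_for_self(void)
 *	{
 *		// Routed to ssb_prctl_set(); fails with ENXIO unless the
 *		// SSB mitigation is in prctl or seccomp mode.
 *		return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			     PR_SPEC_DISABLE, 0, 0);
 *	}
 */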
2907
2908 #ifdef CONFIG_SECCOMP
2909 void arch_seccomp_spec_mitigate(struct task_struct *task)
2910 {
2911 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2912 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2913 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2914 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2915 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2916 }
2917 #endif
2918
2919 static int l1d_flush_prctl_get(struct task_struct *task)
2920 {
2921 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2922 return PR_SPEC_FORCE_DISABLE;
2923
2924 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2925 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2926 else
2927 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2928 }
2929
2930 static int ssb_prctl_get(struct task_struct *task)
2931 {
2932 switch (ssb_mode) {
2933 case SPEC_STORE_BYPASS_NONE:
2934 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2935 return PR_SPEC_ENABLE;
2936 return PR_SPEC_NOT_AFFECTED;
2937 case SPEC_STORE_BYPASS_DISABLE:
2938 return PR_SPEC_DISABLE;
2939 case SPEC_STORE_BYPASS_SECCOMP:
2940 case SPEC_STORE_BYPASS_PRCTL:
2941 if (task_spec_ssb_force_disable(task))
2942 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2943 if (task_spec_ssb_noexec(task))
2944 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2945 if (task_spec_ssb_disable(task))
2946 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2947 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2948 }
2949 BUG();
2950 }
2951
2952 static int ib_prctl_get(struct task_struct *task)
2953 {
2954 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2955 return PR_SPEC_NOT_AFFECTED;
2956
2957 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2958 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2959 return PR_SPEC_ENABLE;
2960 else if (is_spec_ib_user_controlled()) {
2961 if (task_spec_ib_force_disable(task))
2962 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2963 if (task_spec_ib_disable(task))
2964 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2965 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2966 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2967 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2968 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2969 return PR_SPEC_DISABLE;
2970 else
2971 return PR_SPEC_NOT_AFFECTED;
2972 }
2973
2974 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2975 {
2976 switch (which) {
2977 case PR_SPEC_STORE_BYPASS:
2978 return ssb_prctl_get(task);
2979 case PR_SPEC_INDIRECT_BRANCH:
2980 return ib_prctl_get(task);
2981 case PR_SPEC_L1D_FLUSH:
2982 return l1d_flush_prctl_get(task);
2983 default:
2984 return -ENODEV;
2985 }
2986 }
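
/*
 * Illustrative userspace sketch (not kernel code): PR_GET_SPECULATION_CTRL
 * returns a bitmask built by the *_prctl_get() helpers above. A minimal
 * decode of the indirect branch control, assuming <sys/prctl.h> and
 * <linux/prctl.h>:
 *
 *	long ib = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
 *			0, 0, 0);
 *
 *	if (ib == PR_SPEC_NOT_AFFECTED)
 *		;	// CPU not affected by Spectre v2
 *	else if (ib & PR_SPEC_PRCTL)
 *		;	// per-task control available (prctl/seccomp modes)
 *	else if (ib & PR_SPEC_DISABLE)
 *		;	// mitigation forced on for all tasks (strict modes)
 */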
2987
2988 void x86_spec_ctrl_setup_ap(void)
2989 {
2990 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2991 update_spec_ctrl(x86_spec_ctrl_base);
2992
2993 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2994 x86_amd_ssb_disable();
2995 }
2996
2997 bool itlb_multihit_kvm_mitigation;
2998 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
2999
3000 #undef pr_fmt
3001 #define pr_fmt(fmt) "L1TF: " fmt
3002
3003 /* Default mitigation for L1TF-affected CPUs */
3004 enum l1tf_mitigations l1tf_mitigation __ro_after_init =
3005 IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF;
3006 #if IS_ENABLED(CONFIG_KVM_INTEL)
3007 EXPORT_SYMBOL_GPL(l1tf_mitigation);
3008 #endif
3009 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
3010 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
3011
3012 /*
3013 * These CPUs all support a 44-bit physical address space internally in the
3014 * cache but CPUID can report a smaller number of physical address bits.
3015 *
3016 * The L1TF mitigation uses the topmost address bit for the inversion of
3017 * non-present PTEs. When the installed memory reaches into the topmost
3018 * address bit due to memory holes, which has been observed on machines
3019 * which report 36 physical address bits and have 32G RAM installed,
3020 * then the mitigation range check in l1tf_apply_mitigation() triggers.
3021 * This is a false positive because the mitigation is still possible due to
3022 * the fact that the cache uses 44 bits internally. Use the cache bits
3023 * instead of the reported physical bits and adjust them on the affected
3024 * machines to 44 bits if the reported bits are less than 44.
3025 */
3026 static void override_cache_bits(struct cpuinfo_x86 *c)
3027 {
3028 if (c->x86 != 6)
3029 return;
3030
3031 switch (c->x86_vfm) {
3032 case INTEL_NEHALEM:
3033 case INTEL_WESTMERE:
3034 case INTEL_SANDYBRIDGE:
3035 case INTEL_IVYBRIDGE:
3036 case INTEL_HASWELL:
3037 case INTEL_HASWELL_L:
3038 case INTEL_HASWELL_G:
3039 case INTEL_BROADWELL:
3040 case INTEL_BROADWELL_G:
3041 case INTEL_SKYLAKE_L:
3042 case INTEL_SKYLAKE:
3043 case INTEL_KABYLAKE_L:
3044 case INTEL_KABYLAKE:
3045 if (c->x86_cache_bits < 44)
3046 c->x86_cache_bits = 44;
3047 break;
3048 }
3049 }
3050
3051 static void __init l1tf_select_mitigation(void)
3052 {
3053 if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
3054 l1tf_mitigation = L1TF_MITIGATION_OFF;
3055 return;
3056 }
3057
3058 if (l1tf_mitigation != L1TF_MITIGATION_AUTO)
3059 return;
3060
3061 if (!should_mitigate_vuln(X86_BUG_L1TF)) {
3062 l1tf_mitigation = L1TF_MITIGATION_OFF;
3063 return;
3064 }
3065
3066 if (smt_mitigations == SMT_MITIGATIONS_ON)
3067 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
3068 else
3069 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
3070 }
3071
3072 static void __init l1tf_apply_mitigation(void)
3073 {
3074 u64 half_pa;
3075
3076 if (!boot_cpu_has_bug(X86_BUG_L1TF))
3077 return;
3078
3079 override_cache_bits(&boot_cpu_data);
3080
3081 switch (l1tf_mitigation) {
3082 case L1TF_MITIGATION_OFF:
3083 case L1TF_MITIGATION_FLUSH_NOWARN:
3084 case L1TF_MITIGATION_FLUSH:
3085 case L1TF_MITIGATION_AUTO:
3086 break;
3087 case L1TF_MITIGATION_FLUSH_NOSMT:
3088 case L1TF_MITIGATION_FULL:
3089 cpu_smt_disable(false);
3090 break;
3091 case L1TF_MITIGATION_FULL_FORCE:
3092 cpu_smt_disable(true);
3093 break;
3094 }
3095
3096 #if CONFIG_PGTABLE_LEVELS == 2
3097 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
3098 return;
3099 #endif
3100
3101 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
3102 if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
3103 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
3104 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
3105 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
3106 half_pa);
3107 pr_info("However, doing so will make a part of your RAM unusable.\n");
3108 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
3109 return;
3110 }
3111
3112 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
3113 }
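
/*
 * Worked example (illustrative): assuming l1tf_pfn_limit() is derived from
 * x86_cache_bits, half_pa above equals 2^(x86_cache_bits - 1). With the
 * 44-bit override from override_cache_bits() that is 2^43 bytes = 8 TiB,
 * so the MAX_PA/2 warning only fires when RAM is mapped above that limit.
 * Without the override, a CPU reporting 36 physical address bits would put
 * the limit at 2^35 bytes = 32 GiB, which is exactly the false positive
 * described in the comment before override_cache_bits().
 */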
3114
3115 static int __init l1tf_cmdline(char *str)
3116 {
3117 if (!boot_cpu_has_bug(X86_BUG_L1TF))
3118 return 0;
3119
3120 if (!str)
3121 return -EINVAL;
3122
3123 if (!strcmp(str, "off"))
3124 l1tf_mitigation = L1TF_MITIGATION_OFF;
3125 else if (!strcmp(str, "flush,nowarn"))
3126 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
3127 else if (!strcmp(str, "flush"))
3128 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
3129 else if (!strcmp(str, "flush,nosmt"))
3130 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
3131 else if (!strcmp(str, "full"))
3132 l1tf_mitigation = L1TF_MITIGATION_FULL;
3133 else if (!strcmp(str, "full,force"))
3134 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
3135
3136 return 0;
3137 }
3138 early_param("l1tf", l1tf_cmdline);
3139
3140 #undef pr_fmt
3141 #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt
3142
3143 static const char * const srso_strings[] = {
3144 [SRSO_MITIGATION_NONE] = "Vulnerable",
3145 [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
3146 [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
3147 [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
3148 [SRSO_MITIGATION_NOSMT] = "Mitigation: SMT disabled",
3149 [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
3150 [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
3151 [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
3152 [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
3153 };
3154
3155 static int __init srso_parse_cmdline(char *str)
3156 {
3157 if (!str)
3158 return -EINVAL;
3159
3160 if (!strcmp(str, "off"))
3161 srso_mitigation = SRSO_MITIGATION_NONE;
3162 else if (!strcmp(str, "microcode"))
3163 srso_mitigation = SRSO_MITIGATION_MICROCODE;
3164 else if (!strcmp(str, "safe-ret"))
3165 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
3166 else if (!strcmp(str, "ibpb"))
3167 srso_mitigation = SRSO_MITIGATION_IBPB;
3168 else if (!strcmp(str, "ibpb-vmexit"))
3169 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
3170 else
3171 pr_err("Ignoring unknown SRSO option (%s).", str);
3172
3173 return 0;
3174 }
3175 early_param("spec_rstack_overflow", srso_parse_cmdline);
3176
3177 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
3178
3179 static void __init srso_select_mitigation(void)
3180 {
3181 if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
3182 srso_mitigation = SRSO_MITIGATION_NONE;
3183 return;
3184 }
3185
3186 if (srso_mitigation == SRSO_MITIGATION_AUTO) {
3187 if (should_mitigate_vuln(X86_BUG_SRSO)) {
3188 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
3189 } else {
3190 srso_mitigation = SRSO_MITIGATION_NONE;
3191 return;
3192 }
3193 }
3194
3195 /* Zen1/2 with SMT off aren't vulnerable to SRSO. */
3196 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
3197 srso_mitigation = SRSO_MITIGATION_NOSMT;
3198 return;
3199 }
3200
3201 if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) {
3202 pr_warn("IBPB-extending microcode not applied!\n");
3203 pr_warn(SRSO_NOTICE);
3204
3205 /*
3206 * Safe-RET provides partial mitigation without microcode, but
3207 * other mitigations require microcode to provide any
3208 * mitigations.
3209 */
3210 if (srso_mitigation == SRSO_MITIGATION_SAFE_RET)
3211 srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
3212 else
3213 srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
3214 }
3215
3216 switch (srso_mitigation) {
3217 case SRSO_MITIGATION_SAFE_RET:
3218 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3219 if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
3220 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
3221 goto ibpb_on_vmexit;
3222 }
3223
3224 if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
3225 pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
3226 srso_mitigation = SRSO_MITIGATION_NONE;
3227 }
3228 break;
3229 ibpb_on_vmexit:
3230 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3231 if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
3232 pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
3233 srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
3234 break;
3235 }
3236 fallthrough;
3237 case SRSO_MITIGATION_IBPB:
3238 if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
3239 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
3240 srso_mitigation = SRSO_MITIGATION_NONE;
3241 }
3242 break;
3243 default:
3244 break;
3245 }
3246 }
3247
3248 static void __init srso_update_mitigation(void)
3249 {
3250 /* If retbleed is using IBPB, that works for SRSO as well */
3251 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB &&
3252 boot_cpu_has(X86_FEATURE_IBPB_BRTYPE))
3253 srso_mitigation = SRSO_MITIGATION_IBPB;
3254
3255 if (boot_cpu_has_bug(X86_BUG_SRSO) &&
3256 !cpu_mitigations_off())
3257 pr_info("%s\n", srso_strings[srso_mitigation]);
3258 }
3259
3260 static void __init srso_apply_mitigation(void)
3261 {
3262 /*
3263 * Clear the feature flag if this mitigation is not selected as that
3264 * feature flag controls the BpSpecReduce MSR bit toggling in KVM.
3265 */
3266 if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
3267 setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
3268
3269 if (srso_mitigation == SRSO_MITIGATION_NONE) {
3270 if (boot_cpu_has(X86_FEATURE_SBPB))
3271 x86_pred_cmd = PRED_CMD_SBPB;
3272 return;
3273 }
3274
3275 switch (srso_mitigation) {
3276 case SRSO_MITIGATION_SAFE_RET:
3277 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3278 /*
3279 * Enable the return thunk for generated code
3280 * like ftrace, static_call, etc.
3281 */
3282 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
3283 setup_force_cpu_cap(X86_FEATURE_UNRET);
3284
3285 if (boot_cpu_data.x86 == 0x19) {
3286 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
3287 set_return_thunk(srso_alias_return_thunk);
3288 } else {
3289 setup_force_cpu_cap(X86_FEATURE_SRSO);
3290 set_return_thunk(srso_return_thunk);
3291 }
3292 break;
3293 case SRSO_MITIGATION_IBPB:
3294 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
3295 /*
3296 * IBPB on entry already obviates the need for
3297 * software-based untraining so clear those in case some
3298 * other mitigation like Retbleed has selected them.
3299 */
3300 setup_clear_cpu_cap(X86_FEATURE_UNRET);
3301 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
3302 fallthrough;
3303 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3304 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
3305 /*
3306 * There is no need for RSB filling: entry_ibpb() ensures
3307 * all predictions, including the RSB, are invalidated,
3308 * regardless of IBPB implementation.
3309 */
3310 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
3311 break;
3312 default:
3313 break;
3314 }
3315 }
3316
3317 #undef pr_fmt
3318 #define pr_fmt(fmt) fmt
3319
3320 #ifdef CONFIG_SYSFS
3321
3322 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
3323
3324 #if IS_ENABLED(CONFIG_KVM_INTEL)
3325 static const char * const l1tf_vmx_states[] = {
3326 [VMENTER_L1D_FLUSH_AUTO] = "auto",
3327 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
3328 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
3329 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
3330 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
3331 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
3332 };
3333
3334 static ssize_t l1tf_show_state(char *buf)
3335 {
3336 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
3337 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3338
3339 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
3340 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
3341 sched_smt_active())) {
3342 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
3343 l1tf_vmx_states[l1tf_vmx_mitigation]);
3344 }
3345
3346 return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
3347 l1tf_vmx_states[l1tf_vmx_mitigation],
3348 sched_smt_active() ? "vulnerable" : "disabled");
3349 }
3350
3351 static ssize_t itlb_multihit_show_state(char *buf)
3352 {
3353 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
3354 !boot_cpu_has(X86_FEATURE_VMX))
3355 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
3356 else if (!(cr4_read_shadow() & X86_CR4_VMXE))
3357 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
3358 else if (itlb_multihit_kvm_mitigation)
3359 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
3360 else
3361 return sysfs_emit(buf, "KVM: Vulnerable\n");
3362 }
3363 #else
3364 static ssize_t l1tf_show_state(char *buf)
3365 {
3366 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3367 }
3368
3369 static ssize_t itlb_multihit_show_state(char *buf)
3370 {
3371 return sysfs_emit(buf, "Processor vulnerable\n");
3372 }
3373 #endif
3374
3375 static ssize_t mds_show_state(char *buf)
3376 {
3377 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3378 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3379 mds_strings[mds_mitigation]);
3380 }
3381
3382 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
3383 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3384 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
3385 sched_smt_active() ? "mitigated" : "disabled"));
3386 }
3387
3388 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3389 sched_smt_active() ? "vulnerable" : "disabled");
3390 }
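
/*
 * Illustrative userspace sketch (not kernel code): the string built above is
 * what a read of /sys/devices/system/cpu/vulnerabilities/mds returns, e.g.
 * "Mitigation: Clear CPU buffers; SMT vulnerable". A minimal reader:
 *
 *	#include <stdio.h>
 *
 *	static int print_mds_state(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mds", "r");
 *
 *		if (!f)
 *			return -1;
 *		if (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */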
3391
3392 static ssize_t tsx_async_abort_show_state(char *buf)
3393 {
3394 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
3395 (taa_mitigation == TAA_MITIGATION_OFF))
3396 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
3397
3398 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3399 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3400 taa_strings[taa_mitigation]);
3401 }
3402
3403 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
3404 sched_smt_active() ? "vulnerable" : "disabled");
3405 }
3406
3407 static ssize_t mmio_stale_data_show_state(char *buf)
3408 {
3409 if (mmio_mitigation == MMIO_MITIGATION_OFF)
3410 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
3411
3412 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3413 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3414 mmio_strings[mmio_mitigation]);
3415 }
3416
3417 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
3418 sched_smt_active() ? "vulnerable" : "disabled");
3419 }
3420
rfds_show_state(char * buf)3421 static ssize_t rfds_show_state(char *buf)
3422 {
3423 return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
3424 }
3425
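/*
 * A hypervisor may hide the real microcode revision from the guest, so the
 * old-microcode state cannot be determined there.
 */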
static ssize_t old_microcode_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return sysfs_emit(buf, "Unknown: running under hypervisor\n");

	return sysfs_emit(buf, "Vulnerable\n");
}

static ssize_t its_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
}

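/*
 * Build the "; STIBP: ..." suffix for the spectre_v2 sysfs line. An empty
 * string is returned when eIBRS (without AutoIBRS) is in use, since no
 * separate STIBP state is reported in that case.
 */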
static char *stibp_state(void)
{
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
		return "";

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		return "; STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return "; STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return "; STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return "; STIBP: conditional";
	}
	return "";
}

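/*
 * Build the "; IBPB: ..." suffix for the spectre_v2 sysfs line, based on
 * which switch_mm() IBPB static key (if any) is enabled.
 */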
static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return "; IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return "; IBPB: conditional";
		return "; IBPB: disabled";
	}
	return "";
}

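/*
 * PBRSB-eIBRS reporting: affected CPUs are covered by the RSB clearing
 * sequence on VM exit (full or lite); otherwise they remain vulnerable.
 */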
static char *pbrsb_eibrs_state(void)
{
	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
			return "; PBRSB-eIBRS: SW sequence";
		else
			return "; PBRSB-eIBRS: Vulnerable";
	} else {
		return "; PBRSB-eIBRS: Not affected";
	}
}

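/*
 * BHI reporting: prefer the hardware control (BHI_DIS_S), then the SW
 * clearing loop, then a retpoline with RRSBA disabled; anything else is
 * vulnerable, with KVM possibly still covered by the VM-exit loop.
 */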
static const char *spectre_bhi_state(void)
{
	if (!boot_cpu_has_bug(X86_BUG_BHI))
		return "; BHI: Not affected";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
		return "; BHI: BHI_DIS_S";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
		return "; BHI: SW loop, KVM: SW loop";
	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
		 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
		 rrsba_disabled)
		return "; BHI: Retpoline";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT))
		return "; BHI: Vulnerable, KVM: SW loop";

	return "; BHI: Vulnerable";
}

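/*
 * Assemble the spectre_v2 sysfs line: the base mitigation string followed
 * by the IBPB, IBRS_FW, STIBP, RSB, PBRSB-eIBRS and BHI suffixes. Known-bad
 * combinations (LFENCE, eIBRS with unprivileged eBPF) are reported as
 * vulnerable outright.
 */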
static ssize_t spectre_v2_show_state(char *buf)
{
	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
		return sysfs_emit(buf, "Vulnerable: LFENCE\n");

	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
			  spectre_v2_strings[spectre_v2_enabled],
			  ibpb_state(),
			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
			  stibp_state(),
			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
			  pbrsb_eibrs_state(),
			  spectre_bhi_state(),
			  /* this should always be at the end */
			  spectre_v2_module_string());
}

static ssize_t srbds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
}

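/*
 * Retbleed state: the untrained-return-thunk and IBPB mitigations are only
 * meaningful on AMD/Hygon; for those, also report whether SMT is covered by
 * a strict STIBP mode.
 */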
static ssize_t retbleed_show_state(char *buf)
{
	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");

		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
				  !sched_smt_active() ? "disabled" :
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
				  "enabled with STIBP protection" : "vulnerable");
	}

	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}

static ssize_t srso_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
}

static ssize_t gds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
}

static ssize_t tsa_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
}

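/*
 * Common backend for the vulnerabilities' sysfs show functions: CPUs without
 * the bug report "Not affected", known bugs dispatch to their dedicated state
 * helper, and everything else falls back to "Vulnerable".
 */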
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sysfs_emit(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sysfs_emit(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
		return mmio_stale_data_show_state(buf);

	case X86_BUG_RETBLEED:
		return retbleed_show_state(buf);

	case X86_BUG_SRSO:
		return srso_show_state(buf);

	case X86_BUG_GDS:
		return gds_show_state(buf);

	case X86_BUG_RFDS:
		return rfds_show_state(buf);

	case X86_BUG_OLD_MICROCODE:
		return old_microcode_show_state(buf);

	case X86_BUG_ITS:
		return its_show_state(buf);

	case X86_BUG_TSA:
		return tsa_show_state(buf);

	default:
		break;
	}

	return sysfs_emit(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}

ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
}

ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
}

ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
}

ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
}

ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
}

ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
}
#endif

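/*
 * Sanity check backing the default return thunk: getting here means the
 * kernel is still using an unpatched return thunk, which should never
 * happen once alternatives have been applied.
 */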
void __warn_thunk(void)
{
	WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
}
