1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1994 Linus Torvalds
4 *
5 * Cyrix stuff, June 1998 by:
6 * - Rafael R. Reilova (moved everything from head.S),
7 * <rreilova@ececs.uc.edu>
8 * - Channing Corn (tests & fixes),
9 * - Andrew D. Balsa (code cleanup).
10 */
11 #include <linux/init.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/nospec.h>
15 #include <linux/prctl.h>
16 #include <linux/sched/smt.h>
17 #include <linux/pgtable.h>
18 #include <linux/bpf.h>
19
20 #include <asm/spec-ctrl.h>
21 #include <asm/cmdline.h>
22 #include <asm/bugs.h>
23 #include <asm/processor.h>
24 #include <asm/processor-flags.h>
25 #include <asm/fpu/api.h>
26 #include <asm/msr.h>
27 #include <asm/vmx.h>
28 #include <asm/paravirt.h>
29 #include <asm/cpu_device_id.h>
30 #include <asm/e820/api.h>
31 #include <asm/hypervisor.h>
32 #include <asm/tlbflush.h>
33 #include <asm/cpu.h>
34
35 #include "cpu.h"
36
37 /*
38 * Speculation Vulnerability Handling
39 *
40 * Each vulnerability is handled with the following functions:
41 * <vuln>_select_mitigation() -- Selects a mitigation to use. This should
42 * take into account all relevant command line
43 * options.
44 * <vuln>_update_mitigation() -- This is called after all vulnerabilities have
45 * selected a mitigation, in case the selection
46 * may want to change based on other choices
47 * made. This function is optional.
48 * <vuln>_apply_mitigation() -- Enable the selected mitigation.
49 *
50 * The compile-time mitigation in all cases should be AUTO. An explicit
51 * command-line option can override AUTO. If no such option is
52 * provided, <vuln>_select_mitigation() will override AUTO to the best
53 * mitigation option.
54 */
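/*
 * Illustrative sketch only (not part of this file): a hypothetical
 * vulnerability "foo" would plug into the scheme above with up to three
 * functions, all wired into cpu_select_mitigations() below:
 *
 *   static void __init foo_select_mitigation(void)
 *   {
 *           if (!boot_cpu_has_bug(X86_BUG_FOO))
 *                   foo_mitigation = FOO_MITIGATION_OFF;
 *           else if (foo_mitigation == FOO_MITIGATION_AUTO)
 *                   foo_mitigation = should_mitigate_vuln(X86_BUG_FOO) ?
 *                                    FOO_MITIGATION_FULL : FOO_MITIGATION_OFF;
 *   }
 *
 * with an optional foo_update_mitigation() to revise the choice once all
 * vulnerabilities have selected, and foo_apply_mitigation() to enable it.
 * X86_BUG_FOO and the FOO_MITIGATION_* values are made-up names used
 * purely for illustration.
 */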
55
56 static void __init spectre_v1_select_mitigation(void);
57 static void __init spectre_v1_apply_mitigation(void);
58 static void __init spectre_v2_select_mitigation(void);
59 static void __init spectre_v2_update_mitigation(void);
60 static void __init spectre_v2_apply_mitigation(void);
61 static void __init retbleed_select_mitigation(void);
62 static void __init retbleed_update_mitigation(void);
63 static void __init retbleed_apply_mitigation(void);
64 static void __init spectre_v2_user_select_mitigation(void);
65 static void __init spectre_v2_user_update_mitigation(void);
66 static void __init spectre_v2_user_apply_mitigation(void);
67 static void __init ssb_select_mitigation(void);
68 static void __init ssb_apply_mitigation(void);
69 static void __init l1tf_select_mitigation(void);
70 static void __init l1tf_apply_mitigation(void);
71 static void __init mds_select_mitigation(void);
72 static void __init mds_update_mitigation(void);
73 static void __init mds_apply_mitigation(void);
74 static void __init taa_select_mitigation(void);
75 static void __init taa_update_mitigation(void);
76 static void __init taa_apply_mitigation(void);
77 static void __init mmio_select_mitigation(void);
78 static void __init mmio_update_mitigation(void);
79 static void __init mmio_apply_mitigation(void);
80 static void __init rfds_select_mitigation(void);
81 static void __init rfds_update_mitigation(void);
82 static void __init rfds_apply_mitigation(void);
83 static void __init srbds_select_mitigation(void);
84 static void __init srbds_apply_mitigation(void);
85 static void __init l1d_flush_select_mitigation(void);
86 static void __init srso_select_mitigation(void);
87 static void __init srso_update_mitigation(void);
88 static void __init srso_apply_mitigation(void);
89 static void __init gds_select_mitigation(void);
90 static void __init gds_apply_mitigation(void);
91 static void __init bhi_select_mitigation(void);
92 static void __init bhi_update_mitigation(void);
93 static void __init bhi_apply_mitigation(void);
94 static void __init its_select_mitigation(void);
95 static void __init its_update_mitigation(void);
96 static void __init its_apply_mitigation(void);
97 static void __init tsa_select_mitigation(void);
98 static void __init tsa_apply_mitigation(void);
99 static void __init vmscape_select_mitigation(void);
100 static void __init vmscape_update_mitigation(void);
101 static void __init vmscape_apply_mitigation(void);
102
103 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
104 u64 x86_spec_ctrl_base;
105 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
106
107 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
108 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
109 EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
110
111 /*
112 * Set when the CPU has run a potentially malicious guest. An IBPB will
113 * be needed before running userspace. That IBPB will flush the branch
114 * predictor content.
115 */
116 DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
117 EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);
118
119 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
120
121 static u64 __ro_after_init x86_arch_cap_msr;
122
123 static DEFINE_MUTEX(spec_ctrl_mutex);
124
125 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
126
127 static void __init set_return_thunk(void *thunk)
128 {
129 x86_return_thunk = thunk;
130
131 pr_info("active return thunk: %ps\n", thunk);
132 }
133
134 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
135 static void update_spec_ctrl(u64 val)
136 {
137 this_cpu_write(x86_spec_ctrl_current, val);
138 wrmsrq(MSR_IA32_SPEC_CTRL, val);
139 }
140
141 /*
142 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
143 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
144 */
145 void update_spec_ctrl_cond(u64 val)
146 {
147 if (this_cpu_read(x86_spec_ctrl_current) == val)
148 return;
149
150 this_cpu_write(x86_spec_ctrl_current, val);
151
152 /*
153 * When KERNEL_IBRS is in use, this MSR is written on return-to-user;
154 * unless forced, the update can be delayed until that time.
155 */
156 if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
157 wrmsrq(MSR_IA32_SPEC_CTRL, val);
158 }
159
160 noinstr u64 spec_ctrl_current(void)
161 {
162 return this_cpu_read(x86_spec_ctrl_current);
163 }
164 EXPORT_SYMBOL_GPL(spec_ctrl_current);
165
166 /*
167 * AMD specific MSR info for Speculative Store Bypass control.
168 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
169 */
170 u64 __ro_after_init x86_amd_ls_cfg_base;
171 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
172
173 /* Control conditional STIBP in switch_to() */
174 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
175 /* Control conditional IBPB in switch_mm() */
176 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
177 /* Control unconditional IBPB in switch_mm() */
178 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
179
180 /* Control IBPB on vCPU load */
181 DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
182 EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
183
184 /* Control CPU buffer clear before idling (halt, mwait) */
185 DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
186 EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
187
188 /*
189 * Controls whether L1D flush based mitigations are enabled, based on
190 * hardware features and the admin setting via the boot parameter;
191 * defaults to false.
192 */
193 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
194
195 /*
196 * Controls CPU Fill buffer clear before VMenter. This is a subset of
197 * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
198 * mitigation is required.
199 */
200 DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
201 EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
202
203 #undef pr_fmt
204 #define pr_fmt(fmt) "mitigations: " fmt
205
206 static void __init cpu_print_attack_vectors(void)
207 {
208 pr_info("Enabled attack vectors: ");
209
210 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
211 pr_cont("user_kernel, ");
212
213 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER))
214 pr_cont("user_user, ");
215
216 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
217 pr_cont("guest_host, ");
218
219 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST))
220 pr_cont("guest_guest, ");
221
222 pr_cont("SMT mitigations: ");
223
224 switch (smt_mitigations) {
225 case SMT_MITIGATIONS_OFF:
226 pr_cont("off\n");
227 break;
228 case SMT_MITIGATIONS_AUTO:
229 pr_cont("auto\n");
230 break;
231 case SMT_MITIGATIONS_ON:
232 pr_cont("on\n");
233 }
234 }
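/*
 * For illustration: with all four attack vectors enabled and SMT
 * mitigations left at their default, the pr_info()/pr_cont() calls above
 * would produce a console line along the lines of:
 *
 *   mitigations: Enabled attack vectors: user_kernel, user_user, guest_host, guest_guest, SMT mitigations: auto
 *
 * The exact set printed depends on the mitigations= and attack vector
 * command line controls.
 */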
235
236 void __init cpu_select_mitigations(void)
237 {
238 /*
239 * Read the SPEC_CTRL MSR to account for reserved bits which may
240 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
241 * init code as it is not enumerated and depends on the family.
242 */
243 if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
244 rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
245
246 /*
247 * A previously running kernel (kexec) may have some controls
248 * turned ON. Clear them and let the mitigations setup below
249 * rediscover them based on configuration.
250 */
251 x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
252 }
253
254 x86_arch_cap_msr = x86_read_arch_cap_msr();
255
256 cpu_print_attack_vectors();
257
258 /* Select the proper CPU mitigations before patching alternatives: */
259 spectre_v1_select_mitigation();
260 spectre_v2_select_mitigation();
261 retbleed_select_mitigation();
262 spectre_v2_user_select_mitigation();
263 ssb_select_mitigation();
264 l1tf_select_mitigation();
265 mds_select_mitigation();
266 taa_select_mitigation();
267 mmio_select_mitigation();
268 rfds_select_mitigation();
269 srbds_select_mitigation();
270 l1d_flush_select_mitigation();
271 srso_select_mitigation();
272 gds_select_mitigation();
273 its_select_mitigation();
274 bhi_select_mitigation();
275 tsa_select_mitigation();
276 vmscape_select_mitigation();
277
278 /*
279 * After mitigations are selected, some may need to update their
280 * choices.
281 */
282 spectre_v2_update_mitigation();
283 /*
284 * retbleed_update_mitigation() relies on the state set by
285 * spectre_v2_update_mitigation(); specifically it wants to know about
286 * spectre_v2=ibrs.
287 */
288 retbleed_update_mitigation();
289 /*
290 * its_update_mitigation() depends on spectre_v2_update_mitigation()
291 * and retbleed_update_mitigation().
292 */
293 its_update_mitigation();
294
295 /*
296 * spectre_v2_user_update_mitigation() depends on
297 * retbleed_update_mitigation(), specifically the STIBP
298 * selection is forced for UNRET or IBPB.
299 */
300 spectre_v2_user_update_mitigation();
301 mds_update_mitigation();
302 taa_update_mitigation();
303 mmio_update_mitigation();
304 rfds_update_mitigation();
305 bhi_update_mitigation();
306 /* srso_update_mitigation() depends on retbleed_update_mitigation(). */
307 srso_update_mitigation();
308 vmscape_update_mitigation();
309
310 spectre_v1_apply_mitigation();
311 spectre_v2_apply_mitigation();
312 retbleed_apply_mitigation();
313 spectre_v2_user_apply_mitigation();
314 ssb_apply_mitigation();
315 l1tf_apply_mitigation();
316 mds_apply_mitigation();
317 taa_apply_mitigation();
318 mmio_apply_mitigation();
319 rfds_apply_mitigation();
320 srbds_apply_mitigation();
321 srso_apply_mitigation();
322 gds_apply_mitigation();
323 its_apply_mitigation();
324 bhi_apply_mitigation();
325 tsa_apply_mitigation();
326 vmscape_apply_mitigation();
327 }
328
329 /*
330 * NOTE: This function is *only* called for SVM, since Intel uses
331 * MSR_IA32_SPEC_CTRL for SSBD.
332 */
333 void
334 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
335 {
336 u64 guestval, hostval;
337 struct thread_info *ti = current_thread_info();
338
339 /*
340 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
341 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
342 */
343 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
344 !static_cpu_has(X86_FEATURE_VIRT_SSBD))
345 return;
346
347 /*
348 * If the host has SSBD mitigation enabled, force it in the host's
349 * virtual MSR value. If it's not permanently enabled, evaluate
350 * current's TIF_SSBD thread flag.
351 */
352 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
353 hostval = SPEC_CTRL_SSBD;
354 else
355 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
356
357 /* Sanitize the guest value */
358 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
359
360 if (hostval != guestval) {
361 unsigned long tif;
362
363 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
364 ssbd_spec_ctrl_to_tif(hostval);
365
366 speculation_ctrl_update(tif);
367 }
368 }
369 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
370
371 static void x86_amd_ssb_disable(void)
372 {
373 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
374
375 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
376 wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
377 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
378 wrmsrq(MSR_AMD64_LS_CFG, msrval);
379 }
380
381 #undef pr_fmt
382 #define pr_fmt(fmt) "MDS: " fmt
383
384 /*
385 * Returns true if vulnerability should be mitigated based on the
386 * selected attack vector controls.
387 *
388 * See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
389 */
390 static bool __init should_mitigate_vuln(unsigned int bug)
391 {
392 switch (bug) {
393 /*
394 * The only runtime-selected spectre_v1 mitigations in the kernel are
395 * related to SWAPGS protection on kernel entry. Therefore, protection
396 * is only required for the user->kernel attack vector.
397 */
398 case X86_BUG_SPECTRE_V1:
399 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL);
400
401 case X86_BUG_SPECTRE_V2:
402 case X86_BUG_RETBLEED:
403 case X86_BUG_L1TF:
404 case X86_BUG_ITS:
405 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
406 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
407
408 case X86_BUG_SPECTRE_V2_USER:
409 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
410 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
411
412 /*
413 * All the vulnerabilities below allow potentially leaking data
414 * across address spaces. Therefore, mitigation is required for
415 * any of these 4 attack vectors.
416 */
417 case X86_BUG_MDS:
418 case X86_BUG_TAA:
419 case X86_BUG_MMIO_STALE_DATA:
420 case X86_BUG_RFDS:
421 case X86_BUG_SRBDS:
422 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
423 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
424 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
425 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
426
427 case X86_BUG_GDS:
428 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
429 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
430 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
431 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
432 (smt_mitigations != SMT_MITIGATIONS_OFF);
433
434 case X86_BUG_SPEC_STORE_BYPASS:
435 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER);
436
437 case X86_BUG_VMSCAPE:
438 return cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
439
440 default:
441 WARN(1, "Unknown bug %x\n", bug);
442 return false;
443 }
444 }
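/*
 * Worked example (assuming a configuration in which all four attack
 * vectors are enabled): any single vector is enough for X86_BUG_MDS,
 * only guest_host matters for X86_BUG_VMSCAPE, only user_user matters
 * for X86_BUG_SPEC_STORE_BYPASS, and X86_BUG_GDS additionally stays
 * mitigated whenever SMT mitigations are not set to off.
 */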
445
446 /* Default mitigation for MDS-affected CPUs */
447 static enum mds_mitigations mds_mitigation __ro_after_init =
448 IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
449 static bool mds_nosmt __ro_after_init = false;
450
451 static const char * const mds_strings[] = {
452 [MDS_MITIGATION_OFF] = "Vulnerable",
453 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
454 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
455 };
456
457 enum taa_mitigations {
458 TAA_MITIGATION_OFF,
459 TAA_MITIGATION_AUTO,
460 TAA_MITIGATION_UCODE_NEEDED,
461 TAA_MITIGATION_VERW,
462 TAA_MITIGATION_TSX_DISABLED,
463 };
464
465 /* Default mitigation for TAA-affected CPUs */
466 static enum taa_mitigations taa_mitigation __ro_after_init =
467 IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;
468
469 enum mmio_mitigations {
470 MMIO_MITIGATION_OFF,
471 MMIO_MITIGATION_AUTO,
472 MMIO_MITIGATION_UCODE_NEEDED,
473 MMIO_MITIGATION_VERW,
474 };
475
476 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
477 static enum mmio_mitigations mmio_mitigation __ro_after_init =
478 IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;
479
480 enum rfds_mitigations {
481 RFDS_MITIGATION_OFF,
482 RFDS_MITIGATION_AUTO,
483 RFDS_MITIGATION_VERW,
484 RFDS_MITIGATION_UCODE_NEEDED,
485 };
486
487 /* Default mitigation for Register File Data Sampling */
488 static enum rfds_mitigations rfds_mitigation __ro_after_init =
489 IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
490
491 /*
492 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
493 * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
494 */
495 static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
496
497 static void __init mds_select_mitigation(void)
498 {
499 if (!boot_cpu_has_bug(X86_BUG_MDS)) {
500 mds_mitigation = MDS_MITIGATION_OFF;
501 return;
502 }
503
504 if (mds_mitigation == MDS_MITIGATION_AUTO) {
505 if (should_mitigate_vuln(X86_BUG_MDS))
506 mds_mitigation = MDS_MITIGATION_FULL;
507 else
508 mds_mitigation = MDS_MITIGATION_OFF;
509 }
510
511 if (mds_mitigation == MDS_MITIGATION_OFF)
512 return;
513
514 verw_clear_cpu_buf_mitigation_selected = true;
515 }
516
517 static void __init mds_update_mitigation(void)
518 {
519 if (!boot_cpu_has_bug(X86_BUG_MDS))
520 return;
521
522 /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
523 if (verw_clear_cpu_buf_mitigation_selected)
524 mds_mitigation = MDS_MITIGATION_FULL;
525
526 if (mds_mitigation == MDS_MITIGATION_FULL) {
527 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
528 mds_mitigation = MDS_MITIGATION_VMWERV;
529 }
530
531 pr_info("%s\n", mds_strings[mds_mitigation]);
532 }
533
534 static void __init mds_apply_mitigation(void)
535 {
536 if (mds_mitigation == MDS_MITIGATION_FULL ||
537 mds_mitigation == MDS_MITIGATION_VMWERV) {
538 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
539 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
540 (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
541 cpu_smt_disable(false);
542 }
543 }
544
545 static int __init mds_cmdline(char *str)
546 {
547 if (!boot_cpu_has_bug(X86_BUG_MDS))
548 return 0;
549
550 if (!str)
551 return -EINVAL;
552
553 if (!strcmp(str, "off"))
554 mds_mitigation = MDS_MITIGATION_OFF;
555 else if (!strcmp(str, "full"))
556 mds_mitigation = MDS_MITIGATION_FULL;
557 else if (!strcmp(str, "full,nosmt")) {
558 mds_mitigation = MDS_MITIGATION_FULL;
559 mds_nosmt = true;
560 }
561
562 return 0;
563 }
564 early_param("mds", mds_cmdline);
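/*
 * Example usage of the "mds=" command line option handled above
 * (illustrative only):
 *
 *   mds=off        - disable the MDS mitigation
 *   mds=full       - enable CPU buffer clearing
 *   mds=full,nosmt - additionally disable SMT
 */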
565
566 #undef pr_fmt
567 #define pr_fmt(fmt) "TAA: " fmt
568
569 static bool taa_nosmt __ro_after_init;
570
571 static const char * const taa_strings[] = {
572 [TAA_MITIGATION_OFF] = "Vulnerable",
573 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
574 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
575 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
576 };
577
578 static bool __init taa_vulnerable(void)
579 {
580 return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
581 }
582
583 static void __init taa_select_mitigation(void)
584 {
585 if (!boot_cpu_has_bug(X86_BUG_TAA)) {
586 taa_mitigation = TAA_MITIGATION_OFF;
587 return;
588 }
589
590 /* TSX previously disabled by tsx=off */
591 if (!boot_cpu_has(X86_FEATURE_RTM)) {
592 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
593 return;
594 }
595
596 /* Microcode will be checked in taa_update_mitigation(). */
597 if (taa_mitigation == TAA_MITIGATION_AUTO) {
598 if (should_mitigate_vuln(X86_BUG_TAA))
599 taa_mitigation = TAA_MITIGATION_VERW;
600 else
601 taa_mitigation = TAA_MITIGATION_OFF;
602 }
603
604 if (taa_mitigation != TAA_MITIGATION_OFF)
605 verw_clear_cpu_buf_mitigation_selected = true;
606 }
607
608 static void __init taa_update_mitigation(void)
609 {
610 if (!taa_vulnerable())
611 return;
612
613 if (verw_clear_cpu_buf_mitigation_selected)
614 taa_mitigation = TAA_MITIGATION_VERW;
615
616 if (taa_mitigation == TAA_MITIGATION_VERW) {
617 /* Check if the requisite ucode is available. */
618 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
619 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
620
621 /*
622 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
623 * A microcode update fixes this behavior to clear CPU buffers. It also
624 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
625 * ARCH_CAP_TSX_CTRL_MSR bit.
626 *
627 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
628 * update is required.
629 */
630 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
631 !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
632 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
633 }
634
635 pr_info("%s\n", taa_strings[taa_mitigation]);
636 }
637
638 static void __init taa_apply_mitigation(void)
639 {
640 if (taa_mitigation == TAA_MITIGATION_VERW ||
641 taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
642 /*
643 * TSX is enabled, select alternate mitigation for TAA which is
644 * the same as MDS. Enable MDS static branch to clear CPU buffers.
645 *
646 * For guests that can't determine whether the correct microcode is
647 * present on host, enable the mitigation for UCODE_NEEDED as well.
648 */
649 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
650
651 if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
652 cpu_smt_disable(false);
653 }
654 }
655
656 static int __init tsx_async_abort_parse_cmdline(char *str)
657 {
658 if (!boot_cpu_has_bug(X86_BUG_TAA))
659 return 0;
660
661 if (!str)
662 return -EINVAL;
663
664 if (!strcmp(str, "off")) {
665 taa_mitigation = TAA_MITIGATION_OFF;
666 } else if (!strcmp(str, "full")) {
667 taa_mitigation = TAA_MITIGATION_VERW;
668 } else if (!strcmp(str, "full,nosmt")) {
669 taa_mitigation = TAA_MITIGATION_VERW;
670 taa_nosmt = true;
671 }
672
673 return 0;
674 }
675 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
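/*
 * Illustrative command line usage for the parser above:
 *
 *   tsx_async_abort=off        - disable the TAA mitigation
 *   tsx_async_abort=full       - clear CPU buffers via VERW
 *   tsx_async_abort=full,nosmt - also disable SMT
 */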
676
677 #undef pr_fmt
678 #define pr_fmt(fmt) "MMIO Stale Data: " fmt
679
680 static bool mmio_nosmt __ro_after_init = false;
681
682 static const char * const mmio_strings[] = {
683 [MMIO_MITIGATION_OFF] = "Vulnerable",
684 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
685 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
686 };
687
688 static void __init mmio_select_mitigation(void)
689 {
690 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
691 mmio_mitigation = MMIO_MITIGATION_OFF;
692 return;
693 }
694
695 /* Microcode will be checked in mmio_update_mitigation(). */
696 if (mmio_mitigation == MMIO_MITIGATION_AUTO) {
697 if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA))
698 mmio_mitigation = MMIO_MITIGATION_VERW;
699 else
700 mmio_mitigation = MMIO_MITIGATION_OFF;
701 }
702
703 if (mmio_mitigation == MMIO_MITIGATION_OFF)
704 return;
705
706 /*
707 * Enable CPU buffer clear mitigation for host and VMM, if also affected
708 * by MDS or TAA.
709 */
710 if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
711 verw_clear_cpu_buf_mitigation_selected = true;
712 }
713
714 static void __init mmio_update_mitigation(void)
715 {
716 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
717 return;
718
719 if (verw_clear_cpu_buf_mitigation_selected)
720 mmio_mitigation = MMIO_MITIGATION_VERW;
721
722 if (mmio_mitigation == MMIO_MITIGATION_VERW) {
723 /*
724 * Check if the system has the right microcode.
725 *
726 * CPU Fill buffer clear mitigation is enumerated by either an explicit
727 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
728 * affected systems.
729 */
730 if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
731 (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
732 boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
733 !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
734 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
735 }
736
737 pr_info("%s\n", mmio_strings[mmio_mitigation]);
738 }
739
740 static void __init mmio_apply_mitigation(void)
741 {
742 if (mmio_mitigation == MMIO_MITIGATION_OFF)
743 return;
744
745 /*
746 * Only enable the VMM mitigation if the CPU buffer clear mitigation is
747 * not being used.
748 */
749 if (verw_clear_cpu_buf_mitigation_selected) {
750 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
751 static_branch_disable(&cpu_buf_vm_clear);
752 } else {
753 static_branch_enable(&cpu_buf_vm_clear);
754 }
755
756 /*
757 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
758 * be propagated to uncore buffers, clearing the Fill buffers on idle
759 * is required irrespective of SMT state.
760 */
761 if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
762 static_branch_enable(&cpu_buf_idle_clear);
763
764 if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
765 cpu_smt_disable(false);
766 }
767
768 static int __init mmio_stale_data_parse_cmdline(char *str)
769 {
770 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
771 return 0;
772
773 if (!str)
774 return -EINVAL;
775
776 if (!strcmp(str, "off")) {
777 mmio_mitigation = MMIO_MITIGATION_OFF;
778 } else if (!strcmp(str, "full")) {
779 mmio_mitigation = MMIO_MITIGATION_VERW;
780 } else if (!strcmp(str, "full,nosmt")) {
781 mmio_mitigation = MMIO_MITIGATION_VERW;
782 mmio_nosmt = true;
783 }
784
785 return 0;
786 }
787 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
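/*
 * Illustrative command line usage for the parser above:
 *
 *   mmio_stale_data=off        - disable the mitigation
 *   mmio_stale_data=full       - clear CPU buffers
 *   mmio_stale_data=full,nosmt - also disable SMT
 */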
788
789 #undef pr_fmt
790 #define pr_fmt(fmt) "Register File Data Sampling: " fmt
791
792 static const char * const rfds_strings[] = {
793 [RFDS_MITIGATION_OFF] = "Vulnerable",
794 [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
795 [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
796 };
797
798 static inline bool __init verw_clears_cpu_reg_file(void)
799 {
800 return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
801 }
802
803 static void __init rfds_select_mitigation(void)
804 {
805 if (!boot_cpu_has_bug(X86_BUG_RFDS)) {
806 rfds_mitigation = RFDS_MITIGATION_OFF;
807 return;
808 }
809
810 if (rfds_mitigation == RFDS_MITIGATION_AUTO) {
811 if (should_mitigate_vuln(X86_BUG_RFDS))
812 rfds_mitigation = RFDS_MITIGATION_VERW;
813 else
814 rfds_mitigation = RFDS_MITIGATION_OFF;
815 }
816
817 if (rfds_mitigation == RFDS_MITIGATION_OFF)
818 return;
819
820 if (verw_clears_cpu_reg_file())
821 verw_clear_cpu_buf_mitigation_selected = true;
822 }
823
824 static void __init rfds_update_mitigation(void)
825 {
826 if (!boot_cpu_has_bug(X86_BUG_RFDS))
827 return;
828
829 if (verw_clear_cpu_buf_mitigation_selected)
830 rfds_mitigation = RFDS_MITIGATION_VERW;
831
832 if (rfds_mitigation == RFDS_MITIGATION_VERW) {
833 if (!verw_clears_cpu_reg_file())
834 rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
835 }
836
837 pr_info("%s\n", rfds_strings[rfds_mitigation]);
838 }
839
840 static void __init rfds_apply_mitigation(void)
841 {
842 if (rfds_mitigation == RFDS_MITIGATION_VERW)
843 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
844 }
845
846 static __init int rfds_parse_cmdline(char *str)
847 {
848 if (!str)
849 return -EINVAL;
850
851 if (!boot_cpu_has_bug(X86_BUG_RFDS))
852 return 0;
853
854 if (!strcmp(str, "off"))
855 rfds_mitigation = RFDS_MITIGATION_OFF;
856 else if (!strcmp(str, "on"))
857 rfds_mitigation = RFDS_MITIGATION_VERW;
858
859 return 0;
860 }
861 early_param("reg_file_data_sampling", rfds_parse_cmdline);
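/*
 * Illustrative command line usage for the parser above:
 *
 *   reg_file_data_sampling=off - disable the RFDS mitigation
 *   reg_file_data_sampling=on  - clear the register file via VERW
 */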
862
863 #undef pr_fmt
864 #define pr_fmt(fmt) "SRBDS: " fmt
865
866 enum srbds_mitigations {
867 SRBDS_MITIGATION_OFF,
868 SRBDS_MITIGATION_AUTO,
869 SRBDS_MITIGATION_UCODE_NEEDED,
870 SRBDS_MITIGATION_FULL,
871 SRBDS_MITIGATION_TSX_OFF,
872 SRBDS_MITIGATION_HYPERVISOR,
873 };
874
875 static enum srbds_mitigations srbds_mitigation __ro_after_init =
876 IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;
877
878 static const char * const srbds_strings[] = {
879 [SRBDS_MITIGATION_OFF] = "Vulnerable",
880 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
881 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
882 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
883 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
884 };
885
886 static bool srbds_off;
887
888 void update_srbds_msr(void)
889 {
890 u64 mcu_ctrl;
891
892 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
893 return;
894
895 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
896 return;
897
898 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
899 return;
900
901 /*
902 * An MDS_NO CPU for which the SRBDS mitigation is not needed due to
903 * TSX being disabled may not have received the SRBDS MSR microcode.
904 */
905 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
906 return;
907
908 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
909
910 switch (srbds_mitigation) {
911 case SRBDS_MITIGATION_OFF:
912 case SRBDS_MITIGATION_TSX_OFF:
913 mcu_ctrl |= RNGDS_MITG_DIS;
914 break;
915 case SRBDS_MITIGATION_FULL:
916 mcu_ctrl &= ~RNGDS_MITG_DIS;
917 break;
918 default:
919 break;
920 }
921
922 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
923 }
924
925 static void __init srbds_select_mitigation(void)
926 {
927 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) {
928 srbds_mitigation = SRBDS_MITIGATION_OFF;
929 return;
930 }
931
932 if (srbds_mitigation == SRBDS_MITIGATION_AUTO) {
933 if (should_mitigate_vuln(X86_BUG_SRBDS))
934 srbds_mitigation = SRBDS_MITIGATION_FULL;
935 else {
936 srbds_mitigation = SRBDS_MITIGATION_OFF;
937 return;
938 }
939 }
940
941 /*
942 * Check to see if this is one of the MDS_NO systems supporting TSX that
943 * are only exposed to SRBDS when TSX is enabled or when the CPU is
944 * affected by the Processor MMIO Stale Data vulnerability.
945 */
946 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
947 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
948 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
949 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
950 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
951 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
952 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
953 else if (srbds_off)
954 srbds_mitigation = SRBDS_MITIGATION_OFF;
955
956 pr_info("%s\n", srbds_strings[srbds_mitigation]);
957 }
958
959 static void __init srbds_apply_mitigation(void)
960 {
961 update_srbds_msr();
962 }
963
964 static int __init srbds_parse_cmdline(char *str)
965 {
966 if (!str)
967 return -EINVAL;
968
969 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
970 return 0;
971
972 srbds_off = !strcmp(str, "off");
973 return 0;
974 }
975 early_param("srbds", srbds_parse_cmdline);
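/*
 * Illustrative command line usage for the parser above: "srbds=off"
 * opts out of the SRBDS microcode mitigation; any other value is
 * ignored and mitigation selection proceeds as usual.
 */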
976
977 #undef pr_fmt
978 #define pr_fmt(fmt) "L1D Flush : " fmt
979
980 enum l1d_flush_mitigations {
981 L1D_FLUSH_OFF = 0,
982 L1D_FLUSH_ON,
983 };
984
985 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
986
987 static void __init l1d_flush_select_mitigation(void)
988 {
989 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
990 return;
991
992 static_branch_enable(&switch_mm_cond_l1d_flush);
993 pr_info("Conditional flush on switch_mm() enabled\n");
994 }
995
996 static int __init l1d_flush_parse_cmdline(char *str)
997 {
998 if (!strcmp(str, "on"))
999 l1d_flush_mitigation = L1D_FLUSH_ON;
1000
1001 return 0;
1002 }
1003 early_param("l1d_flush", l1d_flush_parse_cmdline);
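/*
 * Illustrative command line usage for the parser above: "l1d_flush=on"
 * is the only recognized value; it arms the conditional L1D flush on
 * switch_mm(), provided the CPU has X86_FEATURE_FLUSH_L1D.
 */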
1004
1005 #undef pr_fmt
1006 #define pr_fmt(fmt) "GDS: " fmt
1007
1008 enum gds_mitigations {
1009 GDS_MITIGATION_OFF,
1010 GDS_MITIGATION_AUTO,
1011 GDS_MITIGATION_UCODE_NEEDED,
1012 GDS_MITIGATION_FORCE,
1013 GDS_MITIGATION_FULL,
1014 GDS_MITIGATION_FULL_LOCKED,
1015 GDS_MITIGATION_HYPERVISOR,
1016 };
1017
1018 static enum gds_mitigations gds_mitigation __ro_after_init =
1019 IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;
1020
1021 static const char * const gds_strings[] = {
1022 [GDS_MITIGATION_OFF] = "Vulnerable",
1023 [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
1024 [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode",
1025 [GDS_MITIGATION_FULL] = "Mitigation: Microcode",
1026 [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)",
1027 [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
1028 };
1029
1030 bool gds_ucode_mitigated(void)
1031 {
1032 return (gds_mitigation == GDS_MITIGATION_FULL ||
1033 gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
1034 }
1035 EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
1036
1037 void update_gds_msr(void)
1038 {
1039 u64 mcu_ctrl_after;
1040 u64 mcu_ctrl;
1041
1042 switch (gds_mitigation) {
1043 case GDS_MITIGATION_OFF:
1044 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1045 mcu_ctrl |= GDS_MITG_DIS;
1046 break;
1047 case GDS_MITIGATION_FULL_LOCKED:
1048 /*
1049 * The LOCKED state comes from the boot CPU. APs might not have
1050 * the same state. Make sure the mitigation is enabled on all
1051 * CPUs.
1052 */
1053 case GDS_MITIGATION_FULL:
1054 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1055 mcu_ctrl &= ~GDS_MITG_DIS;
1056 break;
1057 case GDS_MITIGATION_FORCE:
1058 case GDS_MITIGATION_UCODE_NEEDED:
1059 case GDS_MITIGATION_HYPERVISOR:
1060 case GDS_MITIGATION_AUTO:
1061 return;
1062 }
1063
1064 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1065
1066 /*
1067 * Check to make sure that the WRMSR value was not ignored. Writes to
1068 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
1069 * processor was not.
1070 */
1071 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
1072 WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
1073 }
1074
1075 static void __init gds_select_mitigation(void)
1076 {
1077 u64 mcu_ctrl;
1078
1079 if (!boot_cpu_has_bug(X86_BUG_GDS))
1080 return;
1081
1082 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1083 gds_mitigation = GDS_MITIGATION_HYPERVISOR;
1084 return;
1085 }
1086
1087 /* Will verify below that mitigation _can_ be disabled */
1088 if (gds_mitigation == GDS_MITIGATION_AUTO) {
1089 if (should_mitigate_vuln(X86_BUG_GDS))
1090 gds_mitigation = GDS_MITIGATION_FULL;
1091 else
1092 gds_mitigation = GDS_MITIGATION_OFF;
1093 }
1094
1095 /* No microcode */
1096 if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
1097 if (gds_mitigation != GDS_MITIGATION_FORCE)
1098 gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
1099 return;
1100 }
1101
1102 /* Microcode has mitigation, use it */
1103 if (gds_mitigation == GDS_MITIGATION_FORCE)
1104 gds_mitigation = GDS_MITIGATION_FULL;
1105
1106 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1107 if (mcu_ctrl & GDS_MITG_LOCKED) {
1108 if (gds_mitigation == GDS_MITIGATION_OFF)
1109 pr_warn("Mitigation locked. Disable failed.\n");
1110
1111 /*
1112 * The mitigation is selected from the boot CPU. All other CPUs
1113 * _should_ have the same state. If the boot CPU isn't locked
1114 * but others are then update_gds_msr() will WARN() of the state
1115 * mismatch. If the boot CPU is locked update_gds_msr() will
1116 * ensure the other CPUs have the mitigation enabled.
1117 */
1118 gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
1119 }
1120 }
1121
1122 static void __init gds_apply_mitigation(void)
1123 {
1124 if (!boot_cpu_has_bug(X86_BUG_GDS))
1125 return;
1126
1127 /* Microcode is present */
1128 if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
1129 update_gds_msr();
1130 else if (gds_mitigation == GDS_MITIGATION_FORCE) {
1131 /*
1132 * This only needs to be done on the boot CPU so do it
1133 * here rather than in update_gds_msr()
1134 */
1135 setup_clear_cpu_cap(X86_FEATURE_AVX);
1136 pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
1137 }
1138
1139 pr_info("%s\n", gds_strings[gds_mitigation]);
1140 }
1141
1142 static int __init gds_parse_cmdline(char *str)
1143 {
1144 if (!str)
1145 return -EINVAL;
1146
1147 if (!boot_cpu_has_bug(X86_BUG_GDS))
1148 return 0;
1149
1150 if (!strcmp(str, "off"))
1151 gds_mitigation = GDS_MITIGATION_OFF;
1152 else if (!strcmp(str, "force"))
1153 gds_mitigation = GDS_MITIGATION_FORCE;
1154
1155 return 0;
1156 }
1157 early_param("gather_data_sampling", gds_parse_cmdline);
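/*
 * Illustrative command line usage for the parser above:
 *
 *   gather_data_sampling=off   - disable the GDS mitigation
 *   gather_data_sampling=force - mitigate even without microcode, by
 *                                disabling AVX if necessary
 */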
1158
1159 #undef pr_fmt
1160 #define pr_fmt(fmt) "Spectre V1 : " fmt
1161
1162 enum spectre_v1_mitigation {
1163 SPECTRE_V1_MITIGATION_NONE,
1164 SPECTRE_V1_MITIGATION_AUTO,
1165 };
1166
1167 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
1168 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
1169 SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;
1170
1171 static const char * const spectre_v1_strings[] = {
1172 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
1173 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
1174 };
1175
1176 /*
1177 * Does SMAP provide full mitigation against speculative kernel access to
1178 * userspace?
1179 */
1180 static bool smap_works_speculatively(void)
1181 {
1182 if (!boot_cpu_has(X86_FEATURE_SMAP))
1183 return false;
1184
1185 /*
1186 * On CPUs which are vulnerable to Meltdown, SMAP does not
1187 * prevent speculative access to user data in the L1 cache.
1188 * Consider SMAP to be non-functional as a mitigation on these
1189 * CPUs.
1190 */
1191 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
1192 return false;
1193
1194 return true;
1195 }
1196
1197 static void __init spectre_v1_select_mitigation(void)
1198 {
1199 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1200 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1201
1202 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1))
1203 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1204 }
1205
1206 static void __init spectre_v1_apply_mitigation(void)
1207 {
1208 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1209 return;
1210
1211 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
1212 /*
1213 * With Spectre v1, a user can speculatively control either
1214 * path of a conditional swapgs with a user-controlled GS
1215 * value. The mitigation is to add lfences to both code paths.
1216 *
1217 * If FSGSBASE is enabled, the user can put a kernel address in
1218 * GS, in which case SMAP provides no protection.
1219 *
1220 * If FSGSBASE is disabled, the user can only put a user space
1221 * address in GS. That makes an attack harder, but still
1222 * possible if there's no SMAP protection.
1223 */
1224 if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
1225 !smap_works_speculatively()) {
1226 /*
1227 * Mitigation can be provided from SWAPGS itself or
1228 * PTI as the CR3 write in the Meltdown mitigation
1229 * is serializing.
1230 *
1231 * If neither is there, mitigate with an LFENCE to
1232 * stop speculation through swapgs.
1233 */
1234 if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
1235 !boot_cpu_has(X86_FEATURE_PTI))
1236 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
1237
1238 /*
1239 * Enable lfences in the kernel entry (non-swapgs)
1240 * paths, to prevent user entry from speculatively
1241 * skipping swapgs.
1242 */
1243 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
1244 }
1245 }
1246
1247 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1248 }
1249
1250 static int __init nospectre_v1_cmdline(char *str)
1251 {
1252 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1253 return 0;
1254 }
1255 early_param("nospectre_v1", nospectre_v1_cmdline);
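/*
 * Illustrative usage: booting with "nospectre_v1" disables the Spectre
 * V1 (swapgs) mitigation selection above.
 */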
1256
1257 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
1258
1259 /* Depends on spectre_v2 mitigation selected already */
1260 static inline bool cdt_possible(enum spectre_v2_mitigation mode)
1261 {
1262 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) ||
1263 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE))
1264 return false;
1265
1266 if (mode == SPECTRE_V2_RETPOLINE ||
1267 mode == SPECTRE_V2_EIBRS_RETPOLINE)
1268 return true;
1269
1270 return false;
1271 }
1272
1273 #undef pr_fmt
1274 #define pr_fmt(fmt) "RETBleed: " fmt
1275
1276 enum its_mitigation {
1277 ITS_MITIGATION_OFF,
1278 ITS_MITIGATION_AUTO,
1279 ITS_MITIGATION_VMEXIT_ONLY,
1280 ITS_MITIGATION_ALIGNED_THUNKS,
1281 ITS_MITIGATION_RETPOLINE_STUFF,
1282 };
1283
1284 static enum its_mitigation its_mitigation __ro_after_init =
1285 IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF;
1286
1287 enum retbleed_mitigation {
1288 RETBLEED_MITIGATION_NONE,
1289 RETBLEED_MITIGATION_AUTO,
1290 RETBLEED_MITIGATION_UNRET,
1291 RETBLEED_MITIGATION_IBPB,
1292 RETBLEED_MITIGATION_IBRS,
1293 RETBLEED_MITIGATION_EIBRS,
1294 RETBLEED_MITIGATION_STUFF,
1295 };
1296
1297 static const char * const retbleed_strings[] = {
1298 [RETBLEED_MITIGATION_NONE] = "Vulnerable",
1299 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
1300 [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
1301 [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
1302 [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
1303 [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
1304 };
1305
1306 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
1307 IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;
1308
1309 static int __ro_after_init retbleed_nosmt = false;
1310
1311 enum srso_mitigation {
1312 SRSO_MITIGATION_NONE,
1313 SRSO_MITIGATION_AUTO,
1314 SRSO_MITIGATION_UCODE_NEEDED,
1315 SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
1316 SRSO_MITIGATION_MICROCODE,
1317 SRSO_MITIGATION_NOSMT,
1318 SRSO_MITIGATION_SAFE_RET,
1319 SRSO_MITIGATION_IBPB,
1320 SRSO_MITIGATION_IBPB_ON_VMEXIT,
1321 SRSO_MITIGATION_BP_SPEC_REDUCE,
1322 };
1323
1324 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
1325
1326 static int __init retbleed_parse_cmdline(char *str)
1327 {
1328 if (!str)
1329 return -EINVAL;
1330
1331 while (str) {
1332 char *next = strchr(str, ',');
1333 if (next) {
1334 *next = 0;
1335 next++;
1336 }
1337
1338 if (!strcmp(str, "off")) {
1339 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1340 } else if (!strcmp(str, "auto")) {
1341 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1342 } else if (!strcmp(str, "unret")) {
1343 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1344 } else if (!strcmp(str, "ibpb")) {
1345 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1346 } else if (!strcmp(str, "stuff")) {
1347 retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1348 } else if (!strcmp(str, "nosmt")) {
1349 retbleed_nosmt = true;
1350 } else if (!strcmp(str, "force")) {
1351 setup_force_cpu_bug(X86_BUG_RETBLEED);
1352 } else {
1353 pr_err("Ignoring unknown retbleed option (%s).", str);
1354 }
1355
1356 str = next;
1357 }
1358
1359 return 0;
1360 }
1361 early_param("retbleed", retbleed_parse_cmdline);
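/*
 * Illustrative usage of the comma-separated parser above, e.g.
 * "retbleed=ibpb,nosmt":
 *
 *   off / auto / unret / ibpb / stuff - select a mitigation mode
 *   nosmt                             - additionally disable SMT
 *   force                             - treat the CPU as affected
 */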
1362
1363 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
1364 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
1365
1366 static void __init retbleed_select_mitigation(void)
1367 {
1368 if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) {
1369 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1370 return;
1371 }
1372
1373 switch (retbleed_mitigation) {
1374 case RETBLEED_MITIGATION_UNRET:
1375 if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
1376 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1377 pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
1378 }
1379 break;
1380 case RETBLEED_MITIGATION_IBPB:
1381 if (!boot_cpu_has(X86_FEATURE_IBPB)) {
1382 pr_err("WARNING: CPU does not support IBPB.\n");
1383 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1384 } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
1385 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
1386 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1387 }
1388 break;
1389 case RETBLEED_MITIGATION_STUFF:
1390 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1391 pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
1392 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1393 } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1394 pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
1395 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1396 }
1397 break;
1398 default:
1399 break;
1400 }
1401
1402 if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
1403 return;
1404
1405 if (!should_mitigate_vuln(X86_BUG_RETBLEED)) {
1406 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1407 return;
1408 }
1409
1410 /* Intel mitigation selected in retbleed_update_mitigation() */
1411 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1412 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
1413 if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
1414 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1415 else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
1416 boot_cpu_has(X86_FEATURE_IBPB))
1417 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1418 else
1419 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1420 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1421 /* Final mitigation depends on spectre-v2 selection */
1422 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
1423 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1424 else if (boot_cpu_has(X86_FEATURE_IBRS))
1425 retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1426 else
1427 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1428 }
1429 }
1430
1431 static void __init retbleed_update_mitigation(void)
1432 {
1433 if (!boot_cpu_has_bug(X86_BUG_RETBLEED))
1434 return;
1435
1436 /* ITS can also enable stuffing */
1437 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF)
1438 retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1439
1440 /* If SRSO is using IBPB, that works for retbleed too */
1441 if (srso_mitigation == SRSO_MITIGATION_IBPB)
1442 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1443
1444 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF &&
1445 !cdt_possible(spectre_v2_enabled)) {
1446 pr_err("WARNING: retbleed=stuff depends on retpoline\n");
1447 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1448 }
1449
1450 /*
1451 * Let IBRS trump all on Intel without affecting the effects of the
1452 * retbleed= cmdline option except for call depth based stuffing
1453 */
1454 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1455 switch (spectre_v2_enabled) {
1456 case SPECTRE_V2_IBRS:
1457 retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1458 break;
1459 case SPECTRE_V2_EIBRS:
1460 case SPECTRE_V2_EIBRS_RETPOLINE:
1461 case SPECTRE_V2_EIBRS_LFENCE:
1462 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1463 break;
1464 default:
1465 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) {
1466 if (retbleed_mitigation != RETBLEED_MITIGATION_NONE)
1467 pr_err(RETBLEED_INTEL_MSG);
1468
1469 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1470 }
1471 }
1472 }
1473
1474 pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
1475 }
1476
1477 static void __init retbleed_apply_mitigation(void)
1478 {
1479 bool mitigate_smt = false;
1480
1481 switch (retbleed_mitigation) {
1482 case RETBLEED_MITIGATION_NONE:
1483 return;
1484
1485 case RETBLEED_MITIGATION_UNRET:
1486 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1487 setup_force_cpu_cap(X86_FEATURE_UNRET);
1488
1489 set_return_thunk(retbleed_return_thunk);
1490
1491 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
1492 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
1493 pr_err(RETBLEED_UNTRAIN_MSG);
1494
1495 mitigate_smt = true;
1496 break;
1497
1498 case RETBLEED_MITIGATION_IBPB:
1499 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1500 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
1501 mitigate_smt = true;
1502
1503 /*
1504 * IBPB on entry already obviates the need for
1505 * software-based untraining so clear those in case some
1506 * other mitigation like SRSO has selected them.
1507 */
1508 setup_clear_cpu_cap(X86_FEATURE_UNRET);
1509 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
1510
1511 /*
1512 * There is no need for RSB filling: write_ibpb() ensures
1513 * all predictions, including the RSB, are invalidated,
1514 * regardless of IBPB implementation.
1515 */
1516 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1517
1518 break;
1519
1520 case RETBLEED_MITIGATION_STUFF:
1521 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1522 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1523
1524 set_return_thunk(call_depth_return_thunk);
1525 break;
1526
1527 default:
1528 break;
1529 }
1530
1531 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
1532 (retbleed_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
1533 cpu_smt_disable(false);
1534 }
1535
1536 #undef pr_fmt
1537 #define pr_fmt(fmt) "ITS: " fmt
1538
1539 static const char * const its_strings[] = {
1540 [ITS_MITIGATION_OFF] = "Vulnerable",
1541 [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
1542 [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
1543 [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
1544 };
1545
1546 static int __init its_parse_cmdline(char *str)
1547 {
1548 if (!str)
1549 return -EINVAL;
1550
1551 if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
1552 pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
1553 return 0;
1554 }
1555
1556 if (!strcmp(str, "off")) {
1557 its_mitigation = ITS_MITIGATION_OFF;
1558 } else if (!strcmp(str, "on")) {
1559 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1560 } else if (!strcmp(str, "force")) {
1561 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1562 setup_force_cpu_bug(X86_BUG_ITS);
1563 } else if (!strcmp(str, "vmexit")) {
1564 its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
1565 } else if (!strcmp(str, "stuff")) {
1566 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1567 } else {
1568 pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
1569 }
1570
1571 return 0;
1572 }
1573 early_param("indirect_target_selection", its_parse_cmdline);
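/*
 * Illustrative command line usage for the parser above:
 *
 *   indirect_target_selection=off    - disable the ITS mitigation
 *   indirect_target_selection=on     - use aligned branch/return thunks
 *   indirect_target_selection=force  - also mark the CPU as affected
 *   indirect_target_selection=vmexit - mitigate at VM-exit only
 *   indirect_target_selection=stuff  - use retpolines plus RSB stuffing
 */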
1574
1575 static void __init its_select_mitigation(void)
1576 {
1577 if (!boot_cpu_has_bug(X86_BUG_ITS)) {
1578 its_mitigation = ITS_MITIGATION_OFF;
1579 return;
1580 }
1581
1582 if (its_mitigation == ITS_MITIGATION_AUTO) {
1583 if (should_mitigate_vuln(X86_BUG_ITS))
1584 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1585 else
1586 its_mitigation = ITS_MITIGATION_OFF;
1587 }
1588
1589 if (its_mitigation == ITS_MITIGATION_OFF)
1590 return;
1591
1592 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
1593 !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
1594 pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
1595 its_mitigation = ITS_MITIGATION_OFF;
1596 return;
1597 }
1598
1599 if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
1600 pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
1601 its_mitigation = ITS_MITIGATION_OFF;
1602 return;
1603 }
1604
1605 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1606 !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1607 pr_err("RSB stuff mitigation not supported, using default\n");
1608 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1609 }
1610
1611 if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY &&
1612 !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
1613 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1614 }
1615
1616 static void __init its_update_mitigation(void)
1617 {
1618 if (!boot_cpu_has_bug(X86_BUG_ITS))
1619 return;
1620
1621 switch (spectre_v2_enabled) {
1622 case SPECTRE_V2_NONE:
1623 if (its_mitigation != ITS_MITIGATION_OFF)
1624 pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
1625 its_mitigation = ITS_MITIGATION_OFF;
1626 break;
1627 case SPECTRE_V2_RETPOLINE:
1628 case SPECTRE_V2_EIBRS_RETPOLINE:
1629 /* Retpoline+CDT mitigates ITS */
1630 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
1631 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1632 break;
1633 case SPECTRE_V2_LFENCE:
1634 case SPECTRE_V2_EIBRS_LFENCE:
1635 pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
1636 its_mitigation = ITS_MITIGATION_OFF;
1637 break;
1638 default:
1639 break;
1640 }
1641
1642 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1643 !cdt_possible(spectre_v2_enabled))
1644 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1645
1646 pr_info("%s\n", its_strings[its_mitigation]);
1647 }
1648
1649 static void __init its_apply_mitigation(void)
1650 {
1651 switch (its_mitigation) {
1652 case ITS_MITIGATION_OFF:
1653 case ITS_MITIGATION_AUTO:
1654 case ITS_MITIGATION_VMEXIT_ONLY:
1655 break;
1656 case ITS_MITIGATION_ALIGNED_THUNKS:
1657 if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
1658 setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
1659
1660 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1661 set_return_thunk(its_return_thunk);
1662 break;
1663 case ITS_MITIGATION_RETPOLINE_STUFF:
1664 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1665 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1666 set_return_thunk(call_depth_return_thunk);
1667 break;
1668 }
1669 }
1670
1671 #undef pr_fmt
1672 #define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt
1673
1674 enum tsa_mitigations {
1675 TSA_MITIGATION_NONE,
1676 TSA_MITIGATION_AUTO,
1677 TSA_MITIGATION_UCODE_NEEDED,
1678 TSA_MITIGATION_USER_KERNEL,
1679 TSA_MITIGATION_VM,
1680 TSA_MITIGATION_FULL,
1681 };
1682
1683 static const char * const tsa_strings[] = {
1684 [TSA_MITIGATION_NONE] = "Vulnerable",
1685 [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
1686 [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
1687 [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
1688 [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
1689 };
1690
1691 static enum tsa_mitigations tsa_mitigation __ro_after_init =
1692 IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;
1693
1694 static int __init tsa_parse_cmdline(char *str)
1695 {
1696 if (!str)
1697 return -EINVAL;
1698
1699 if (!strcmp(str, "off"))
1700 tsa_mitigation = TSA_MITIGATION_NONE;
1701 else if (!strcmp(str, "on"))
1702 tsa_mitigation = TSA_MITIGATION_FULL;
1703 else if (!strcmp(str, "user"))
1704 tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1705 else if (!strcmp(str, "vm"))
1706 tsa_mitigation = TSA_MITIGATION_VM;
1707 else
1708 pr_err("Ignoring unknown tsa=%s option.\n", str);
1709
1710 return 0;
1711 }
1712 early_param("tsa", tsa_parse_cmdline);
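/*
 * Illustrative command line usage for the parser above:
 *
 *   tsa=off  - no mitigation
 *   tsa=on   - clear CPU buffers at both boundaries
 *   tsa=user - clear CPU buffers at the user/kernel boundary only
 *   tsa=vm   - clear CPU buffers before entering a guest only
 */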
1713
1714 static void __init tsa_select_mitigation(void)
1715 {
1716 if (!boot_cpu_has_bug(X86_BUG_TSA)) {
1717 tsa_mitigation = TSA_MITIGATION_NONE;
1718 return;
1719 }
1720
1721 if (tsa_mitigation == TSA_MITIGATION_AUTO) {
1722 bool vm = false, uk = false;
1723
1724 tsa_mitigation = TSA_MITIGATION_NONE;
1725
1726 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
1727 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) {
1728 tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1729 uk = true;
1730 }
1731
1732 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
1733 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
1734 tsa_mitigation = TSA_MITIGATION_VM;
1735 vm = true;
1736 }
1737
1738 if (uk && vm)
1739 tsa_mitigation = TSA_MITIGATION_FULL;
1740 }
1741
1742 if (tsa_mitigation == TSA_MITIGATION_NONE)
1743 return;
1744
1745 if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
1746 tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
1747
1748 /*
1749 * No need to set verw_clear_cpu_buf_mitigation_selected - it
1750 * doesn't fit all cases here and it is not needed because this
1751 * is the only VERW-based mitigation on AMD.
1752 */
1753 pr_info("%s\n", tsa_strings[tsa_mitigation]);
1754 }
1755
1756 static void __init tsa_apply_mitigation(void)
1757 {
1758 switch (tsa_mitigation) {
1759 case TSA_MITIGATION_USER_KERNEL:
1760 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1761 break;
1762 case TSA_MITIGATION_VM:
1763 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1764 break;
1765 case TSA_MITIGATION_FULL:
1766 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1767 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1768 break;
1769 default:
1770 break;
1771 }
1772 }
1773
1774 #undef pr_fmt
1775 #define pr_fmt(fmt) "Spectre V2 : " fmt
1776
1777 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
1778 SPECTRE_V2_USER_NONE;
1779 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
1780 SPECTRE_V2_USER_NONE;
1781
1782 #ifdef CONFIG_MITIGATION_RETPOLINE
1783 static bool spectre_v2_bad_module;
1784
1785 bool retpoline_module_ok(bool has_retpoline)
1786 {
1787 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1788 return true;
1789
1790 pr_err("System may be vulnerable to spectre v2\n");
1791 spectre_v2_bad_module = true;
1792 return false;
1793 }
1794
1795 static inline const char *spectre_v2_module_string(void)
1796 {
1797 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1798 }
1799 #else
1800 static inline const char *spectre_v2_module_string(void) { return ""; }
1801 #endif
1802
1803 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1804 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1805 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1806 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1807
1808 #ifdef CONFIG_BPF_SYSCALL
1809 void unpriv_ebpf_notify(int new_state)
1810 {
1811 if (new_state)
1812 return;
1813
1814 /* Unprivileged eBPF is enabled */
1815
1816 switch (spectre_v2_enabled) {
1817 case SPECTRE_V2_EIBRS:
1818 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1819 break;
1820 case SPECTRE_V2_EIBRS_LFENCE:
1821 if (sched_smt_active())
1822 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1823 break;
1824 default:
1825 break;
1826 }
1827 }
1828 #endif
1829
1830 /* The kernel command line selection for spectre v2 */
1831 enum spectre_v2_mitigation_cmd {
1832 SPECTRE_V2_CMD_NONE,
1833 SPECTRE_V2_CMD_AUTO,
1834 SPECTRE_V2_CMD_FORCE,
1835 SPECTRE_V2_CMD_RETPOLINE,
1836 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1837 SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1838 SPECTRE_V2_CMD_EIBRS,
1839 SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1840 SPECTRE_V2_CMD_EIBRS_LFENCE,
1841 SPECTRE_V2_CMD_IBRS,
1842 };
1843
1844 static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init =
1845 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE;
1846
1847 enum spectre_v2_user_mitigation_cmd {
1848 SPECTRE_V2_USER_CMD_NONE,
1849 SPECTRE_V2_USER_CMD_AUTO,
1850 SPECTRE_V2_USER_CMD_FORCE,
1851 SPECTRE_V2_USER_CMD_PRCTL,
1852 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1853 SPECTRE_V2_USER_CMD_SECCOMP,
1854 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1855 };
1856
1857 static enum spectre_v2_user_mitigation_cmd spectre_v2_user_cmd __ro_after_init =
1858 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;
1859
1860 static const char * const spectre_v2_user_strings[] = {
1861 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
1862 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
1863 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
1864 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
1865 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
1866 };
1867
1868 static int __init spectre_v2_user_parse_cmdline(char *str)
1869 {
1870 if (!str)
1871 return -EINVAL;
1872
1873 if (!strcmp(str, "auto"))
1874 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_AUTO;
1875 else if (!strcmp(str, "off"))
1876 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_NONE;
1877 else if (!strcmp(str, "on"))
1878 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_FORCE;
1879 else if (!strcmp(str, "prctl"))
1880 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_PRCTL;
1881 else if (!strcmp(str, "prctl,ibpb"))
1882 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_PRCTL_IBPB;
1883 else if (!strcmp(str, "seccomp"))
1884 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_SECCOMP;
1885 else if (!strcmp(str, "seccomp,ibpb"))
1886 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_SECCOMP_IBPB;
1887 else
1888 pr_err("Ignoring unknown spectre_v2_user option (%s).", str);
1889
1890 return 0;
1891 }
1892 early_param("spectre_v2_user", spectre_v2_user_parse_cmdline);
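
/*
 * Usage example: "spectre_v2_user=prctl,ibpb" keeps IBPB in strict
 * (always-on) mode while leaving STIBP opt-in per task via prctl();
 * "spectre_v2_user=seccomp" behaves like "prctl" but, with CONFIG_SECCOMP
 * enabled, also force-disables indirect branch speculation for seccomp
 * threads (see arch_seccomp_spec_mitigate()).
 */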
1893
1894 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1895 {
1896 return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1897 }
1898
1899 static void __init spectre_v2_user_select_mitigation(void)
1900 {
1901 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1902 return;
1903
1904 switch (spectre_v2_user_cmd) {
1905 case SPECTRE_V2_USER_CMD_NONE:
1906 return;
1907 case SPECTRE_V2_USER_CMD_FORCE:
1908 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1909 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1910 break;
1911 case SPECTRE_V2_USER_CMD_AUTO:
1912 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER))
1913 break;
1914 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1915 if (smt_mitigations == SMT_MITIGATIONS_OFF)
1916 break;
1917 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1918 break;
1919 case SPECTRE_V2_USER_CMD_PRCTL:
1920 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1921 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1922 break;
1923 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1924 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1925 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1926 break;
1927 case SPECTRE_V2_USER_CMD_SECCOMP:
1928 if (IS_ENABLED(CONFIG_SECCOMP))
1929 spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
1930 else
1931 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1932 spectre_v2_user_stibp = spectre_v2_user_ibpb;
1933 break;
1934 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1935 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1936 if (IS_ENABLED(CONFIG_SECCOMP))
1937 spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
1938 else
1939 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1940 break;
1941 }
1942
1943 /*
1944 * At this point, an STIBP mode other than "off" has been set.
1945 * If STIBP support is not being forced, check if STIBP always-on
1946 * is preferred.
1947 */
1948 if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1949 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
1950 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1951 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1952
1953 if (!boot_cpu_has(X86_FEATURE_IBPB))
1954 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1955
1956 if (!boot_cpu_has(X86_FEATURE_STIBP))
1957 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1958 }
1959
1960 static void __init spectre_v2_user_update_mitigation(void)
1961 {
1962 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1963 return;
1964
1965 /* The spectre_v2 cmd line can override spectre_v2_user options */
1966 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
1967 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1968 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1969 } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
1970 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1971 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1972 }
1973
1974 /*
1975	 * If STIBP is not supported, if Intel enhanced IBRS is enabled, or if
1976	 * SMT is impossible, STIBP is not required.
1977 *
1978 * Intel's Enhanced IBRS also protects against cross-thread branch target
1979 * injection in user-mode as the IBRS bit remains always set which
1980 * implicitly enables cross-thread protections. However, in legacy IBRS
1981 * mode, the IBRS bit is set only on kernel entry and cleared on return
1982 * to userspace. AMD Automatic IBRS also does not protect userspace.
1983 * These modes therefore disable the implicit cross-thread protection,
1984 * so allow for STIBP to be selected in those cases.
1985 */
1986 if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1987 !cpu_smt_possible() ||
1988 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1989 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
1990 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1991 return;
1992 }
1993
1994 if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE &&
1995 (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
1996 retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) {
1997 if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT &&
1998 spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED)
1999 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
2000 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
2001 }
2002 pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]);
2003 }
2004
2005 static void __init spectre_v2_user_apply_mitigation(void)
2006 {
2007 /* Initialize Indirect Branch Prediction Barrier */
2008 if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) {
2009 static_branch_enable(&switch_vcpu_ibpb);
2010
2011 switch (spectre_v2_user_ibpb) {
2012 case SPECTRE_V2_USER_STRICT:
2013 static_branch_enable(&switch_mm_always_ibpb);
2014 break;
2015 case SPECTRE_V2_USER_PRCTL:
2016 case SPECTRE_V2_USER_SECCOMP:
2017 static_branch_enable(&switch_mm_cond_ibpb);
2018 break;
2019 default:
2020 break;
2021 }
2022
2023 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
2024 static_key_enabled(&switch_mm_always_ibpb) ?
2025 "always-on" : "conditional");
2026 }
2027 }
2028
2029 static const char * const spectre_v2_strings[] = {
2030 [SPECTRE_V2_NONE] = "Vulnerable",
2031 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
2032 [SPECTRE_V2_LFENCE] = "Vulnerable: LFENCE",
2033 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
2034 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
2035 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
2036 [SPECTRE_V2_IBRS] = "Mitigation: IBRS",
2037 };
2038
2039 static bool nospectre_v2 __ro_after_init;
2040
2041 static int __init nospectre_v2_parse_cmdline(char *str)
2042 {
2043 nospectre_v2 = true;
2044 spectre_v2_cmd = SPECTRE_V2_CMD_NONE;
2045 return 0;
2046 }
2047 early_param("nospectre_v2", nospectre_v2_parse_cmdline);
2048
2049 static int __init spectre_v2_parse_cmdline(char *str)
2050 {
2051 if (!str)
2052 return -EINVAL;
2053
2054 if (nospectre_v2)
2055 return 0;
2056
2057 if (!strcmp(str, "off")) {
2058 spectre_v2_cmd = SPECTRE_V2_CMD_NONE;
2059 } else if (!strcmp(str, "on")) {
2060 spectre_v2_cmd = SPECTRE_V2_CMD_FORCE;
2061 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
2062 setup_force_cpu_bug(X86_BUG_SPECTRE_V2_USER);
2063 } else if (!strcmp(str, "retpoline")) {
2064 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE;
2065 } else if (!strcmp(str, "retpoline,amd") ||
2066 !strcmp(str, "retpoline,lfence")) {
2067 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE_LFENCE;
2068 } else if (!strcmp(str, "retpoline,generic")) {
2069 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE_GENERIC;
2070 } else if (!strcmp(str, "eibrs")) {
2071 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS;
2072 } else if (!strcmp(str, "eibrs,lfence")) {
2073 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS_LFENCE;
2074 } else if (!strcmp(str, "eibrs,retpoline")) {
2075 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS_RETPOLINE;
2076 } else if (!strcmp(str, "auto")) {
2077 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2078 } else if (!strcmp(str, "ibrs")) {
2079 spectre_v2_cmd = SPECTRE_V2_CMD_IBRS;
2080 } else {
2081 pr_err("Ignoring unknown spectre_v2 option (%s).", str);
2082 }
2083
2084 return 0;
2085 }
2086 early_param("spectre_v2", spectre_v2_parse_cmdline);
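
/*
 * Usage example: "spectre_v2=eibrs,retpoline" asks for Enhanced/Automatic
 * IBRS combined with retpolines; spectre_v2_select_mitigation() falls back
 * to AUTO if the CPU lacks X86_FEATURE_IBRS_ENHANCED or the kernel was not
 * built with CONFIG_MITIGATION_RETPOLINE. Note that "nospectre_v2" wins
 * over any "spectre_v2=" value because the parser above returns early once
 * nospectre_v2 is set.
 */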
2087
2088 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
2089 {
2090 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
2091 pr_err("Kernel not compiled with retpoline; no mitigation available!");
2092 return SPECTRE_V2_NONE;
2093 }
2094
2095 return SPECTRE_V2_RETPOLINE;
2096 }
2097
2098 static bool __ro_after_init rrsba_disabled;
2099
2100 /* Disable in-kernel use of non-RSB RET predictors */
2101 static void __init spec_ctrl_disable_kernel_rrsba(void)
2102 {
2103 if (rrsba_disabled)
2104 return;
2105
2106 if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
2107 rrsba_disabled = true;
2108 return;
2109 }
2110
2111 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
2112 return;
2113
2114 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
2115 update_spec_ctrl(x86_spec_ctrl_base);
2116 rrsba_disabled = true;
2117 }
2118
2119 static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
2120 {
2121 /*
2122 * WARNING! There are many subtleties to consider when changing *any*
2123 * code related to RSB-related mitigations. Before doing so, carefully
2124 * read the following document, and update if necessary:
2125 *
2126 * Documentation/admin-guide/hw-vuln/rsb.rst
2127 *
2128 * In an overly simplified nutshell:
2129 *
2130 * - User->user RSB attacks are conditionally mitigated during
2131 * context switches by cond_mitigation -> write_ibpb().
2132 *
2133 * - User->kernel and guest->host attacks are mitigated by eIBRS or
2134 * RSB filling.
2135 *
2136 * Though, depending on config, note that other alternative
2137 * mitigations may end up getting used instead, e.g., IBPB on
2138 * entry/vmexit, call depth tracking, or return thunks.
2139 */
2140
2141 switch (mode) {
2142 case SPECTRE_V2_NONE:
2143 break;
2144
2145 case SPECTRE_V2_EIBRS:
2146 case SPECTRE_V2_EIBRS_LFENCE:
2147 case SPECTRE_V2_EIBRS_RETPOLINE:
2148 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2149 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
2150 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
2151 }
2152 break;
2153
2154 case SPECTRE_V2_RETPOLINE:
2155 case SPECTRE_V2_LFENCE:
2156 case SPECTRE_V2_IBRS:
2157 pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
2158 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
2159 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
2160 break;
2161
2162 default:
2163 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
2164 dump_stack();
2165 break;
2166 }
2167 }
2168
2169 /*
2170 * Set BHI_DIS_S to prevent indirect branches in the kernel from being
2171 * influenced by branch history in userspace. Not needed if BHI_NO is set.
2172 */
2173 static bool __init spec_ctrl_bhi_dis(void)
2174 {
2175 if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
2176 return false;
2177
2178 x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
2179 update_spec_ctrl(x86_spec_ctrl_base);
2180 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
2181
2182 return true;
2183 }
2184
2185 enum bhi_mitigations {
2186 BHI_MITIGATION_OFF,
2187 BHI_MITIGATION_AUTO,
2188 BHI_MITIGATION_ON,
2189 BHI_MITIGATION_VMEXIT_ONLY,
2190 };
2191
2192 static enum bhi_mitigations bhi_mitigation __ro_after_init =
2193 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF;
2194
2195 static int __init spectre_bhi_parse_cmdline(char *str)
2196 {
2197 if (!str)
2198 return -EINVAL;
2199
2200 if (!strcmp(str, "off"))
2201 bhi_mitigation = BHI_MITIGATION_OFF;
2202 else if (!strcmp(str, "on"))
2203 bhi_mitigation = BHI_MITIGATION_ON;
2204 else if (!strcmp(str, "vmexit"))
2205 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2206 else
2207 pr_err("Ignoring unknown spectre_bhi option (%s)", str);
2208
2209 return 0;
2210 }
2211 early_param("spectre_bhi", spectre_bhi_parse_cmdline);
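
/*
 * Usage example: "spectre_bhi=vmexit" restricts the software BHB-clearing
 * sequence to the VM-exit path (X86_FEATURE_CLEAR_BHB_VMEXIT), whereas
 * "spectre_bhi=on" also clears the branch history on syscall entry. When
 * the CPU offers BHI_DIS_S, bhi_apply_mitigation() below prefers the
 * hardware control over the software clearing loop.
 */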
2212
2213 static void __init bhi_select_mitigation(void)
2214 {
2215 if (!boot_cpu_has(X86_BUG_BHI))
2216 bhi_mitigation = BHI_MITIGATION_OFF;
2217
2218 if (bhi_mitigation != BHI_MITIGATION_AUTO)
2219 return;
2220
2221 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) {
2222 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
2223 bhi_mitigation = BHI_MITIGATION_ON;
2224 else
2225 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2226 } else {
2227 bhi_mitigation = BHI_MITIGATION_OFF;
2228 }
2229 }
2230
2231 static void __init bhi_update_mitigation(void)
2232 {
2233 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE)
2234 bhi_mitigation = BHI_MITIGATION_OFF;
2235 }
2236
2237 static void __init bhi_apply_mitigation(void)
2238 {
2239 if (bhi_mitigation == BHI_MITIGATION_OFF)
2240 return;
2241
2242 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
2243 if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
2244 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
2245 spec_ctrl_disable_kernel_rrsba();
2246 if (rrsba_disabled)
2247 return;
2248 }
2249
2250 if (!IS_ENABLED(CONFIG_X86_64))
2251 return;
2252
2253 /* Mitigate in hardware if supported */
2254 if (spec_ctrl_bhi_dis())
2255 return;
2256
2257 if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
2258 pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
2259 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2260 return;
2261 }
2262
2263 pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
2264 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
2265 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2266 }
2267
2268 static void __init spectre_v2_select_mitigation(void)
2269 {
2270 if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE ||
2271 spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2272 spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
2273 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2274 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2275 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
2276 pr_err("RETPOLINE selected but not compiled in. Switching to AUTO select\n");
2277 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2278 }
2279
2280 if ((spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS ||
2281 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2282 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2283 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2284 pr_err("EIBRS selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n");
2285 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2286 }
2287
2288 if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2289 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
2290 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
2291 pr_err("LFENCE selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n");
2292 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2293 }
2294
2295 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
2296 pr_err("IBRS selected but not compiled in. Switching to AUTO select\n");
2297 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2298 }
2299
2300 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
2301 pr_err("IBRS selected but not Intel CPU. Switching to AUTO select\n");
2302 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2303 }
2304
2305 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
2306 pr_err("IBRS selected but CPU doesn't have IBRS. Switching to AUTO select\n");
2307 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2308 }
2309
2310 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
2311 pr_err("IBRS selected but running as XenPV guest. Switching to AUTO select\n");
2312 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2313 }
2314
2315 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) {
2316 spectre_v2_cmd = SPECTRE_V2_CMD_NONE;
2317 return;
2318 }
2319
2320 switch (spectre_v2_cmd) {
2321 case SPECTRE_V2_CMD_NONE:
2322 return;
2323
2324 case SPECTRE_V2_CMD_AUTO:
2325 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2))
2326 break;
2327 fallthrough;
2328 case SPECTRE_V2_CMD_FORCE:
2329 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2330 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2331 break;
2332 }
2333
2334 spectre_v2_enabled = spectre_v2_select_retpoline();
2335 break;
2336
2337 case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
2338 pr_err(SPECTRE_V2_LFENCE_MSG);
2339 spectre_v2_enabled = SPECTRE_V2_LFENCE;
2340 break;
2341
2342 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
2343 spectre_v2_enabled = SPECTRE_V2_RETPOLINE;
2344 break;
2345
2346 case SPECTRE_V2_CMD_RETPOLINE:
2347 spectre_v2_enabled = spectre_v2_select_retpoline();
2348 break;
2349
2350 case SPECTRE_V2_CMD_IBRS:
2351 spectre_v2_enabled = SPECTRE_V2_IBRS;
2352 break;
2353
2354 case SPECTRE_V2_CMD_EIBRS:
2355 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2356 break;
2357
2358 case SPECTRE_V2_CMD_EIBRS_LFENCE:
2359 spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE;
2360 break;
2361
2362 case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
2363 spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE;
2364 break;
2365 }
2366 }
2367
2368 static void __init spectre_v2_update_mitigation(void)
2369 {
2370 if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO &&
2371 !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) {
2372 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
2373 boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2374 retbleed_mitigation != RETBLEED_MITIGATION_NONE &&
2375 retbleed_mitigation != RETBLEED_MITIGATION_STUFF &&
2376 boot_cpu_has(X86_FEATURE_IBRS) &&
2377 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
2378 spectre_v2_enabled = SPECTRE_V2_IBRS;
2379 }
2380 }
2381
2382 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2383 pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]);
2384 }
2385
2386 static void __init spectre_v2_apply_mitigation(void)
2387 {
2388 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2389 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
2390
2391 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2392 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
2393 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
2394 } else {
2395 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
2396 update_spec_ctrl(x86_spec_ctrl_base);
2397 }
2398 }
2399
2400 switch (spectre_v2_enabled) {
2401 case SPECTRE_V2_NONE:
2402 return;
2403
2404 case SPECTRE_V2_EIBRS:
2405 break;
2406
2407 case SPECTRE_V2_IBRS:
2408 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
2409 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
2410 pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
2411 break;
2412
2413 case SPECTRE_V2_LFENCE:
2414 case SPECTRE_V2_EIBRS_LFENCE:
2415 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
2416 fallthrough;
2417
2418 case SPECTRE_V2_RETPOLINE:
2419 case SPECTRE_V2_EIBRS_RETPOLINE:
2420 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
2421 break;
2422 }
2423
2424 /*
2425	 * Disable alternate RSB predictions in the kernel when indirect CALLs and
2426	 * JMPs get protection against BHI and Intramode-BTI, but RET
2427 * prediction from a non-RSB predictor is still a risk.
2428 */
2429 if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE ||
2430 spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE ||
2431 spectre_v2_enabled == SPECTRE_V2_RETPOLINE)
2432 spec_ctrl_disable_kernel_rrsba();
2433
2434 spectre_v2_select_rsb_mitigation(spectre_v2_enabled);
2435
2436 /*
2437 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
2438 * and Enhanced IBRS protect firmware too, so enable IBRS around
2439 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
2440 * otherwise enabled.
2441 *
2442 * Use "spectre_v2_enabled" to check Enhanced IBRS instead of
2443 * boot_cpu_has(), because the user might select retpoline on the kernel
2444	 * command line and, if the CPU supports Enhanced IBRS, the kernel might
2445	 * unintentionally not enable IBRS around firmware calls.
2446 */
2447 if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2448 boot_cpu_has(X86_FEATURE_IBPB) &&
2449 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2450 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
2451
2452 if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) {
2453 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
2454 pr_info("Enabling Speculation Barrier for firmware calls\n");
2455 }
2456
2457 } else if (boot_cpu_has(X86_FEATURE_IBRS) &&
2458 !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2459 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
2460 pr_info("Enabling Restricted Speculation for firmware calls\n");
2461 }
2462 }
2463
2464 static void update_stibp_msr(void * __unused)
2465 {
2466 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
2467 update_spec_ctrl(val);
2468 }
2469
2470 /* Update x86_spec_ctrl_base in case SMT state changed. */
2471 static void update_stibp_strict(void)
2472 {
2473 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
2474
2475 if (sched_smt_active())
2476 mask |= SPEC_CTRL_STIBP;
2477
2478 if (mask == x86_spec_ctrl_base)
2479 return;
2480
2481 pr_info("Update user space SMT mitigation: STIBP %s\n",
2482 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
2483 x86_spec_ctrl_base = mask;
2484 on_each_cpu(update_stibp_msr, NULL, 1);
2485 }
2486
2487 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
2488 static void update_indir_branch_cond(void)
2489 {
2490 if (sched_smt_active())
2491 static_branch_enable(&switch_to_cond_stibp);
2492 else
2493 static_branch_disable(&switch_to_cond_stibp);
2494 }
2495
2496 #undef pr_fmt
2497 #define pr_fmt(fmt) fmt
2498
2499 /* Update the static key controlling the MDS CPU buffer clear in idle */
2500 static void update_mds_branch_idle(void)
2501 {
2502 /*
2503 * Enable the idle clearing if SMT is active on CPUs which are
2504 * affected only by MSBDS and not any other MDS variant.
2505 *
2506 * The other variants cannot be mitigated when SMT is enabled, so
2507 * clearing the buffers on idle just to prevent the Store Buffer
2508 * repartitioning leak would be a window dressing exercise.
2509 */
2510 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
2511 return;
2512
2513 if (sched_smt_active()) {
2514 static_branch_enable(&cpu_buf_idle_clear);
2515 } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
2516 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
2517 static_branch_disable(&cpu_buf_idle_clear);
2518 }
2519 }
2520
2521 #undef pr_fmt
2522 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
2523
2524 static enum ssb_mitigation ssb_mode __ro_after_init =
2525 IS_ENABLED(CONFIG_MITIGATION_SSB) ? SPEC_STORE_BYPASS_AUTO : SPEC_STORE_BYPASS_NONE;
2526
2527 static const char * const ssb_strings[] = {
2528 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
2529 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
2530 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
2531 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
2532 };
2533
2534 static bool nossb __ro_after_init;
2535
2536 static int __init nossb_parse_cmdline(char *str)
2537 {
2538 nossb = true;
2539 ssb_mode = SPEC_STORE_BYPASS_NONE;
2540 return 0;
2541 }
2542 early_param("nospec_store_bypass_disable", nossb_parse_cmdline);
2543
2544 static int __init ssb_parse_cmdline(char *str)
2545 {
2546 if (!str)
2547 return -EINVAL;
2548
2549 if (nossb)
2550 return 0;
2551
2552 if (!strcmp(str, "auto"))
2553 ssb_mode = SPEC_STORE_BYPASS_AUTO;
2554 else if (!strcmp(str, "on"))
2555 ssb_mode = SPEC_STORE_BYPASS_DISABLE;
2556 else if (!strcmp(str, "off"))
2557 ssb_mode = SPEC_STORE_BYPASS_NONE;
2558 else if (!strcmp(str, "prctl"))
2559 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2560 else if (!strcmp(str, "seccomp"))
2561 ssb_mode = IS_ENABLED(CONFIG_SECCOMP) ?
2562 SPEC_STORE_BYPASS_SECCOMP : SPEC_STORE_BYPASS_PRCTL;
2563 else
2564 pr_err("Ignoring unknown spec_store_bypass_disable option (%s).\n",
2565 str);
2566
2567 return 0;
2568 }
2569 early_param("spec_store_bypass_disable", ssb_parse_cmdline);
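
/*
 * Usage example: "spec_store_bypass_disable=seccomp" behaves like "prctl"
 * but additionally force-disables speculative store bypass for seccomp
 * threads (see arch_seccomp_spec_mitigate()); on kernels built without
 * CONFIG_SECCOMP the parser above silently degrades it to plain prctl mode.
 */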
2570
2571 static void __init ssb_select_mitigation(void)
2572 {
2573 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) {
2574 ssb_mode = SPEC_STORE_BYPASS_NONE;
2575 return;
2576 }
2577
2578 if (ssb_mode == SPEC_STORE_BYPASS_AUTO) {
2579 if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS))
2580 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2581 else
2582 ssb_mode = SPEC_STORE_BYPASS_NONE;
2583 }
2584
2585 if (!boot_cpu_has(X86_FEATURE_SSBD))
2586 ssb_mode = SPEC_STORE_BYPASS_NONE;
2587
2588 pr_info("%s\n", ssb_strings[ssb_mode]);
2589 }
2590
2591 static void __init ssb_apply_mitigation(void)
2592 {
2593 /*
2594 * We have three CPU feature flags that are in play here:
2595 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
2596 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
2597 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
2598 */
2599 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
2600 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
2601 /*
2602 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
2603 * use a completely different MSR and bit dependent on family.
2604 */
2605 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
2606 !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
2607 x86_amd_ssb_disable();
2608 } else {
2609 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
2610 update_spec_ctrl(x86_spec_ctrl_base);
2611 }
2612 }
2613 }
2614
2615 #undef pr_fmt
2616 #define pr_fmt(fmt) "Speculation prctl: " fmt
2617
2618 static void task_update_spec_tif(struct task_struct *tsk)
2619 {
2620 /* Force the update of the real TIF bits */
2621 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
2622
2623 /*
2624 * Immediately update the speculation control MSRs for the current
2625 * task, but for a non-current task delay setting the CPU
2626 * mitigation until it is scheduled next.
2627 *
2628 * This can only happen for SECCOMP mitigation. For PRCTL it's
2629 * always the current task.
2630 */
2631 if (tsk == current)
2632 speculation_ctrl_update_current();
2633 }
2634
2635 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
2636 {
2637
2638 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2639 return -EPERM;
2640
2641 switch (ctrl) {
2642 case PR_SPEC_ENABLE:
2643 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2644 return 0;
2645 case PR_SPEC_DISABLE:
2646 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2647 return 0;
2648 default:
2649 return -ERANGE;
2650 }
2651 }
2652
2653 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
2654 {
2655 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
2656 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
2657 return -ENXIO;
2658
2659 switch (ctrl) {
2660 case PR_SPEC_ENABLE:
2661 /* If speculation is force disabled, enable is not allowed */
2662 if (task_spec_ssb_force_disable(task))
2663 return -EPERM;
2664 task_clear_spec_ssb_disable(task);
2665 task_clear_spec_ssb_noexec(task);
2666 task_update_spec_tif(task);
2667 break;
2668 case PR_SPEC_DISABLE:
2669 task_set_spec_ssb_disable(task);
2670 task_clear_spec_ssb_noexec(task);
2671 task_update_spec_tif(task);
2672 break;
2673 case PR_SPEC_FORCE_DISABLE:
2674 task_set_spec_ssb_disable(task);
2675 task_set_spec_ssb_force_disable(task);
2676 task_clear_spec_ssb_noexec(task);
2677 task_update_spec_tif(task);
2678 break;
2679 case PR_SPEC_DISABLE_NOEXEC:
2680 if (task_spec_ssb_force_disable(task))
2681 return -EPERM;
2682 task_set_spec_ssb_disable(task);
2683 task_set_spec_ssb_noexec(task);
2684 task_update_spec_tif(task);
2685 break;
2686 default:
2687 return -ERANGE;
2688 }
2689 return 0;
2690 }
2691
2692 static bool is_spec_ib_user_controlled(void)
2693 {
2694 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2695 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2696 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2697 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2698 }
2699
2700 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2701 {
2702 switch (ctrl) {
2703 case PR_SPEC_ENABLE:
2704 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2705 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2706 return 0;
2707
2708 /*
2709 * With strict mode for both IBPB and STIBP, the instruction
2710 * code paths avoid checking this task flag and instead,
2711 * unconditionally run the instruction. However, STIBP and IBPB
2712 * are independent and either can be set to conditionally
2713 * enabled regardless of the mode of the other.
2714 *
2715 * If either is set to conditional, allow the task flag to be
2716 * updated, unless it was force-disabled by a previous prctl
2717 * call. Currently, this is possible on an AMD CPU which has the
2718 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2719 * kernel is booted with 'spectre_v2_user=seccomp', then
2720 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2721 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2722 */
2723 if (!is_spec_ib_user_controlled() ||
2724 task_spec_ib_force_disable(task))
2725 return -EPERM;
2726
2727 task_clear_spec_ib_disable(task);
2728 task_update_spec_tif(task);
2729 break;
2730 case PR_SPEC_DISABLE:
2731 case PR_SPEC_FORCE_DISABLE:
2732 /*
2733 * Indirect branch speculation is always allowed when
2734 * mitigation is force disabled.
2735 */
2736 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2737 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2738 return -EPERM;
2739
2740 if (!is_spec_ib_user_controlled())
2741 return 0;
2742
2743 task_set_spec_ib_disable(task);
2744 if (ctrl == PR_SPEC_FORCE_DISABLE)
2745 task_set_spec_ib_force_disable(task);
2746 task_update_spec_tif(task);
2747 if (task == current)
2748 indirect_branch_prediction_barrier();
2749 break;
2750 default:
2751 return -ERANGE;
2752 }
2753 return 0;
2754 }
2755
2756 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2757 unsigned long ctrl)
2758 {
2759 switch (which) {
2760 case PR_SPEC_STORE_BYPASS:
2761 return ssb_prctl_set(task, ctrl);
2762 case PR_SPEC_INDIRECT_BRANCH:
2763 return ib_prctl_set(task, ctrl);
2764 case PR_SPEC_L1D_FLUSH:
2765 return l1d_flush_prctl_set(task, ctrl);
2766 default:
2767 return -ENODEV;
2768 }
2769 }
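
/*
 * Userspace reaches this dispatcher via the PR_SET_SPECULATION_CTRL prctl.
 * A minimal illustrative caller (a sketch, not part of this file) that opts
 * the current task out of speculative store bypass and indirect branch
 * speculation could look like:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * Each call fails (errno ENXIO or EPERM, per the helpers above) when the
 * corresponding mitigation is not in a prctl-controllable mode.
 */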
2770
2771 #ifdef CONFIG_SECCOMP
2772 void arch_seccomp_spec_mitigate(struct task_struct *task)
2773 {
2774 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2775 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2776 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2777 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2778 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2779 }
2780 #endif
2781
2782 static int l1d_flush_prctl_get(struct task_struct *task)
2783 {
2784 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2785 return PR_SPEC_FORCE_DISABLE;
2786
2787 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2788 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2789 else
2790 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2791 }
2792
2793 static int ssb_prctl_get(struct task_struct *task)
2794 {
2795 switch (ssb_mode) {
2796 case SPEC_STORE_BYPASS_NONE:
2797 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2798 return PR_SPEC_ENABLE;
2799 return PR_SPEC_NOT_AFFECTED;
2800 case SPEC_STORE_BYPASS_DISABLE:
2801 return PR_SPEC_DISABLE;
2802 case SPEC_STORE_BYPASS_SECCOMP:
2803 case SPEC_STORE_BYPASS_PRCTL:
2804 case SPEC_STORE_BYPASS_AUTO:
2805 if (task_spec_ssb_force_disable(task))
2806 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2807 if (task_spec_ssb_noexec(task))
2808 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2809 if (task_spec_ssb_disable(task))
2810 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2811 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2812 }
2813 BUG();
2814 }
2815
2816 static int ib_prctl_get(struct task_struct *task)
2817 {
2818 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2819 return PR_SPEC_NOT_AFFECTED;
2820
2821 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2822 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2823 return PR_SPEC_ENABLE;
2824 else if (is_spec_ib_user_controlled()) {
2825 if (task_spec_ib_force_disable(task))
2826 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2827 if (task_spec_ib_disable(task))
2828 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2829 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2830 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2831 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2832 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2833 return PR_SPEC_DISABLE;
2834 else
2835 return PR_SPEC_NOT_AFFECTED;
2836 }
2837
2838 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2839 {
2840 switch (which) {
2841 case PR_SPEC_STORE_BYPASS:
2842 return ssb_prctl_get(task);
2843 case PR_SPEC_INDIRECT_BRANCH:
2844 return ib_prctl_get(task);
2845 case PR_SPEC_L1D_FLUSH:
2846 return l1d_flush_prctl_get(task);
2847 default:
2848 return -ENODEV;
2849 }
2850 }
2851
2852 void x86_spec_ctrl_setup_ap(void)
2853 {
2854 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2855 update_spec_ctrl(x86_spec_ctrl_base);
2856
2857 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2858 x86_amd_ssb_disable();
2859 }
2860
2861 bool itlb_multihit_kvm_mitigation;
2862 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
2863
2864 #undef pr_fmt
2865 #define pr_fmt(fmt) "L1TF: " fmt
2866
2867 /* Default mitigation for L1TF-affected CPUs */
2868 enum l1tf_mitigations l1tf_mitigation __ro_after_init =
2869 IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF;
2870 #if IS_ENABLED(CONFIG_KVM_INTEL)
2871 EXPORT_SYMBOL_GPL(l1tf_mitigation);
2872 #endif
2873 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2874 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
2875
2876 /*
2877 * These CPUs all support a 44-bit physical address space internally in the
2878 * cache, but CPUID can report a smaller number of physical address bits.
2879 *
2880 * The L1TF mitigation uses the topmost address bit for the inversion of
2881 * non-present PTEs. When the installed memory reaches into the topmost
2882 * address bit due to memory holes, as has been observed on machines
2883 * which report 36 physical address bits and have 32G of RAM installed,
2884 * the mitigation range check in l1tf_apply_mitigation() triggers.
2885 * This is a false positive because the mitigation is still possible due to
2886 * the fact that the cache uses 44 bits internally. Use the cache bits
2887 * instead of the reported physical bits and adjust them on the affected
2888 * machines to 44 bits if the reported bits are less than 44.
2889 */
2890 static void override_cache_bits(struct cpuinfo_x86 *c)
2891 {
2892 if (c->x86 != 6)
2893 return;
2894
2895 switch (c->x86_vfm) {
2896 case INTEL_NEHALEM:
2897 case INTEL_WESTMERE:
2898 case INTEL_SANDYBRIDGE:
2899 case INTEL_IVYBRIDGE:
2900 case INTEL_HASWELL:
2901 case INTEL_HASWELL_L:
2902 case INTEL_HASWELL_G:
2903 case INTEL_BROADWELL:
2904 case INTEL_BROADWELL_G:
2905 case INTEL_SKYLAKE_L:
2906 case INTEL_SKYLAKE:
2907 case INTEL_KABYLAKE_L:
2908 case INTEL_KABYLAKE:
2909 if (c->x86_cache_bits < 44)
2910 c->x86_cache_bits = 44;
2911 break;
2912 }
2913 }
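
/*
 * Worked example for the comment above override_cache_bits(): with
 * x86_cache_bits raised to 44, the MAX_PA/2 boundary used for the PTE
 * inversion sits at 2^43 (8 TiB), comfortably above any installed memory
 * on these parts. With the reported 36 bits it would sit at 2^35 (32 GiB),
 * which is exactly where the false positive on the 32G machines mentioned
 * above comes from.
 */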
2914
2915 static void __init l1tf_select_mitigation(void)
2916 {
2917 if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
2918 l1tf_mitigation = L1TF_MITIGATION_OFF;
2919 return;
2920 }
2921
2922 if (l1tf_mitigation != L1TF_MITIGATION_AUTO)
2923 return;
2924
2925 if (!should_mitigate_vuln(X86_BUG_L1TF)) {
2926 l1tf_mitigation = L1TF_MITIGATION_OFF;
2927 return;
2928 }
2929
2930 if (smt_mitigations == SMT_MITIGATIONS_ON)
2931 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2932 else
2933 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2934 }
2935
2936 static void __init l1tf_apply_mitigation(void)
2937 {
2938 u64 half_pa;
2939
2940 if (!boot_cpu_has_bug(X86_BUG_L1TF))
2941 return;
2942
2943 override_cache_bits(&boot_cpu_data);
2944
2945 switch (l1tf_mitigation) {
2946 case L1TF_MITIGATION_OFF:
2947 case L1TF_MITIGATION_FLUSH_NOWARN:
2948 case L1TF_MITIGATION_FLUSH:
2949 case L1TF_MITIGATION_AUTO:
2950 break;
2951 case L1TF_MITIGATION_FLUSH_NOSMT:
2952 case L1TF_MITIGATION_FULL:
2953 cpu_smt_disable(false);
2954 break;
2955 case L1TF_MITIGATION_FULL_FORCE:
2956 cpu_smt_disable(true);
2957 break;
2958 }
2959
2960 #if CONFIG_PGTABLE_LEVELS == 2
2961 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
2962 return;
2963 #endif
2964
2965 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
2966 if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
2967 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
2968 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
2969 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
2970 half_pa);
2971 pr_info("However, doing so will make a part of your RAM unusable.\n");
2972 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
2973 return;
2974 }
2975
2976 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
2977 }
2978
2979 static int __init l1tf_cmdline(char *str)
2980 {
2981 if (!boot_cpu_has_bug(X86_BUG_L1TF))
2982 return 0;
2983
2984 if (!str)
2985 return -EINVAL;
2986
2987 if (!strcmp(str, "off"))
2988 l1tf_mitigation = L1TF_MITIGATION_OFF;
2989 else if (!strcmp(str, "flush,nowarn"))
2990 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
2991 else if (!strcmp(str, "flush"))
2992 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2993 else if (!strcmp(str, "flush,nosmt"))
2994 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2995 else if (!strcmp(str, "full"))
2996 l1tf_mitigation = L1TF_MITIGATION_FULL;
2997 else if (!strcmp(str, "full,force"))
2998 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
2999
3000 return 0;
3001 }
3002 early_param("l1tf", l1tf_cmdline);
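
/*
 * Usage example: "l1tf=full,force" selects L1TF_MITIGATION_FULL_FORCE,
 * which force-disables SMT (cpu_smt_disable(true)) in addition to PTE
 * inversion, while plain "l1tf=full" disables SMT in a way that can still
 * be re-enabled via sysfs at runtime.
 */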
3003
3004 #undef pr_fmt
3005 #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt
3006
3007 static const char * const srso_strings[] = {
3008 [SRSO_MITIGATION_NONE] = "Vulnerable",
3009 [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
3010 [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
3011 [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
3012 [SRSO_MITIGATION_NOSMT] = "Mitigation: SMT disabled",
3013 [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
3014 [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
3015 [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
3016 [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
3017 };
3018
3019 static int __init srso_parse_cmdline(char *str)
3020 {
3021 if (!str)
3022 return -EINVAL;
3023
3024 if (!strcmp(str, "off"))
3025 srso_mitigation = SRSO_MITIGATION_NONE;
3026 else if (!strcmp(str, "microcode"))
3027 srso_mitigation = SRSO_MITIGATION_MICROCODE;
3028 else if (!strcmp(str, "safe-ret"))
3029 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
3030 else if (!strcmp(str, "ibpb"))
3031 srso_mitigation = SRSO_MITIGATION_IBPB;
3032 else if (!strcmp(str, "ibpb-vmexit"))
3033 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
3034 else
3035 pr_err("Ignoring unknown SRSO option (%s).", str);
3036
3037 return 0;
3038 }
3039 early_param("spec_rstack_overflow", srso_parse_cmdline);
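
/*
 * Usage example: "spec_rstack_overflow=ibpb-vmexit" requests IBPB on VMEXIT
 * only; on parts with X86_FEATURE_SRSO_BP_SPEC_REDUCE,
 * srso_select_mitigation() converts that into the lighter-weight
 * BpSpecReduce based mitigation.
 */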
3040
3041 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
3042
3043 static void __init srso_select_mitigation(void)
3044 {
3045 if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
3046 srso_mitigation = SRSO_MITIGATION_NONE;
3047 return;
3048 }
3049
3050 if (srso_mitigation == SRSO_MITIGATION_AUTO) {
3051 /*
3052 * Use safe-RET if user->kernel or guest->host protection is
3053 * required. Otherwise the 'microcode' mitigation is sufficient
3054 * to protect the user->user and guest->guest vectors.
3055 */
3056 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
3057 (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) &&
3058 !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) {
3059 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
3060 } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
3061 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
3062 srso_mitigation = SRSO_MITIGATION_MICROCODE;
3063 } else {
3064 srso_mitigation = SRSO_MITIGATION_NONE;
3065 return;
3066 }
3067 }
3068
3069 /* Zen1/2 with SMT off aren't vulnerable to SRSO. */
3070 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
3071 srso_mitigation = SRSO_MITIGATION_NOSMT;
3072 return;
3073 }
3074
3075 if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) {
3076 pr_warn("IBPB-extending microcode not applied!\n");
3077 pr_warn(SRSO_NOTICE);
3078
3079 /*
3080 * Safe-RET provides partial mitigation without microcode, but
3081		 * the other mitigations require microcode to provide any
3082		 * protection at all.
3083 */
3084 if (srso_mitigation == SRSO_MITIGATION_SAFE_RET)
3085 srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
3086 else
3087 srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
3088 }
3089
3090 switch (srso_mitigation) {
3091 case SRSO_MITIGATION_SAFE_RET:
3092 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3093 if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
3094 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
3095 goto ibpb_on_vmexit;
3096 }
3097
3098 if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
3099 pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
3100 srso_mitigation = SRSO_MITIGATION_NONE;
3101 }
3102 break;
3103 ibpb_on_vmexit:
3104 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3105 if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
3106 pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
3107 srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
3108 break;
3109 }
3110 fallthrough;
3111 case SRSO_MITIGATION_IBPB:
3112 if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
3113 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
3114 srso_mitigation = SRSO_MITIGATION_NONE;
3115 }
3116 break;
3117 default:
3118 break;
3119 }
3120 }
3121
3122 static void __init srso_update_mitigation(void)
3123 {
3124 if (!boot_cpu_has_bug(X86_BUG_SRSO))
3125 return;
3126
3127 /* If retbleed is using IBPB, that works for SRSO as well */
3128 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB &&
3129 boot_cpu_has(X86_FEATURE_IBPB_BRTYPE))
3130 srso_mitigation = SRSO_MITIGATION_IBPB;
3131
3132 pr_info("%s\n", srso_strings[srso_mitigation]);
3133 }
3134
3135 static void __init srso_apply_mitigation(void)
3136 {
3137 /*
3138 * Clear the feature flag if this mitigation is not selected as that
3139 * feature flag controls the BpSpecReduce MSR bit toggling in KVM.
3140 */
3141 if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
3142 setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
3143
3144 if (srso_mitigation == SRSO_MITIGATION_NONE) {
3145 if (boot_cpu_has(X86_FEATURE_SBPB))
3146 x86_pred_cmd = PRED_CMD_SBPB;
3147 return;
3148 }
3149
3150 switch (srso_mitigation) {
3151 case SRSO_MITIGATION_SAFE_RET:
3152 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3153 /*
3154 * Enable the return thunk for generated code
3155 * like ftrace, static_call, etc.
3156 */
3157 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
3158 setup_force_cpu_cap(X86_FEATURE_UNRET);
3159
3160 if (boot_cpu_data.x86 == 0x19) {
3161 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
3162 set_return_thunk(srso_alias_return_thunk);
3163 } else {
3164 setup_force_cpu_cap(X86_FEATURE_SRSO);
3165 set_return_thunk(srso_return_thunk);
3166 }
3167 break;
3168 case SRSO_MITIGATION_IBPB:
3169 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
3170 /*
3171 * IBPB on entry already obviates the need for
3172 * software-based untraining so clear those in case some
3173 * other mitigation like Retbleed has selected them.
3174 */
3175 setup_clear_cpu_cap(X86_FEATURE_UNRET);
3176 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
3177 fallthrough;
3178 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3179 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
3180 /*
3181 * There is no need for RSB filling: entry_ibpb() ensures
3182 * all predictions, including the RSB, are invalidated,
3183 * regardless of IBPB implementation.
3184 */
3185 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
3186 break;
3187 default:
3188 break;
3189 }
3190 }
3191
3192 #undef pr_fmt
3193 #define pr_fmt(fmt) "VMSCAPE: " fmt
3194
3195 enum vmscape_mitigations {
3196 VMSCAPE_MITIGATION_NONE,
3197 VMSCAPE_MITIGATION_AUTO,
3198 VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
3199 VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
3200 };
3201
3202 static const char * const vmscape_strings[] = {
3203 [VMSCAPE_MITIGATION_NONE] = "Vulnerable",
3204 /* [VMSCAPE_MITIGATION_AUTO] */
3205 [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER] = "Mitigation: IBPB before exit to userspace",
3206 [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT",
3207 };
3208
3209 static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
3210 IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;
3211
3212 static int __init vmscape_parse_cmdline(char *str)
3213 {
3214 if (!str)
3215 return -EINVAL;
3216
3217 if (!strcmp(str, "off")) {
3218 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3219 } else if (!strcmp(str, "ibpb")) {
3220 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
3221 } else if (!strcmp(str, "force")) {
3222 setup_force_cpu_bug(X86_BUG_VMSCAPE);
3223 vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
3224 } else {
3225 pr_err("Ignoring unknown vmscape=%s option.\n", str);
3226 }
3227
3228 return 0;
3229 }
3230 early_param("vmscape", vmscape_parse_cmdline);
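
/*
 * Usage example: "vmscape=force" marks the CPU as affected
 * (X86_BUG_VMSCAPE) even when it is not in the known-vulnerable list and
 * leaves the choice to the AUTO logic below, whereas "vmscape=ibpb"
 * directly selects the IBPB-before-exit-to-userspace mitigation.
 */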
3231
3232 static void __init vmscape_select_mitigation(void)
3233 {
3234 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
3235 !boot_cpu_has(X86_FEATURE_IBPB)) {
3236 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3237 return;
3238 }
3239
3240 if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO) {
3241 if (should_mitigate_vuln(X86_BUG_VMSCAPE))
3242 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
3243 else
3244 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3245 }
3246 }
3247
3248 static void __init vmscape_update_mitigation(void)
3249 {
3250 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE))
3251 return;
3252
3253 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
3254 srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
3255 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;
3256
3257 pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
3258 }
3259
3260 static void __init vmscape_apply_mitigation(void)
3261 {
3262 if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
3263 setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
3264 }
3265
3266 #undef pr_fmt
3267 #define pr_fmt(fmt) fmt
3268
3269 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
3270 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
3271 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
3272 #define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n"
3273
3274 void cpu_bugs_smt_update(void)
3275 {
3276 mutex_lock(&spec_ctrl_mutex);
3277
3278 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
3279 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
3280 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
3281
3282 switch (spectre_v2_user_stibp) {
3283 case SPECTRE_V2_USER_NONE:
3284 break;
3285 case SPECTRE_V2_USER_STRICT:
3286 case SPECTRE_V2_USER_STRICT_PREFERRED:
3287 update_stibp_strict();
3288 break;
3289 case SPECTRE_V2_USER_PRCTL:
3290 case SPECTRE_V2_USER_SECCOMP:
3291 update_indir_branch_cond();
3292 break;
3293 }
3294
3295 switch (mds_mitigation) {
3296 case MDS_MITIGATION_FULL:
3297 case MDS_MITIGATION_AUTO:
3298 case MDS_MITIGATION_VMWERV:
3299 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
3300 pr_warn_once(MDS_MSG_SMT);
3301 update_mds_branch_idle();
3302 break;
3303 case MDS_MITIGATION_OFF:
3304 break;
3305 }
3306
3307 switch (taa_mitigation) {
3308 case TAA_MITIGATION_VERW:
3309 case TAA_MITIGATION_AUTO:
3310 case TAA_MITIGATION_UCODE_NEEDED:
3311 if (sched_smt_active())
3312 pr_warn_once(TAA_MSG_SMT);
3313 break;
3314 case TAA_MITIGATION_TSX_DISABLED:
3315 case TAA_MITIGATION_OFF:
3316 break;
3317 }
3318
3319 switch (mmio_mitigation) {
3320 case MMIO_MITIGATION_VERW:
3321 case MMIO_MITIGATION_AUTO:
3322 case MMIO_MITIGATION_UCODE_NEEDED:
3323 if (sched_smt_active())
3324 pr_warn_once(MMIO_MSG_SMT);
3325 break;
3326 case MMIO_MITIGATION_OFF:
3327 break;
3328 }
3329
3330 switch (tsa_mitigation) {
3331 case TSA_MITIGATION_USER_KERNEL:
3332 case TSA_MITIGATION_VM:
3333 case TSA_MITIGATION_AUTO:
3334 case TSA_MITIGATION_FULL:
3335 /*
3336 * TSA-SQ can potentially lead to info leakage between
3337 * SMT threads.
3338 */
3339 if (sched_smt_active())
3340 static_branch_enable(&cpu_buf_idle_clear);
3341 else
3342 static_branch_disable(&cpu_buf_idle_clear);
3343 break;
3344 case TSA_MITIGATION_NONE:
3345 case TSA_MITIGATION_UCODE_NEEDED:
3346 break;
3347 }
3348
3349 switch (vmscape_mitigation) {
3350 case VMSCAPE_MITIGATION_NONE:
3351 case VMSCAPE_MITIGATION_AUTO:
3352 break;
3353 case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT:
3354 case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER:
3355 /*
3356		 * Hypervisors can be attacked across SMT threads; warn when
3357 * STIBP is not already enabled system-wide.
3358 *
3359 * Intel eIBRS (!AUTOIBRS) implies STIBP on.
3360 */
3361 if (!sched_smt_active() ||
3362 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
3363 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
3364 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
3365 !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
3366 break;
3367 pr_warn_once(VMSCAPE_MSG_SMT);
3368 break;
3369 }
3370
3371 mutex_unlock(&spec_ctrl_mutex);
3372 }
3373
3374 #ifdef CONFIG_SYSFS
3375
3376 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
3377
3378 #if IS_ENABLED(CONFIG_KVM_INTEL)
3379 static const char * const l1tf_vmx_states[] = {
3380 [VMENTER_L1D_FLUSH_AUTO] = "auto",
3381 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
3382 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
3383 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
3384 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
3385 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
3386 };
3387
3388 static ssize_t l1tf_show_state(char *buf)
3389 {
3390 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
3391 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3392
3393 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
3394 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
3395 sched_smt_active())) {
3396 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
3397 l1tf_vmx_states[l1tf_vmx_mitigation]);
3398 }
3399
3400 return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
3401 l1tf_vmx_states[l1tf_vmx_mitigation],
3402 sched_smt_active() ? "vulnerable" : "disabled");
3403 }
3404
3405 static ssize_t itlb_multihit_show_state(char *buf)
3406 {
3407 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
3408 !boot_cpu_has(X86_FEATURE_VMX))
3409 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
3410 else if (!(cr4_read_shadow() & X86_CR4_VMXE))
3411 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
3412 else if (itlb_multihit_kvm_mitigation)
3413 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
3414 else
3415 return sysfs_emit(buf, "KVM: Vulnerable\n");
3416 }
3417 #else
3418 static ssize_t l1tf_show_state(char *buf)
3419 {
3420 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3421 }
3422
3423 static ssize_t itlb_multihit_show_state(char *buf)
3424 {
3425 return sysfs_emit(buf, "Processor vulnerable\n");
3426 }
3427 #endif
3428
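/*
 * A guest cannot see the host's SMT configuration, so the SMT portion of
 * the MDS/TAA/MMIO state is reported as unknown under a hypervisor.
 */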
3429 static ssize_t mds_show_state(char *buf)
3430 {
3431 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3432 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3433 mds_strings[mds_mitigation]);
3434 }
3435
3436 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
3437 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3438 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
3439 sched_smt_active() ? "mitigated" : "disabled"));
3440 }
3441
3442 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3443 sched_smt_active() ? "vulnerable" : "disabled");
3444 }
3445
3446 static ssize_t tsx_async_abort_show_state(char *buf)
3447 {
3448 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
3449 (taa_mitigation == TAA_MITIGATION_OFF))
3450 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
3451
3452 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3453 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3454 taa_strings[taa_mitigation]);
3455 }
3456
3457 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
3458 sched_smt_active() ? "vulnerable" : "disabled");
3459 }
3460
3461 static ssize_t mmio_stale_data_show_state(char *buf)
3462 {
3463 if (mmio_mitigation == MMIO_MITIGATION_OFF)
3464 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
3465
3466 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3467 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3468 mmio_strings[mmio_mitigation]);
3469 }
3470
3471 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
3472 sched_smt_active() ? "vulnerable" : "disabled");
3473 }
3474
3475 static ssize_t rfds_show_state(char *buf)
3476 {
3477 return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
3478 }
3479
3480 static ssize_t old_microcode_show_state(char *buf)
3481 {
3482 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
3483 return sysfs_emit(buf, "Unknown: running under hypervisor");
3484
3485 return sysfs_emit(buf, "Vulnerable\n");
3486 }
3487
3488 static ssize_t its_show_state(char *buf)
3489 {
3490 return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
3491 }
3492
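/*
 * Intel eIBRS (as opposed to AMD AUTOIBRS) already implies STIBP, so there
 * is nothing extra to report in that case.
 */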
3493 static char *stibp_state(void)
3494 {
3495 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
3496 !boot_cpu_has(X86_FEATURE_AUTOIBRS))
3497 return "";
3498
3499 switch (spectre_v2_user_stibp) {
3500 case SPECTRE_V2_USER_NONE:
3501 return "; STIBP: disabled";
3502 case SPECTRE_V2_USER_STRICT:
3503 return "; STIBP: forced";
3504 case SPECTRE_V2_USER_STRICT_PREFERRED:
3505 return "; STIBP: always-on";
3506 case SPECTRE_V2_USER_PRCTL:
3507 case SPECTRE_V2_USER_SECCOMP:
3508 if (static_key_enabled(&switch_to_cond_stibp))
3509 return "; STIBP: conditional";
3510 }
3511 return "";
3512 }
3513
3514 static char *ibpb_state(void)
3515 {
3516 if (boot_cpu_has(X86_FEATURE_IBPB)) {
3517 if (static_key_enabled(&switch_mm_always_ibpb))
3518 return "; IBPB: always-on";
3519 if (static_key_enabled(&switch_mm_cond_ibpb))
3520 return "; IBPB: conditional";
3521 return "; IBPB: disabled";
3522 }
3523 return "";
3524 }
3525
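/*
 * PBRSB on eIBRS parts is handled by a software RSB-filling sequence on
 * VM exit; report whether that sequence is in place.
 */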
3526 static char *pbrsb_eibrs_state(void)
3527 {
3528 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
3529 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
3530 boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
3531 return "; PBRSB-eIBRS: SW sequence";
3532 else
3533 return "; PBRSB-eIBRS: Vulnerable";
3534 } else {
3535 return "; PBRSB-eIBRS: Not affected";
3536 }
3537 }
3538
3539 static const char *spectre_bhi_state(void)
3540 {
3541 if (!boot_cpu_has_bug(X86_BUG_BHI))
3542 return "; BHI: Not affected";
3543 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
3544 return "; BHI: BHI_DIS_S";
3545 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
3546 return "; BHI: SW loop, KVM: SW loop";
3547 else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
3548 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
3549 rrsba_disabled)
3550 return "; BHI: Retpoline";
3551 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT))
3552 return "; BHI: Vulnerable, KVM: SW loop";
3553
3554 return "; BHI: Vulnerable";
3555 }
3556
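/*
 * eIBRS together with unprivileged eBPF (plus SMT for the LFENCE variant)
 * is reported as vulnerable regardless of the rest of the state.
 */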
3557 static ssize_t spectre_v2_show_state(char *buf)
3558 {
3559 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
3560 return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
3561
3562 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
3563 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
3564 return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
3565
3566 return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
3567 spectre_v2_strings[spectre_v2_enabled],
3568 ibpb_state(),
3569 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
3570 stibp_state(),
3571 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
3572 pbrsb_eibrs_state(),
3573 spectre_bhi_state(),
3574 /* this should always be at the end */
3575 spectre_v2_module_string());
3576 }
3577
3578 static ssize_t srbds_show_state(char *buf)
3579 {
3580 return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
3581 }
3582
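/*
 * The untrained return thunk and IBPB mitigations are only effective on
 * AMD/Hygon parts; for those, the reported SMT state depends on whether
 * STIBP is forced on.
 */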
3583 static ssize_t retbleed_show_state(char *buf)
3584 {
3585 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
3586 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
3587 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
3588 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
3589 return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
3590
3591 return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
3592 !sched_smt_active() ? "disabled" :
3593 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
3594 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
3595 "enabled with STIBP protection" : "vulnerable");
3596 }
3597
3598 return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
3599 }
3600
3601 static ssize_t srso_show_state(char *buf)
3602 {
3603 return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
3604 }
3605
3606 static ssize_t gds_show_state(char *buf)
3607 {
3608 return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
3609 }
3610
3611 static ssize_t tsa_show_state(char *buf)
3612 {
3613 return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
3614 }
3615
3616 static ssize_t vmscape_show_state(char *buf)
3617 {
3618 return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
3619 }
3620
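/*
 * Common handler for the vulnerability sysfs files: report "Not affected"
 * when the bug bit is not set, otherwise dispatch to the per-bug helper.
 */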
3621 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
3622 char *buf, unsigned int bug)
3623 {
3624 if (!boot_cpu_has_bug(bug))
3625 return sysfs_emit(buf, "Not affected\n");
3626
3627 switch (bug) {
3628 case X86_BUG_CPU_MELTDOWN:
3629 if (boot_cpu_has(X86_FEATURE_PTI))
3630 return sysfs_emit(buf, "Mitigation: PTI\n");
3631
3632 if (hypervisor_is_type(X86_HYPER_XEN_PV))
3633 return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
3634
3635 break;
3636
3637 case X86_BUG_SPECTRE_V1:
3638 return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
3639
3640 case X86_BUG_SPECTRE_V2:
3641 return spectre_v2_show_state(buf);
3642
3643 case X86_BUG_SPEC_STORE_BYPASS:
3644 return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
3645
3646 case X86_BUG_L1TF:
3647 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
3648 return l1tf_show_state(buf);
3649 break;
3650
3651 case X86_BUG_MDS:
3652 return mds_show_state(buf);
3653
3654 case X86_BUG_TAA:
3655 return tsx_async_abort_show_state(buf);
3656
3657 case X86_BUG_ITLB_MULTIHIT:
3658 return itlb_multihit_show_state(buf);
3659
3660 case X86_BUG_SRBDS:
3661 return srbds_show_state(buf);
3662
3663 case X86_BUG_MMIO_STALE_DATA:
3664 return mmio_stale_data_show_state(buf);
3665
3666 case X86_BUG_RETBLEED:
3667 return retbleed_show_state(buf);
3668
3669 case X86_BUG_SRSO:
3670 return srso_show_state(buf);
3671
3672 case X86_BUG_GDS:
3673 return gds_show_state(buf);
3674
3675 case X86_BUG_RFDS:
3676 return rfds_show_state(buf);
3677
3678 case X86_BUG_OLD_MICROCODE:
3679 return old_microcode_show_state(buf);
3680
3681 case X86_BUG_ITS:
3682 return its_show_state(buf);
3683
3684 case X86_BUG_TSA:
3685 return tsa_show_state(buf);
3686
3687 case X86_BUG_VMSCAPE:
3688 return vmscape_show_state(buf);
3689
3690 default:
3691 break;
3692 }
3693
3694 return sysfs_emit(buf, "Vulnerable\n");
3695 }
3696
3697 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
3698 {
3699 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
3700 }
3701
3702 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
3703 {
3704 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
3705 }
3706
3707 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
3708 {
3709 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
3710 }
3711
3712 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
3713 {
3714 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
3715 }
3716
3717 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
3718 {
3719 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
3720 }
3721
3722 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
3723 {
3724 return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
3725 }
3726
3727 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
3728 {
3729 return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
3730 }
3731
3732 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
3733 {
3734 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
3735 }
3736
3737 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
3738 {
3739 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
3740 }
3741
3742 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
3743 {
3744 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
3745 }
3746
3747 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
3748 {
3749 return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
3750 }
3751
3752 ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
3753 {
3754 return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
3755 }
3756
3757 ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
3758 {
3759 return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
3760 }
3761
3762 ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
3763 {
3764 return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
3765 }
3766
3767 ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
3768 {
3769 return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
3770 }
3771
3772 ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
3773 {
3774 return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
3775 }
3776
3777 ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
3778 {
3779 return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
3780 }
3781
3782 ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
3783 {
3784 return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
3785 }
3786 #endif
3787
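/*
 * Reached through the default return thunk, which alternatives patching
 * should have replaced with the selected mitigation; hence the warning.
 */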
3788 void __warn_thunk(void)
3789 {
3790 WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
3791 }
3792