1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1994 Linus Torvalds
4 *
5 * Cyrix stuff, June 1998 by:
6 * - Rafael R. Reilova (moved everything from head.S),
7 * <rreilova@ececs.uc.edu>
8 * - Channing Corn (tests & fixes),
9 * - Andrew D. Balsa (code cleanup).
10 */
11 #include <linux/init.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/nospec.h>
15 #include <linux/prctl.h>
16 #include <linux/sched/smt.h>
17 #include <linux/pgtable.h>
18 #include <linux/bpf.h>
19
20 #include <asm/spec-ctrl.h>
21 #include <asm/cmdline.h>
22 #include <asm/bugs.h>
23 #include <asm/processor.h>
24 #include <asm/processor-flags.h>
25 #include <asm/fpu/api.h>
26 #include <asm/msr.h>
27 #include <asm/vmx.h>
28 #include <asm/paravirt.h>
29 #include <asm/cpu_device_id.h>
30 #include <asm/e820/api.h>
31 #include <asm/hypervisor.h>
32 #include <asm/tlbflush.h>
33 #include <asm/cpu.h>
34
35 #include "cpu.h"
36
37 /*
38 * Speculation Vulnerability Handling
39 *
40 * Each vulnerability is handled with the following functions:
41 * <vuln>_select_mitigation() -- Selects a mitigation to use. This should
42 *				   take into account all relevant command line
43 *				   options.
44 * <vuln>_update_mitigation() -- This is called after all vulnerabilities have
45 *				   selected a mitigation, in case the selection
46 *				   may want to change based on other choices
47 *				   made. This function is optional.
48 * <vuln>_apply_mitigation() -- Enable the selected mitigation.
49 *
50 * The compile-time mitigation in all cases should be AUTO. An explicit
51 * command-line option can override AUTO. If no such option is
52 * provided, <vuln>_select_mitigation() will override AUTO to the best
53 * mitigation option.
54 */
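/*
 * Illustrative sketch (not part of the upstream flow; all names are
 * placeholders): a hypothetical vulnerability "foo" following the pattern
 * above would provide:
 *
 *	static void __init foo_select_mitigation(void)
 *	{
 *		if (foo_mitigation == FOO_MITIGATION_AUTO)
 *			foo_mitigation = should_mitigate_vuln(X86_BUG_FOO) ?
 *					 FOO_MITIGATION_FULL : FOO_MITIGATION_OFF;
 *	}
 *
 *	static void __init foo_update_mitigation(void);	// optional re-evaluation
 *	static void __init foo_apply_mitigation(void);	// e.g. setup_force_cpu_cap()
 *
 * X86_BUG_FOO and the FOO_MITIGATION_* states do not exist; they only
 * illustrate the select/update/apply split.
 */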
55
56 static void __init spectre_v1_select_mitigation(void);
57 static void __init spectre_v1_apply_mitigation(void);
58 static void __init spectre_v2_select_mitigation(void);
59 static void __init spectre_v2_update_mitigation(void);
60 static void __init spectre_v2_apply_mitigation(void);
61 static void __init retbleed_select_mitigation(void);
62 static void __init retbleed_update_mitigation(void);
63 static void __init retbleed_apply_mitigation(void);
64 static void __init spectre_v2_user_select_mitigation(void);
65 static void __init spectre_v2_user_update_mitigation(void);
66 static void __init spectre_v2_user_apply_mitigation(void);
67 static void __init ssb_select_mitigation(void);
68 static void __init ssb_apply_mitigation(void);
69 static void __init l1tf_select_mitigation(void);
70 static void __init l1tf_apply_mitigation(void);
71 static void __init mds_select_mitigation(void);
72 static void __init mds_update_mitigation(void);
73 static void __init mds_apply_mitigation(void);
74 static void __init taa_select_mitigation(void);
75 static void __init taa_update_mitigation(void);
76 static void __init taa_apply_mitigation(void);
77 static void __init mmio_select_mitigation(void);
78 static void __init mmio_update_mitigation(void);
79 static void __init mmio_apply_mitigation(void);
80 static void __init rfds_select_mitigation(void);
81 static void __init rfds_update_mitigation(void);
82 static void __init rfds_apply_mitigation(void);
83 static void __init srbds_select_mitigation(void);
84 static void __init srbds_apply_mitigation(void);
85 static void __init l1d_flush_select_mitigation(void);
86 static void __init srso_select_mitigation(void);
87 static void __init srso_update_mitigation(void);
88 static void __init srso_apply_mitigation(void);
89 static void __init gds_select_mitigation(void);
90 static void __init gds_apply_mitigation(void);
91 static void __init bhi_select_mitigation(void);
92 static void __init bhi_update_mitigation(void);
93 static void __init bhi_apply_mitigation(void);
94 static void __init its_select_mitigation(void);
95 static void __init its_update_mitigation(void);
96 static void __init its_apply_mitigation(void);
97 static void __init tsa_select_mitigation(void);
98 static void __init tsa_apply_mitigation(void);
99 static void __init vmscape_select_mitigation(void);
100 static void __init vmscape_update_mitigation(void);
101 static void __init vmscape_apply_mitigation(void);
102
103 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
104 u64 x86_spec_ctrl_base;
105 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
106
107 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
108 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
109 EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
110
111 /*
112 * Set when the CPU has run a potentially malicious guest. An IBPB will
113 * be needed before running userspace. That IBPB will flush the branch
114 * predictor content.
115 */
116 DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
117 EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);
118
119 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
120
121 static u64 __ro_after_init x86_arch_cap_msr;
122
123 static DEFINE_MUTEX(spec_ctrl_mutex);
124
125 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
126
127 static void __init set_return_thunk(void *thunk)
128 {
129 x86_return_thunk = thunk;
130
131 pr_info("active return thunk: %ps\n", thunk);
132 }
133
134 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
135 static void update_spec_ctrl(u64 val)
136 {
137 this_cpu_write(x86_spec_ctrl_current, val);
138 wrmsrq(MSR_IA32_SPEC_CTRL, val);
139 }
140
141 /*
142 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
143 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
144 */
145 void update_spec_ctrl_cond(u64 val)
146 {
147 if (this_cpu_read(x86_spec_ctrl_current) == val)
148 return;
149
150 this_cpu_write(x86_spec_ctrl_current, val);
151
152 /*
153 * When KERNEL_IBRS is enabled this MSR is written on return-to-user,
154 * so unless forced the update can be delayed until that time.
155 */
156 if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
157 wrmsrq(MSR_IA32_SPEC_CTRL, val);
158 }
159
160 noinstr u64 spec_ctrl_current(void)
161 {
162 return this_cpu_read(x86_spec_ctrl_current);
163 }
164 EXPORT_SYMBOL_GPL(spec_ctrl_current);
165
166 /*
167 * AMD specific MSR info for Speculative Store Bypass control.
168 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
169 */
170 u64 __ro_after_init x86_amd_ls_cfg_base;
171 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
172
173 /* Control conditional STIBP in switch_to() */
174 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
175 /* Control conditional IBPB in switch_mm() */
176 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
177 /* Control unconditional IBPB in switch_mm() */
178 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
179
180 /* Control IBPB on vCPU load */
181 DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
182 EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
183
184 /* Control CPU buffer clear before idling (halt, mwait) */
185 DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
186 EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
187
188 /*
189 * Controls whether L1D flush based mitigations are enabled,
190 * based on HW features and the admin setting via the boot parameter;
191 * defaults to false.
192 */
193 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
194
195 /*
196 * Controls CPU Fill buffer clear before VMenter. This is a subset of
197 * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
198 * mitigation is required.
199 */
200 DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
201 EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
202
203 #undef pr_fmt
204 #define pr_fmt(fmt) "mitigations: " fmt
205
206 static void __init cpu_print_attack_vectors(void)
207 {
208 pr_info("Enabled attack vectors: ");
209
210 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
211 pr_cont("user_kernel, ");
212
213 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER))
214 pr_cont("user_user, ");
215
216 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
217 pr_cont("guest_host, ");
218
219 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST))
220 pr_cont("guest_guest, ");
221
222 pr_cont("SMT mitigations: ");
223
224 switch (smt_mitigations) {
225 case SMT_MITIGATIONS_OFF:
226 pr_cont("off\n");
227 break;
228 case SMT_MITIGATIONS_AUTO:
229 pr_cont("auto\n");
230 break;
231 case SMT_MITIGATIONS_ON:
232 pr_cont("on\n");
233 }
234 }
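/*
 * Example output (illustrative) on a system where the user_kernel, user_user
 * and guest_host vectors are mitigated and SMT mitigations are left in auto
 * mode:
 *
 *   mitigations: Enabled attack vectors: user_kernel, user_user, guest_host, SMT mitigations: auto
 */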
235
236 void __init cpu_select_mitigations(void)
237 {
238 /*
239 * Read the SPEC_CTRL MSR to account for reserved bits which may
240 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
241 * init code as it is not enumerated and depends on the family.
242 */
243 if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
244 rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
245
246 /*
247 * Previously running kernel (kexec), may have some controls
248 * turned ON. Clear them and let the mitigations setup below
249 * rediscover them based on configuration.
250 */
251 x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
252 }
253
254 x86_arch_cap_msr = x86_read_arch_cap_msr();
255
256 cpu_print_attack_vectors();
257
258 /* Select the proper CPU mitigations before patching alternatives: */
259 spectre_v1_select_mitigation();
260 spectre_v2_select_mitigation();
261 retbleed_select_mitigation();
262 spectre_v2_user_select_mitigation();
263 ssb_select_mitigation();
264 l1tf_select_mitigation();
265 mds_select_mitigation();
266 taa_select_mitigation();
267 mmio_select_mitigation();
268 rfds_select_mitigation();
269 srbds_select_mitigation();
270 l1d_flush_select_mitigation();
271 srso_select_mitigation();
272 gds_select_mitigation();
273 its_select_mitigation();
274 bhi_select_mitigation();
275 tsa_select_mitigation();
276 vmscape_select_mitigation();
277
278 /*
279 * After mitigations are selected, some may need to update their
280 * choices.
281 */
282 spectre_v2_update_mitigation();
283 /*
284 * retbleed_update_mitigation() relies on the state set by
285 * spectre_v2_update_mitigation(); specifically it wants to know about
286 * spectre_v2=ibrs.
287 */
288 retbleed_update_mitigation();
289 /*
290 * its_update_mitigation() depends on spectre_v2_update_mitigation()
291 * and retbleed_update_mitigation().
292 */
293 its_update_mitigation();
294
295 /*
296 * spectre_v2_user_update_mitigation() depends on
297 * retbleed_update_mitigation(), specifically the STIBP
298 * selection is forced for UNRET or IBPB.
299 */
300 spectre_v2_user_update_mitigation();
301 mds_update_mitigation();
302 taa_update_mitigation();
303 mmio_update_mitigation();
304 rfds_update_mitigation();
305 bhi_update_mitigation();
306 /* srso_update_mitigation() depends on retbleed_update_mitigation(). */
307 srso_update_mitigation();
308 vmscape_update_mitigation();
309
310 spectre_v1_apply_mitigation();
311 spectre_v2_apply_mitigation();
312 retbleed_apply_mitigation();
313 spectre_v2_user_apply_mitigation();
314 ssb_apply_mitigation();
315 l1tf_apply_mitigation();
316 mds_apply_mitigation();
317 taa_apply_mitigation();
318 mmio_apply_mitigation();
319 rfds_apply_mitigation();
320 srbds_apply_mitigation();
321 srso_apply_mitigation();
322 gds_apply_mitigation();
323 its_apply_mitigation();
324 bhi_apply_mitigation();
325 tsa_apply_mitigation();
326 vmscape_apply_mitigation();
327 }
328
329 /*
330 * NOTE: This function is *only* called for SVM, since Intel uses
331 * MSR_IA32_SPEC_CTRL for SSBD.
332 */
333 void
334 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
335 {
336 u64 guestval, hostval;
337 struct thread_info *ti = current_thread_info();
338
339 /*
340 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
341 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
342 */
343 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
344 !static_cpu_has(X86_FEATURE_VIRT_SSBD))
345 return;
346
347 /*
348 * If the host has SSBD mitigation enabled, force it in the host's
349 * virtual MSR value. If it's not permanently enabled, evaluate
350 * current's TIF_SSBD thread flag.
351 */
352 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
353 hostval = SPEC_CTRL_SSBD;
354 else
355 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
356
357 /* Sanitize the guest value */
358 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
359
360 if (hostval != guestval) {
361 unsigned long tif;
362
363 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
364 ssbd_spec_ctrl_to_tif(hostval);
365
366 speculation_ctrl_update(tif);
367 }
368 }
369 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
370
371 static void x86_amd_ssb_disable(void)
372 {
373 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
374
375 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
376 wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
377 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
378 wrmsrq(MSR_AMD64_LS_CFG, msrval);
379 }
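/*
 * Note: when both features are enumerated, MSR_AMD64_VIRT_SPEC_CTRL is
 * preferred over the family-specific MSR_AMD64_LS_CFG, as reflected by the
 * if/else ordering above.
 */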
380
381 #undef pr_fmt
382 #define pr_fmt(fmt) "MDS: " fmt
383
384 /*
385 * Returns true if vulnerability should be mitigated based on the
386 * selected attack vector controls.
387 *
388 * See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
389 */
390 static bool __init should_mitigate_vuln(unsigned int bug)
391 {
392 switch (bug) {
393 /*
394 * The only runtime-selected spectre_v1 mitigations in the kernel are
395 * related to SWAPGS protection on kernel entry. Therefore, protection
396 * is only required for the user->kernel attack vector.
397 */
398 case X86_BUG_SPECTRE_V1:
399 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL);
400
401 case X86_BUG_SPECTRE_V2:
402 case X86_BUG_RETBLEED:
403 case X86_BUG_L1TF:
404 case X86_BUG_ITS:
405 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
406 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
407
408 case X86_BUG_SPECTRE_V2_USER:
409 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
410 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
411
412 /*
413 * All the vulnerabilities below allow potentially leaking data
414 * across address spaces. Therefore, mitigation is required for
415 * any of these 4 attack vectors.
416 */
417 case X86_BUG_MDS:
418 case X86_BUG_TAA:
419 case X86_BUG_MMIO_STALE_DATA:
420 case X86_BUG_RFDS:
421 case X86_BUG_SRBDS:
422 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
423 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
424 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
425 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
426
427 case X86_BUG_GDS:
428 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
429 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
430 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
431 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
432 (smt_mitigations != SMT_MITIGATIONS_OFF);
433
434 case X86_BUG_SPEC_STORE_BYPASS:
435 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER);
436
437 case X86_BUG_VMSCAPE:
438 return cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
439
440 default:
441 WARN(1, "Unknown bug %x\n", bug);
442 return false;
443 }
444 }
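/*
 * Worked example (illustrative): if only the user_kernel vector is enabled,
 * should_mitigate_vuln(X86_BUG_SPECTRE_V1) returns true, while
 * should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS) returns false because SSB
 * is only tied to the user_user vector above.
 */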
445
446 /* Default mitigation for MDS-affected CPUs */
447 static enum mds_mitigations mds_mitigation __ro_after_init =
448 IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
449 static bool mds_nosmt __ro_after_init = false;
450
451 static const char * const mds_strings[] = {
452 [MDS_MITIGATION_OFF] = "Vulnerable",
453 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
454 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
455 };
456
457 enum taa_mitigations {
458 TAA_MITIGATION_OFF,
459 TAA_MITIGATION_AUTO,
460 TAA_MITIGATION_UCODE_NEEDED,
461 TAA_MITIGATION_VERW,
462 TAA_MITIGATION_TSX_DISABLED,
463 };
464
465 /* Default mitigation for TAA-affected CPUs */
466 static enum taa_mitigations taa_mitigation __ro_after_init =
467 IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;
468
469 enum mmio_mitigations {
470 MMIO_MITIGATION_OFF,
471 MMIO_MITIGATION_AUTO,
472 MMIO_MITIGATION_UCODE_NEEDED,
473 MMIO_MITIGATION_VERW,
474 };
475
476 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
477 static enum mmio_mitigations mmio_mitigation __ro_after_init =
478 IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;
479
480 enum rfds_mitigations {
481 RFDS_MITIGATION_OFF,
482 RFDS_MITIGATION_AUTO,
483 RFDS_MITIGATION_VERW,
484 RFDS_MITIGATION_UCODE_NEEDED,
485 };
486
487 /* Default mitigation for Register File Data Sampling */
488 static enum rfds_mitigations rfds_mitigation __ro_after_init =
489 IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
490
491 /*
492 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
493 * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
494 */
495 static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
496
497 static void __init mds_select_mitigation(void)
498 {
499 if (!boot_cpu_has_bug(X86_BUG_MDS)) {
500 mds_mitigation = MDS_MITIGATION_OFF;
501 return;
502 }
503
504 if (mds_mitigation == MDS_MITIGATION_AUTO) {
505 if (should_mitigate_vuln(X86_BUG_MDS))
506 mds_mitigation = MDS_MITIGATION_FULL;
507 else
508 mds_mitigation = MDS_MITIGATION_OFF;
509 }
510
511 if (mds_mitigation == MDS_MITIGATION_OFF)
512 return;
513
514 verw_clear_cpu_buf_mitigation_selected = true;
515 }
516
517 static void __init mds_update_mitigation(void)
518 {
519 if (!boot_cpu_has_bug(X86_BUG_MDS))
520 return;
521
522 /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
523 if (verw_clear_cpu_buf_mitigation_selected)
524 mds_mitigation = MDS_MITIGATION_FULL;
525
526 if (mds_mitigation == MDS_MITIGATION_FULL) {
527 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
528 mds_mitigation = MDS_MITIGATION_VMWERV;
529 }
530
531 pr_info("%s\n", mds_strings[mds_mitigation]);
532 }
533
534 static void __init mds_apply_mitigation(void)
535 {
536 if (mds_mitigation == MDS_MITIGATION_FULL ||
537 mds_mitigation == MDS_MITIGATION_VMWERV) {
538 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
539 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
540 (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
541 cpu_smt_disable(false);
542 }
543 }
544
545 static int __init mds_cmdline(char *str)
546 {
547 if (!boot_cpu_has_bug(X86_BUG_MDS))
548 return 0;
549
550 if (!str)
551 return -EINVAL;
552
553 if (!strcmp(str, "off"))
554 mds_mitigation = MDS_MITIGATION_OFF;
555 else if (!strcmp(str, "full"))
556 mds_mitigation = MDS_MITIGATION_FULL;
557 else if (!strcmp(str, "full,nosmt")) {
558 mds_mitigation = MDS_MITIGATION_FULL;
559 mds_nosmt = true;
560 }
561
562 return 0;
563 }
564 early_param("mds", mds_cmdline);
565
566 #undef pr_fmt
567 #define pr_fmt(fmt) "TAA: " fmt
568
569 static bool taa_nosmt __ro_after_init;
570
571 static const char * const taa_strings[] = {
572 [TAA_MITIGATION_OFF] = "Vulnerable",
573 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
574 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
575 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
576 };
577
578 static bool __init taa_vulnerable(void)
579 {
580 return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
581 }
582
583 static void __init taa_select_mitigation(void)
584 {
585 if (!boot_cpu_has_bug(X86_BUG_TAA)) {
586 taa_mitigation = TAA_MITIGATION_OFF;
587 return;
588 }
589
590 /* TSX previously disabled by tsx=off */
591 if (!boot_cpu_has(X86_FEATURE_RTM)) {
592 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
593 return;
594 }
595
596 /* Microcode will be checked in taa_update_mitigation(). */
597 if (taa_mitigation == TAA_MITIGATION_AUTO) {
598 if (should_mitigate_vuln(X86_BUG_TAA))
599 taa_mitigation = TAA_MITIGATION_VERW;
600 else
601 taa_mitigation = TAA_MITIGATION_OFF;
602 }
603
604 if (taa_mitigation != TAA_MITIGATION_OFF)
605 verw_clear_cpu_buf_mitigation_selected = true;
606 }
607
608 static void __init taa_update_mitigation(void)
609 {
610 if (!taa_vulnerable())
611 return;
612
613 if (verw_clear_cpu_buf_mitigation_selected)
614 taa_mitigation = TAA_MITIGATION_VERW;
615
616 if (taa_mitigation == TAA_MITIGATION_VERW) {
617 /* Check if the requisite ucode is available. */
618 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
619 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
620
621 /*
622 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
623 * A microcode update fixes this behavior to clear CPU buffers. It also
624 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
625 * ARCH_CAP_TSX_CTRL_MSR bit.
626 *
627 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
628 * update is required.
629 */
630 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
631 !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
632 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
633 }
634
635 pr_info("%s\n", taa_strings[taa_mitigation]);
636 }
637
638 static void __init taa_apply_mitigation(void)
639 {
640 if (taa_mitigation == TAA_MITIGATION_VERW ||
641 taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
642 /*
643 * TSX is enabled, select alternate mitigation for TAA which is
644 * the same as MDS. Enable MDS static branch to clear CPU buffers.
645 *
646 * For guests that can't determine whether the correct microcode is
647 * present on host, enable the mitigation for UCODE_NEEDED as well.
648 */
649 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
650
651 if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
652 cpu_smt_disable(false);
653 }
654 }
655
656 static int __init tsx_async_abort_parse_cmdline(char *str)
657 {
658 if (!boot_cpu_has_bug(X86_BUG_TAA))
659 return 0;
660
661 if (!str)
662 return -EINVAL;
663
664 if (!strcmp(str, "off")) {
665 taa_mitigation = TAA_MITIGATION_OFF;
666 } else if (!strcmp(str, "full")) {
667 taa_mitigation = TAA_MITIGATION_VERW;
668 } else if (!strcmp(str, "full,nosmt")) {
669 taa_mitigation = TAA_MITIGATION_VERW;
670 taa_nosmt = true;
671 }
672
673 return 0;
674 }
675 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
676
677 #undef pr_fmt
678 #define pr_fmt(fmt) "MMIO Stale Data: " fmt
679
680 static bool mmio_nosmt __ro_after_init = false;
681
682 static const char * const mmio_strings[] = {
683 [MMIO_MITIGATION_OFF] = "Vulnerable",
684 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
685 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
686 };
687
688 static void __init mmio_select_mitigation(void)
689 {
690 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
691 mmio_mitigation = MMIO_MITIGATION_OFF;
692 return;
693 }
694
695 /* Microcode will be checked in mmio_update_mitigation(). */
696 if (mmio_mitigation == MMIO_MITIGATION_AUTO) {
697 if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA))
698 mmio_mitigation = MMIO_MITIGATION_VERW;
699 else
700 mmio_mitigation = MMIO_MITIGATION_OFF;
701 }
702
703 if (mmio_mitigation == MMIO_MITIGATION_OFF)
704 return;
705
706 /*
707 * Enable CPU buffer clear mitigation for host and VMM, if also affected
708 * by MDS or TAA.
709 */
710 if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
711 verw_clear_cpu_buf_mitigation_selected = true;
712 }
713
714 static void __init mmio_update_mitigation(void)
715 {
716 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
717 return;
718
719 if (verw_clear_cpu_buf_mitigation_selected)
720 mmio_mitigation = MMIO_MITIGATION_VERW;
721
722 if (mmio_mitigation == MMIO_MITIGATION_VERW) {
723 /*
724 * Check if the system has the right microcode.
725 *
726 * CPU Fill buffer clear mitigation is enumerated by either an explicit
727 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
728 * affected systems.
729 */
730 if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
731 (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
732 boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
733 !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
734 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
735 }
736
737 pr_info("%s\n", mmio_strings[mmio_mitigation]);
738 }
739
740 static void __init mmio_apply_mitigation(void)
741 {
742 if (mmio_mitigation == MMIO_MITIGATION_OFF)
743 return;
744
745 /*
746 * Only enable the VMM mitigation if the CPU buffer clear mitigation is
747 * not being used.
748 */
749 if (verw_clear_cpu_buf_mitigation_selected) {
750 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
751 static_branch_disable(&cpu_buf_vm_clear);
752 } else {
753 static_branch_enable(&cpu_buf_vm_clear);
754 }
755
756 /*
757 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
758 * be propagated to uncore buffers, clearing the Fill buffers on idle
759 * is required irrespective of SMT state.
760 */
761 if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
762 static_branch_enable(&cpu_buf_idle_clear);
763
764 if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
765 cpu_smt_disable(false);
766 }
767
768 static int __init mmio_stale_data_parse_cmdline(char *str)
769 {
770 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
771 return 0;
772
773 if (!str)
774 return -EINVAL;
775
776 if (!strcmp(str, "off")) {
777 mmio_mitigation = MMIO_MITIGATION_OFF;
778 } else if (!strcmp(str, "full")) {
779 mmio_mitigation = MMIO_MITIGATION_VERW;
780 } else if (!strcmp(str, "full,nosmt")) {
781 mmio_mitigation = MMIO_MITIGATION_VERW;
782 mmio_nosmt = true;
783 }
784
785 return 0;
786 }
787 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
788
789 #undef pr_fmt
790 #define pr_fmt(fmt) "Register File Data Sampling: " fmt
791
792 static const char * const rfds_strings[] = {
793 [RFDS_MITIGATION_OFF] = "Vulnerable",
794 [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
795 [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
796 };
797
798 static inline bool __init verw_clears_cpu_reg_file(void)
799 {
800 return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
801 }
802
803 static void __init rfds_select_mitigation(void)
804 {
805 if (!boot_cpu_has_bug(X86_BUG_RFDS)) {
806 rfds_mitigation = RFDS_MITIGATION_OFF;
807 return;
808 }
809
810 if (rfds_mitigation == RFDS_MITIGATION_AUTO) {
811 if (should_mitigate_vuln(X86_BUG_RFDS))
812 rfds_mitigation = RFDS_MITIGATION_VERW;
813 else
814 rfds_mitigation = RFDS_MITIGATION_OFF;
815 }
816
817 if (rfds_mitigation == RFDS_MITIGATION_OFF)
818 return;
819
820 if (verw_clears_cpu_reg_file())
821 verw_clear_cpu_buf_mitigation_selected = true;
822 }
823
824 static void __init rfds_update_mitigation(void)
825 {
826 if (!boot_cpu_has_bug(X86_BUG_RFDS))
827 return;
828
829 if (verw_clear_cpu_buf_mitigation_selected)
830 rfds_mitigation = RFDS_MITIGATION_VERW;
831
832 if (rfds_mitigation == RFDS_MITIGATION_VERW) {
833 if (!verw_clears_cpu_reg_file())
834 rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
835 }
836
837 pr_info("%s\n", rfds_strings[rfds_mitigation]);
838 }
839
840 static void __init rfds_apply_mitigation(void)
841 {
842 if (rfds_mitigation == RFDS_MITIGATION_VERW)
843 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
844 }
845
846 static __init int rfds_parse_cmdline(char *str)
847 {
848 if (!str)
849 return -EINVAL;
850
851 if (!boot_cpu_has_bug(X86_BUG_RFDS))
852 return 0;
853
854 if (!strcmp(str, "off"))
855 rfds_mitigation = RFDS_MITIGATION_OFF;
856 else if (!strcmp(str, "on"))
857 rfds_mitigation = RFDS_MITIGATION_VERW;
858
859 return 0;
860 }
861 early_param("reg_file_data_sampling", rfds_parse_cmdline);
862
863 #undef pr_fmt
864 #define pr_fmt(fmt) "SRBDS: " fmt
865
866 enum srbds_mitigations {
867 SRBDS_MITIGATION_OFF,
868 SRBDS_MITIGATION_AUTO,
869 SRBDS_MITIGATION_UCODE_NEEDED,
870 SRBDS_MITIGATION_FULL,
871 SRBDS_MITIGATION_TSX_OFF,
872 SRBDS_MITIGATION_HYPERVISOR,
873 };
874
875 static enum srbds_mitigations srbds_mitigation __ro_after_init =
876 IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;
877
878 static const char * const srbds_strings[] = {
879 [SRBDS_MITIGATION_OFF] = "Vulnerable",
880 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
881 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
882 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
883 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
884 };
885
886 static bool srbds_off;
887
888 void update_srbds_msr(void)
889 {
890 u64 mcu_ctrl;
891
892 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
893 return;
894
895 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
896 return;
897
898 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
899 return;
900
901 /*
902 * An MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
903 * being disabled may not have received the SRBDS MSR microcode.
904 */
905 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
906 return;
907
908 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
909
910 switch (srbds_mitigation) {
911 case SRBDS_MITIGATION_OFF:
912 case SRBDS_MITIGATION_TSX_OFF:
913 mcu_ctrl |= RNGDS_MITG_DIS;
914 break;
915 case SRBDS_MITIGATION_FULL:
916 mcu_ctrl &= ~RNGDS_MITG_DIS;
917 break;
918 default:
919 break;
920 }
921
922 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
923 }
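/*
 * Note: RNGDS_MITG_DIS set in MSR_IA32_MCU_OPT_CTRL means the SRBDS microcode
 * mitigation is turned off, hence the bit is set for the OFF/TSX_OFF states
 * and cleared for SRBDS_MITIGATION_FULL above.
 */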
924
925 static void __init srbds_select_mitigation(void)
926 {
927 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) {
928 srbds_mitigation = SRBDS_MITIGATION_OFF;
929 return;
930 }
931
932 if (srbds_mitigation == SRBDS_MITIGATION_AUTO) {
933 if (should_mitigate_vuln(X86_BUG_SRBDS))
934 srbds_mitigation = SRBDS_MITIGATION_FULL;
935 else {
936 srbds_mitigation = SRBDS_MITIGATION_OFF;
937 return;
938 }
939 }
940
941 /*
942 * Check to see if this is one of the MDS_NO systems supporting TSX that
943 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
944 * by Processor MMIO Stale Data vulnerability.
945 */
946 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
947 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
948 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
949 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
950 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
951 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
952 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
953 else if (srbds_off)
954 srbds_mitigation = SRBDS_MITIGATION_OFF;
955
956 pr_info("%s\n", srbds_strings[srbds_mitigation]);
957 }
958
959 static void __init srbds_apply_mitigation(void)
960 {
961 update_srbds_msr();
962 }
963
964 static int __init srbds_parse_cmdline(char *str)
965 {
966 if (!str)
967 return -EINVAL;
968
969 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
970 return 0;
971
972 srbds_off = !strcmp(str, "off");
973 return 0;
974 }
975 early_param("srbds", srbds_parse_cmdline);
976
977 #undef pr_fmt
978 #define pr_fmt(fmt) "L1D Flush : " fmt
979
980 enum l1d_flush_mitigations {
981 L1D_FLUSH_OFF = 0,
982 L1D_FLUSH_ON,
983 };
984
985 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
986
987 static void __init l1d_flush_select_mitigation(void)
988 {
989 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
990 return;
991
992 static_branch_enable(&switch_mm_cond_l1d_flush);
993 pr_info("Conditional flush on switch_mm() enabled\n");
994 }
995
996 static int __init l1d_flush_parse_cmdline(char *str)
997 {
998 if (!strcmp(str, "on"))
999 l1d_flush_mitigation = L1D_FLUSH_ON;
1000
1001 return 0;
1002 }
1003 early_param("l1d_flush", l1d_flush_parse_cmdline);
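/*
 * Example (illustrative): "l1d_flush=on" only arms switch_mm_cond_l1d_flush
 * when the CPU enumerates X86_FEATURE_FLUSH_L1D, as checked in
 * l1d_flush_select_mitigation() above.
 */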
1004
1005 #undef pr_fmt
1006 #define pr_fmt(fmt) "GDS: " fmt
1007
1008 enum gds_mitigations {
1009 GDS_MITIGATION_OFF,
1010 GDS_MITIGATION_AUTO,
1011 GDS_MITIGATION_UCODE_NEEDED,
1012 GDS_MITIGATION_FORCE,
1013 GDS_MITIGATION_FULL,
1014 GDS_MITIGATION_FULL_LOCKED,
1015 GDS_MITIGATION_HYPERVISOR,
1016 };
1017
1018 static enum gds_mitigations gds_mitigation __ro_after_init =
1019 IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;
1020
1021 static const char * const gds_strings[] = {
1022 [GDS_MITIGATION_OFF] = "Vulnerable",
1023 [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
1024 [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode",
1025 [GDS_MITIGATION_FULL] = "Mitigation: Microcode",
1026 [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)",
1027 [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
1028 };
1029
1030 bool gds_ucode_mitigated(void)
1031 {
1032 return (gds_mitigation == GDS_MITIGATION_FULL ||
1033 gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
1034 }
1035 EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
1036
1037 void update_gds_msr(void)
1038 {
1039 u64 mcu_ctrl_after;
1040 u64 mcu_ctrl;
1041
1042 switch (gds_mitigation) {
1043 case GDS_MITIGATION_OFF:
1044 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1045 mcu_ctrl |= GDS_MITG_DIS;
1046 break;
1047 case GDS_MITIGATION_FULL_LOCKED:
1048 /*
1049 * The LOCKED state comes from the boot CPU. APs might not have
1050 * the same state. Make sure the mitigation is enabled on all
1051 * CPUs.
1052 */
1053 case GDS_MITIGATION_FULL:
1054 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1055 mcu_ctrl &= ~GDS_MITG_DIS;
1056 break;
1057 case GDS_MITIGATION_FORCE:
1058 case GDS_MITIGATION_UCODE_NEEDED:
1059 case GDS_MITIGATION_HYPERVISOR:
1060 case GDS_MITIGATION_AUTO:
1061 return;
1062 }
1063
1064 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1065
1066 /*
1067 * Check to make sure that the WRMSR value was not ignored. Writes to
1068 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
1069 * processor was not.
1070 */
1071 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
1072 WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
1073 }
1074
1075 static void __init gds_select_mitigation(void)
1076 {
1077 u64 mcu_ctrl;
1078
1079 if (!boot_cpu_has_bug(X86_BUG_GDS))
1080 return;
1081
1082 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1083 gds_mitigation = GDS_MITIGATION_HYPERVISOR;
1084 return;
1085 }
1086
1087 /* Will verify below that mitigation _can_ be disabled */
1088 if (gds_mitigation == GDS_MITIGATION_AUTO) {
1089 if (should_mitigate_vuln(X86_BUG_GDS))
1090 gds_mitigation = GDS_MITIGATION_FULL;
1091 else
1092 gds_mitigation = GDS_MITIGATION_OFF;
1093 }
1094
1095 /* No microcode */
1096 if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
1097 if (gds_mitigation != GDS_MITIGATION_FORCE)
1098 gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
1099 return;
1100 }
1101
1102 /* Microcode has mitigation, use it */
1103 if (gds_mitigation == GDS_MITIGATION_FORCE)
1104 gds_mitigation = GDS_MITIGATION_FULL;
1105
1106 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
1107 if (mcu_ctrl & GDS_MITG_LOCKED) {
1108 if (gds_mitigation == GDS_MITIGATION_OFF)
1109 pr_warn("Mitigation locked. Disable failed.\n");
1110
1111 /*
1112 * The mitigation is selected from the boot CPU. All other CPUs
1113 * _should_ have the same state. If the boot CPU isn't locked
1114 * but others are then update_gds_msr() will WARN() of the state
1115 * mismatch. If the boot CPU is locked update_gds_msr() will
1116 * ensure the other CPUs have the mitigation enabled.
1117 */
1118 gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
1119 }
1120 }
1121
1122 static void __init gds_apply_mitigation(void)
1123 {
1124 if (!boot_cpu_has_bug(X86_BUG_GDS))
1125 return;
1126
1127 /* Microcode is present */
1128 if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
1129 update_gds_msr();
1130 else if (gds_mitigation == GDS_MITIGATION_FORCE) {
1131 /*
1132 * This only needs to be done on the boot CPU so do it
1133 * here rather than in update_gds_msr()
1134 */
1135 setup_clear_cpu_cap(X86_FEATURE_AVX);
1136 pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
1137 }
1138
1139 pr_info("%s\n", gds_strings[gds_mitigation]);
1140 }
1141
1142 static int __init gds_parse_cmdline(char *str)
1143 {
1144 if (!str)
1145 return -EINVAL;
1146
1147 if (!boot_cpu_has_bug(X86_BUG_GDS))
1148 return 0;
1149
1150 if (!strcmp(str, "off"))
1151 gds_mitigation = GDS_MITIGATION_OFF;
1152 else if (!strcmp(str, "force"))
1153 gds_mitigation = GDS_MITIGATION_FORCE;
1154
1155 return 0;
1156 }
1157 early_param("gather_data_sampling", gds_parse_cmdline);
1158
1159 #undef pr_fmt
1160 #define pr_fmt(fmt) "Spectre V1 : " fmt
1161
1162 enum spectre_v1_mitigation {
1163 SPECTRE_V1_MITIGATION_NONE,
1164 SPECTRE_V1_MITIGATION_AUTO,
1165 };
1166
1167 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
1168 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
1169 SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;
1170
1171 static const char * const spectre_v1_strings[] = {
1172 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
1173 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
1174 };
1175
1176 /*
1177 * Does SMAP provide full mitigation against speculative kernel access to
1178 * userspace?
1179 */
1180 static bool smap_works_speculatively(void)
1181 {
1182 if (!boot_cpu_has(X86_FEATURE_SMAP))
1183 return false;
1184
1185 /*
1186 * On CPUs which are vulnerable to Meltdown, SMAP does not
1187 * prevent speculative access to user data in the L1 cache.
1188 * Consider SMAP to be non-functional as a mitigation on these
1189 * CPUs.
1190 */
1191 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
1192 return false;
1193
1194 return true;
1195 }
1196
1197 static void __init spectre_v1_select_mitigation(void)
1198 {
1199 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1200 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1201
1202 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1))
1203 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1204 }
1205
1206 static void __init spectre_v1_apply_mitigation(void)
1207 {
1208 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1209 return;
1210
1211 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
1212 /*
1213 * With Spectre v1, a user can speculatively control either
1214 * path of a conditional swapgs with a user-controlled GS
1215 * value. The mitigation is to add lfences to both code paths.
1216 *
1217 * If FSGSBASE is enabled, the user can put a kernel address in
1218 * GS, in which case SMAP provides no protection.
1219 *
1220 * If FSGSBASE is disabled, the user can only put a user space
1221 * address in GS. That makes an attack harder, but still
1222 * possible if there's no SMAP protection.
1223 */
1224 if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
1225 !smap_works_speculatively()) {
1226 /*
1227 * Mitigation can be provided from SWAPGS itself or
1228 * PTI as the CR3 write in the Meltdown mitigation
1229 * is serializing.
1230 *
1231 * If neither is there, mitigate with an LFENCE to
1232 * stop speculation through swapgs.
1233 */
1234 if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
1235 !boot_cpu_has(X86_FEATURE_PTI))
1236 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
1237
1238 /*
1239 * Enable lfences in the kernel entry (non-swapgs)
1240 * paths, to prevent user entry from speculatively
1241 * skipping swapgs.
1242 */
1243 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
1244 }
1245 }
1246
1247 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1248 }
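/*
 * Summary of the swapgs handling above (illustrative): when FSGSBASE is
 * enabled or SMAP cannot be relied upon, FENCE_SWAPGS_KERNEL is always set,
 * and FENCE_SWAPGS_USER is set in addition only if the CPU has X86_BUG_SWAPGS
 * and PTI is not active; otherwise no extra LFENCEs are needed.
 */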
1249
1250 static int __init nospectre_v1_cmdline(char *str)
1251 {
1252 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1253 return 0;
1254 }
1255 early_param("nospectre_v1", nospectre_v1_cmdline);
1256
1257 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
1258
1259 /* Depends on spectre_v2 mitigation selected already */
1260 static inline bool cdt_possible(enum spectre_v2_mitigation mode)
1261 {
1262 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) ||
1263 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE))
1264 return false;
1265
1266 if (mode == SPECTRE_V2_RETPOLINE ||
1267 mode == SPECTRE_V2_EIBRS_RETPOLINE)
1268 return true;
1269
1270 return false;
1271 }
1272
1273 #undef pr_fmt
1274 #define pr_fmt(fmt) "RETBleed: " fmt
1275
1276 enum its_mitigation {
1277 ITS_MITIGATION_OFF,
1278 ITS_MITIGATION_AUTO,
1279 ITS_MITIGATION_VMEXIT_ONLY,
1280 ITS_MITIGATION_ALIGNED_THUNKS,
1281 ITS_MITIGATION_RETPOLINE_STUFF,
1282 };
1283
1284 static enum its_mitigation its_mitigation __ro_after_init =
1285 IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF;
1286
1287 enum retbleed_mitigation {
1288 RETBLEED_MITIGATION_NONE,
1289 RETBLEED_MITIGATION_AUTO,
1290 RETBLEED_MITIGATION_UNRET,
1291 RETBLEED_MITIGATION_IBPB,
1292 RETBLEED_MITIGATION_IBRS,
1293 RETBLEED_MITIGATION_EIBRS,
1294 RETBLEED_MITIGATION_STUFF,
1295 };
1296
1297 static const char * const retbleed_strings[] = {
1298 [RETBLEED_MITIGATION_NONE] = "Vulnerable",
1299 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
1300 [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
1301 [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
1302 [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
1303 [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
1304 };
1305
1306 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
1307 IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;
1308
1309 static int __ro_after_init retbleed_nosmt = false;
1310
1311 enum srso_mitigation {
1312 SRSO_MITIGATION_NONE,
1313 SRSO_MITIGATION_AUTO,
1314 SRSO_MITIGATION_UCODE_NEEDED,
1315 SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
1316 SRSO_MITIGATION_MICROCODE,
1317 SRSO_MITIGATION_NOSMT,
1318 SRSO_MITIGATION_SAFE_RET,
1319 SRSO_MITIGATION_IBPB,
1320 SRSO_MITIGATION_IBPB_ON_VMEXIT,
1321 SRSO_MITIGATION_BP_SPEC_REDUCE,
1322 };
1323
1324 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
1325
1326 static int __init retbleed_parse_cmdline(char *str)
1327 {
1328 if (!str)
1329 return -EINVAL;
1330
1331 while (str) {
1332 char *next = strchr(str, ',');
1333 if (next) {
1334 *next = 0;
1335 next++;
1336 }
1337
1338 if (!strcmp(str, "off")) {
1339 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1340 } else if (!strcmp(str, "auto")) {
1341 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1342 } else if (!strcmp(str, "unret")) {
1343 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1344 } else if (!strcmp(str, "ibpb")) {
1345 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1346 } else if (!strcmp(str, "stuff")) {
1347 retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1348 } else if (!strcmp(str, "nosmt")) {
1349 retbleed_nosmt = true;
1350 } else if (!strcmp(str, "force")) {
1351 setup_force_cpu_bug(X86_BUG_RETBLEED);
1352 } else {
1353 pr_err("Ignoring unknown retbleed option (%s).", str);
1354 }
1355
1356 str = next;
1357 }
1358
1359 return 0;
1360 }
1361 early_param("retbleed", retbleed_parse_cmdline);
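/*
 * Example (illustrative): "retbleed=ibpb,nosmt" selects the IBPB based
 * mitigation and additionally requests SMT to be disabled. Options are comma
 * separated and parsed in order by the loop above; the SMT request is
 * honoured in retbleed_apply_mitigation() when STIBP is not available.
 */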
1362
1363 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
1364 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
1365
1366 static void __init retbleed_select_mitigation(void)
1367 {
1368 if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) {
1369 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1370 return;
1371 }
1372
1373 switch (retbleed_mitigation) {
1374 case RETBLEED_MITIGATION_UNRET:
1375 if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
1376 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1377 pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
1378 }
1379 break;
1380 case RETBLEED_MITIGATION_IBPB:
1381 if (!boot_cpu_has(X86_FEATURE_IBPB)) {
1382 pr_err("WARNING: CPU does not support IBPB.\n");
1383 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1384 } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
1385 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
1386 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1387 }
1388 break;
1389 case RETBLEED_MITIGATION_STUFF:
1390 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1391 pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
1392 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1393 } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1394 pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
1395 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1396 }
1397 break;
1398 default:
1399 break;
1400 }
1401
1402 if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
1403 return;
1404
1405 if (!should_mitigate_vuln(X86_BUG_RETBLEED)) {
1406 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1407 return;
1408 }
1409
1410 /* Intel mitigation selected in retbleed_update_mitigation() */
1411 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1412 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
1413 if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
1414 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1415 else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
1416 boot_cpu_has(X86_FEATURE_IBPB))
1417 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1418 else
1419 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1420 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1421 /* Final mitigation depends on spectre-v2 selection */
1422 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
1423 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1424 else if (boot_cpu_has(X86_FEATURE_IBRS))
1425 retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1426 else
1427 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1428 }
1429 }
1430
1431 static void __init retbleed_update_mitigation(void)
1432 {
1433 if (!boot_cpu_has_bug(X86_BUG_RETBLEED))
1434 return;
1435
1436 /* ITS can also enable stuffing */
1437 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF)
1438 retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1439
1440 /* If SRSO is using IBPB, that works for retbleed too */
1441 if (srso_mitigation == SRSO_MITIGATION_IBPB)
1442 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1443
1444 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF &&
1445 !cdt_possible(spectre_v2_enabled)) {
1446 pr_err("WARNING: retbleed=stuff depends on retpoline\n");
1447 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1448 }
1449
1450 /*
1451 * Let IBRS trump all on Intel without affecting the effects of the
1452 * retbleed= cmdline option except for call depth based stuffing
1453 */
1454 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1455 switch (spectre_v2_enabled) {
1456 case SPECTRE_V2_IBRS:
1457 retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1458 break;
1459 case SPECTRE_V2_EIBRS:
1460 case SPECTRE_V2_EIBRS_RETPOLINE:
1461 case SPECTRE_V2_EIBRS_LFENCE:
1462 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1463 break;
1464 default:
1465 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) {
1466 pr_err(RETBLEED_INTEL_MSG);
1467 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1468 }
1469 }
1470 }
1471
1472 pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
1473 }
1474
1475 static void __init retbleed_apply_mitigation(void)
1476 {
1477 bool mitigate_smt = false;
1478
1479 switch (retbleed_mitigation) {
1480 case RETBLEED_MITIGATION_NONE:
1481 return;
1482
1483 case RETBLEED_MITIGATION_UNRET:
1484 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1485 setup_force_cpu_cap(X86_FEATURE_UNRET);
1486
1487 set_return_thunk(retbleed_return_thunk);
1488
1489 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
1490 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
1491 pr_err(RETBLEED_UNTRAIN_MSG);
1492
1493 mitigate_smt = true;
1494 break;
1495
1496 case RETBLEED_MITIGATION_IBPB:
1497 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1498 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
1499 mitigate_smt = true;
1500
1501 /*
1502 * IBPB on entry already obviates the need for
1503 * software-based untraining so clear those in case some
1504 * other mitigation like SRSO has selected them.
1505 */
1506 setup_clear_cpu_cap(X86_FEATURE_UNRET);
1507 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
1508
1509 /*
1510 * There is no need for RSB filling: write_ibpb() ensures
1511 * all predictions, including the RSB, are invalidated,
1512 * regardless of IBPB implementation.
1513 */
1514 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1515
1516 break;
1517
1518 case RETBLEED_MITIGATION_STUFF:
1519 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1520 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1521
1522 set_return_thunk(call_depth_return_thunk);
1523 break;
1524
1525 default:
1526 break;
1527 }
1528
1529 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
1530 (retbleed_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
1531 cpu_smt_disable(false);
1532 }
1533
1534 #undef pr_fmt
1535 #define pr_fmt(fmt) "ITS: " fmt
1536
1537 static const char * const its_strings[] = {
1538 [ITS_MITIGATION_OFF] = "Vulnerable",
1539 [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
1540 [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
1541 [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
1542 };
1543
1544 static int __init its_parse_cmdline(char *str)
1545 {
1546 if (!str)
1547 return -EINVAL;
1548
1549 if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
1550 pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
1551 return 0;
1552 }
1553
1554 if (!strcmp(str, "off")) {
1555 its_mitigation = ITS_MITIGATION_OFF;
1556 } else if (!strcmp(str, "on")) {
1557 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1558 } else if (!strcmp(str, "force")) {
1559 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1560 setup_force_cpu_bug(X86_BUG_ITS);
1561 } else if (!strcmp(str, "vmexit")) {
1562 its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
1563 } else if (!strcmp(str, "stuff")) {
1564 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1565 } else {
1566 pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
1567 }
1568
1569 return 0;
1570 }
1571 early_param("indirect_target_selection", its_parse_cmdline);
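/*
 * Example (illustrative): "indirect_target_selection=vmexit" restricts the
 * mitigation to the guest/host boundary; its_select_mitigation() upgrades
 * this to aligned thunks on CPUs that are not ITS_NATIVE_ONLY affected.
 */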
1572
1573 static void __init its_select_mitigation(void)
1574 {
1575 if (!boot_cpu_has_bug(X86_BUG_ITS)) {
1576 its_mitigation = ITS_MITIGATION_OFF;
1577 return;
1578 }
1579
1580 if (its_mitigation == ITS_MITIGATION_AUTO) {
1581 if (should_mitigate_vuln(X86_BUG_ITS))
1582 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1583 else
1584 its_mitigation = ITS_MITIGATION_OFF;
1585 }
1586
1587 if (its_mitigation == ITS_MITIGATION_OFF)
1588 return;
1589
1590 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
1591 !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
1592 pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
1593 its_mitigation = ITS_MITIGATION_OFF;
1594 return;
1595 }
1596
1597 if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
1598 pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
1599 its_mitigation = ITS_MITIGATION_OFF;
1600 return;
1601 }
1602
1603 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1604 !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1605 pr_err("RSB stuff mitigation not supported, using default\n");
1606 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1607 }
1608
1609 if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY &&
1610 !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
1611 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1612 }
1613
1614 static void __init its_update_mitigation(void)
1615 {
1616 if (!boot_cpu_has_bug(X86_BUG_ITS))
1617 return;
1618
1619 switch (spectre_v2_enabled) {
1620 case SPECTRE_V2_NONE:
1621 if (its_mitigation != ITS_MITIGATION_OFF)
1622 pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
1623 its_mitigation = ITS_MITIGATION_OFF;
1624 break;
1625 case SPECTRE_V2_RETPOLINE:
1626 case SPECTRE_V2_EIBRS_RETPOLINE:
1627 /* Retpoline+CDT mitigates ITS */
1628 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
1629 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1630 break;
1631 case SPECTRE_V2_LFENCE:
1632 case SPECTRE_V2_EIBRS_LFENCE:
1633 pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
1634 its_mitigation = ITS_MITIGATION_OFF;
1635 break;
1636 default:
1637 break;
1638 }
1639
1640 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1641 !cdt_possible(spectre_v2_enabled))
1642 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1643
1644 pr_info("%s\n", its_strings[its_mitigation]);
1645 }
1646
1647 static void __init its_apply_mitigation(void)
1648 {
1649 switch (its_mitigation) {
1650 case ITS_MITIGATION_OFF:
1651 case ITS_MITIGATION_AUTO:
1652 case ITS_MITIGATION_VMEXIT_ONLY:
1653 break;
1654 case ITS_MITIGATION_ALIGNED_THUNKS:
1655 if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
1656 setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
1657
1658 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1659 set_return_thunk(its_return_thunk);
1660 break;
1661 case ITS_MITIGATION_RETPOLINE_STUFF:
1662 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1663 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1664 set_return_thunk(call_depth_return_thunk);
1665 break;
1666 }
1667 }
1668
1669 #undef pr_fmt
1670 #define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt
1671
1672 enum tsa_mitigations {
1673 TSA_MITIGATION_NONE,
1674 TSA_MITIGATION_AUTO,
1675 TSA_MITIGATION_UCODE_NEEDED,
1676 TSA_MITIGATION_USER_KERNEL,
1677 TSA_MITIGATION_VM,
1678 TSA_MITIGATION_FULL,
1679 };
1680
1681 static const char * const tsa_strings[] = {
1682 [TSA_MITIGATION_NONE] = "Vulnerable",
1683 [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
1684 [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
1685 [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
1686 [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
1687 };
1688
1689 static enum tsa_mitigations tsa_mitigation __ro_after_init =
1690 IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;
1691
1692 static int __init tsa_parse_cmdline(char *str)
1693 {
1694 if (!str)
1695 return -EINVAL;
1696
1697 if (!strcmp(str, "off"))
1698 tsa_mitigation = TSA_MITIGATION_NONE;
1699 else if (!strcmp(str, "on"))
1700 tsa_mitigation = TSA_MITIGATION_FULL;
1701 else if (!strcmp(str, "user"))
1702 tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1703 else if (!strcmp(str, "vm"))
1704 tsa_mitigation = TSA_MITIGATION_VM;
1705 else
1706 pr_err("Ignoring unknown tsa=%s option.\n", str);
1707
1708 return 0;
1709 }
1710 early_param("tsa", tsa_parse_cmdline);
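/*
 * Example (illustrative): "tsa=user" clears CPU buffers only at the
 * user/kernel boundary, "tsa=vm" only around entering a guest, and "tsa=on"
 * enables both, see tsa_apply_mitigation() below.
 */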
1711
1712 static void __init tsa_select_mitigation(void)
1713 {
1714 if (!boot_cpu_has_bug(X86_BUG_TSA)) {
1715 tsa_mitigation = TSA_MITIGATION_NONE;
1716 return;
1717 }
1718
1719 if (tsa_mitigation == TSA_MITIGATION_AUTO) {
1720 bool vm = false, uk = false;
1721
1722 tsa_mitigation = TSA_MITIGATION_NONE;
1723
1724 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
1725 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) {
1726 tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1727 uk = true;
1728 }
1729
1730 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
1731 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
1732 tsa_mitigation = TSA_MITIGATION_VM;
1733 vm = true;
1734 }
1735
1736 if (uk && vm)
1737 tsa_mitigation = TSA_MITIGATION_FULL;
1738 }
1739
1740 if (tsa_mitigation == TSA_MITIGATION_NONE)
1741 return;
1742
1743 if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
1744 tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
1745
1746 /*
1747 * No need to set verw_clear_cpu_buf_mitigation_selected - it
1748 * doesn't fit all cases here and it is not needed because this
1749 * is the only VERW-based mitigation on AMD.
1750 */
1751 pr_info("%s\n", tsa_strings[tsa_mitigation]);
1752 }
1753
1754 static void __init tsa_apply_mitigation(void)
1755 {
1756 switch (tsa_mitigation) {
1757 case TSA_MITIGATION_USER_KERNEL:
1758 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1759 break;
1760 case TSA_MITIGATION_VM:
1761 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1762 break;
1763 case TSA_MITIGATION_FULL:
1764 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1765 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1766 break;
1767 default:
1768 break;
1769 }
1770 }
1771
1772 #undef pr_fmt
1773 #define pr_fmt(fmt) "Spectre V2 : " fmt
1774
1775 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
1776 SPECTRE_V2_USER_NONE;
1777 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
1778 SPECTRE_V2_USER_NONE;
1779
1780 #ifdef CONFIG_MITIGATION_RETPOLINE
1781 static bool spectre_v2_bad_module;
1782
1783 bool retpoline_module_ok(bool has_retpoline)
1784 {
1785 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1786 return true;
1787
1788 pr_err("System may be vulnerable to spectre v2\n");
1789 spectre_v2_bad_module = true;
1790 return false;
1791 }
1792
1793 static inline const char *spectre_v2_module_string(void)
1794 {
1795 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1796 }
1797 #else
1798 static inline const char *spectre_v2_module_string(void) { return ""; }
1799 #endif
1800
1801 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1802 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1803 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1804 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1805
1806 #ifdef CONFIG_BPF_SYSCALL
1807 void unpriv_ebpf_notify(int new_state)
1808 {
1809 if (new_state)
1810 return;
1811
1812 /* Unprivileged eBPF is enabled */
1813
1814 switch (spectre_v2_enabled) {
1815 case SPECTRE_V2_EIBRS:
1816 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1817 break;
1818 case SPECTRE_V2_EIBRS_LFENCE:
1819 if (sched_smt_active())
1820 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1821 break;
1822 default:
1823 break;
1824 }
1825 }
1826 #endif
1827
1828 static inline bool match_option(const char *arg, int arglen, const char *opt)
1829 {
1830 int len = strlen(opt);
1831
1832 return len == arglen && !strncmp(arg, opt, len);
1833 }
1834
1835 /* The kernel command line selection for spectre v2 */
1836 enum spectre_v2_mitigation_cmd {
1837 SPECTRE_V2_CMD_NONE,
1838 SPECTRE_V2_CMD_AUTO,
1839 SPECTRE_V2_CMD_FORCE,
1840 SPECTRE_V2_CMD_RETPOLINE,
1841 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1842 SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1843 SPECTRE_V2_CMD_EIBRS,
1844 SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1845 SPECTRE_V2_CMD_EIBRS_LFENCE,
1846 SPECTRE_V2_CMD_IBRS,
1847 };
1848
1849 static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init =
1850 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE;
1851
1852 enum spectre_v2_user_mitigation_cmd {
1853 SPECTRE_V2_USER_CMD_NONE,
1854 SPECTRE_V2_USER_CMD_AUTO,
1855 SPECTRE_V2_USER_CMD_FORCE,
1856 SPECTRE_V2_USER_CMD_PRCTL,
1857 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1858 SPECTRE_V2_USER_CMD_SECCOMP,
1859 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1860 };
1861
1862 static enum spectre_v2_user_mitigation_cmd spectre_v2_user_cmd __ro_after_init =
1863 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;
1864
1865 static const char * const spectre_v2_user_strings[] = {
1866 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
1867 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
1868 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
1869 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
1870 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
1871 };
1872
1873 static int __init spectre_v2_user_parse_cmdline(char *str)
1874 {
1875 if (!str)
1876 return -EINVAL;
1877
1878 if (!strcmp(str, "auto"))
1879 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_AUTO;
1880 else if (!strcmp(str, "off"))
1881 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_NONE;
1882 else if (!strcmp(str, "on"))
1883 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_FORCE;
1884 else if (!strcmp(str, "prctl"))
1885 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_PRCTL;
1886 else if (!strcmp(str, "prctl,ibpb"))
1887 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_PRCTL_IBPB;
1888 else if (!strcmp(str, "seccomp"))
1889 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_SECCOMP;
1890 else if (!strcmp(str, "seccomp,ibpb"))
1891 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_SECCOMP_IBPB;
1892 else
1893 pr_err("Ignoring unknown spectre_v2_user option (%s).", str);
1894
1895 return 0;
1896 }
1897 early_param("spectre_v2_user", spectre_v2_user_parse_cmdline);
1898
1899 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1900 {
1901 return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1902 }
1903
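/*
 * Translate the spectre_v2_user= selection into separate IBPB and
 * STIBP modes. The two are tracked independently because a CPU may
 * support only one of them; any unsupported control is forced back to
 * NONE at the end.
 */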
1904 static void __init spectre_v2_user_select_mitigation(void)
1905 {
1906 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1907 return;
1908
1909 switch (spectre_v2_user_cmd) {
1910 case SPECTRE_V2_USER_CMD_NONE:
1911 return;
1912 case SPECTRE_V2_USER_CMD_FORCE:
1913 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1914 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1915 break;
1916 case SPECTRE_V2_USER_CMD_AUTO:
1917 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER))
1918 break;
1919 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1920 if (smt_mitigations == SMT_MITIGATIONS_OFF)
1921 break;
1922 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1923 break;
1924 case SPECTRE_V2_USER_CMD_PRCTL:
1925 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1926 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1927 break;
1928 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1929 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1930 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1931 break;
1932 case SPECTRE_V2_USER_CMD_SECCOMP:
1933 if (IS_ENABLED(CONFIG_SECCOMP))
1934 spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
1935 else
1936 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1937 spectre_v2_user_stibp = spectre_v2_user_ibpb;
1938 break;
1939 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1940 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1941 if (IS_ENABLED(CONFIG_SECCOMP))
1942 spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
1943 else
1944 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1945 break;
1946 }
1947
1948 /*
1949 * At this point, an STIBP mode other than "off" has been set.
1950 * If STIBP support is not being forced, check if STIBP always-on
1951 * is preferred.
1952 */
1953 if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1954 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
1955 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1956 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1957
1958 if (!boot_cpu_has(X86_FEATURE_IBPB))
1959 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1960
1961 if (!boot_cpu_has(X86_FEATURE_STIBP))
1962 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1963 }
1964
1965 static void __init spectre_v2_user_update_mitigation(void)
1966 {
1967 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1968 return;
1969
1970 /* The spectre_v2 cmd line can override spectre_v2_user options */
1971 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
1972 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1973 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1974 } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
1975 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1976 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1977 }
1978
1979 /*
1980 * STIBP is not required if the CPU lacks STIBP support, if Intel's
1981 * enhanced IBRS is enabled, or if SMT is not possible.
1982 *
1983 * Intel's Enhanced IBRS also protects against cross-thread branch target
1984 * injection in user-mode as the IBRS bit remains always set which
1985 * implicitly enables cross-thread protections. However, in legacy IBRS
1986 * mode, the IBRS bit is set only on kernel entry and cleared on return
1987 * to userspace. AMD Automatic IBRS also does not protect userspace.
1988 * These modes therefore disable the implicit cross-thread protection,
1989 * so allow for STIBP to be selected in those cases.
1990 */
1991 if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1992 !cpu_smt_possible() ||
1993 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1994 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
1995 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1996 return;
1997 }
1998
1999 if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE &&
2000 (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
2001 retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) {
2002 if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT &&
2003 spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED)
2004 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
2005 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
2006 }
2007 pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]);
2008 }
2009
2010 static void __init spectre_v2_user_apply_mitigation(void)
2011 {
2012 /* Initialize Indirect Branch Prediction Barrier */
2013 if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) {
2014 static_branch_enable(&switch_vcpu_ibpb);
2015
2016 switch (spectre_v2_user_ibpb) {
2017 case SPECTRE_V2_USER_STRICT:
2018 static_branch_enable(&switch_mm_always_ibpb);
2019 break;
2020 case SPECTRE_V2_USER_PRCTL:
2021 case SPECTRE_V2_USER_SECCOMP:
2022 static_branch_enable(&switch_mm_cond_ibpb);
2023 break;
2024 default:
2025 break;
2026 }
2027
2028 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
2029 static_key_enabled(&switch_mm_always_ibpb) ?
2030 "always-on" : "conditional");
2031 }
2032 }
2033
2034 static const char * const spectre_v2_strings[] = {
2035 [SPECTRE_V2_NONE] = "Vulnerable",
2036 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
2037 [SPECTRE_V2_LFENCE] = "Vulnerable: LFENCE",
2038 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
2039 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
2040 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
2041 [SPECTRE_V2_IBRS] = "Mitigation: IBRS",
2042 };
2043
2044 static bool nospectre_v2 __ro_after_init;
2045
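/*
 * "nospectre_v2" disables the Spectre v2 mitigation selection outright
 * and also neutralizes a later "spectre_v2=" option, since
 * spectre_v2_parse_cmdline() below returns early once nospectre_v2 is
 * set.
 */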
2046 static int __init nospectre_v2_parse_cmdline(char *str)
2047 {
2048 nospectre_v2 = true;
2049 spectre_v2_cmd = SPECTRE_V2_CMD_NONE;
2050 return 0;
2051 }
2052 early_param("nospectre_v2", nospectre_v2_parse_cmdline);
2053
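/*
 * Parse the "spectre_v2=" boot option into a SPECTRE_V2_CMD_* value.
 * Accepted values are handled below; note that "on" additionally
 * forces the SPECTRE_V2 and SPECTRE_V2_USER bug bits so the mitigation
 * is applied even on CPUs not known to be affected, and
 * "retpoline,amd" is an alias for "retpoline,lfence".
 */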
2054 static int __init spectre_v2_parse_cmdline(char *str)
2055 {
2056 if (!str)
2057 return -EINVAL;
2058
2059 if (nospectre_v2)
2060 return 0;
2061
2062 if (!strcmp(str, "off")) {
2063 spectre_v2_cmd = SPECTRE_V2_CMD_NONE;
2064 } else if (!strcmp(str, "on")) {
2065 spectre_v2_cmd = SPECTRE_V2_CMD_FORCE;
2066 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
2067 setup_force_cpu_bug(X86_BUG_SPECTRE_V2_USER);
2068 } else if (!strcmp(str, "retpoline")) {
2069 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE;
2070 } else if (!strcmp(str, "retpoline,amd") ||
2071 !strcmp(str, "retpoline,lfence")) {
2072 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE_LFENCE;
2073 } else if (!strcmp(str, "retpoline,generic")) {
2074 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE_GENERIC;
2075 } else if (!strcmp(str, "eibrs")) {
2076 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS;
2077 } else if (!strcmp(str, "eibrs,lfence")) {
2078 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS_LFENCE;
2079 } else if (!strcmp(str, "eibrs,retpoline")) {
2080 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS_RETPOLINE;
2081 } else if (!strcmp(str, "auto")) {
2082 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2083 } else if (!strcmp(str, "ibrs")) {
2084 spectre_v2_cmd = SPECTRE_V2_CMD_IBRS;
2085 } else {
2086 pr_err("Ignoring unknown spectre_v2 option (%s).", str);
2087 }
2088
2089 return 0;
2090 }
2091 early_param("spectre_v2", spectre_v2_parse_cmdline);
2092
2093 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
2094 {
2095 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
2096 pr_err("Kernel not compiled with retpoline; no mitigation available!");
2097 return SPECTRE_V2_NONE;
2098 }
2099
2100 return SPECTRE_V2_RETPOLINE;
2101 }
2102
2103 static bool __ro_after_init rrsba_disabled;
2104
2105 /* Disable in-kernel use of non-RSB RET predictors */
2106 static void __init spec_ctrl_disable_kernel_rrsba(void)
2107 {
2108 if (rrsba_disabled)
2109 return;
2110
2111 if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
2112 rrsba_disabled = true;
2113 return;
2114 }
2115
2116 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
2117 return;
2118
2119 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
2120 update_spec_ctrl(x86_spec_ctrl_base);
2121 rrsba_disabled = true;
2122 }
2123
2124 static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
2125 {
2126 /*
2127 * WARNING! There are many subtleties to consider when changing *any*
2128 * code related to RSB-related mitigations. Before doing so, carefully
2129 * read the following document, and update if necessary:
2130 *
2131 * Documentation/admin-guide/hw-vuln/rsb.rst
2132 *
2133 * In an overly simplified nutshell:
2134 *
2135 * - User->user RSB attacks are conditionally mitigated during
2136 * context switches by cond_mitigation -> write_ibpb().
2137 *
2138 * - User->kernel and guest->host attacks are mitigated by eIBRS or
2139 * RSB filling.
2140 *
2141 * Note that, depending on the config, other alternative mitigations
2142 * may end up being used instead, e.g. IBPB on entry/vmexit, call
2143 * depth tracking, or return thunks.
2144 */
2145
2146 switch (mode) {
2147 case SPECTRE_V2_NONE:
2148 break;
2149
2150 case SPECTRE_V2_EIBRS:
2151 case SPECTRE_V2_EIBRS_LFENCE:
2152 case SPECTRE_V2_EIBRS_RETPOLINE:
2153 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2154 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
2155 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
2156 }
2157 break;
2158
2159 case SPECTRE_V2_RETPOLINE:
2160 case SPECTRE_V2_LFENCE:
2161 case SPECTRE_V2_IBRS:
2162 pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
2163 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
2164 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
2165 break;
2166
2167 default:
2168 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
2169 dump_stack();
2170 break;
2171 }
2172 }
2173
2174 /*
2175 * Set BHI_DIS_S to prevent indirect branches in the kernel from being
2176 * influenced by branch history in userspace. Not needed if BHI_NO is set.
2177 */
2178 static bool __init spec_ctrl_bhi_dis(void)
2179 {
2180 if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
2181 return false;
2182
2183 x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
2184 update_spec_ctrl(x86_spec_ctrl_base);
2185 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
2186
2187 return true;
2188 }
2189
2190 enum bhi_mitigations {
2191 BHI_MITIGATION_OFF,
2192 BHI_MITIGATION_AUTO,
2193 BHI_MITIGATION_ON,
2194 BHI_MITIGATION_VMEXIT_ONLY,
2195 };
2196
2197 static enum bhi_mitigations bhi_mitigation __ro_after_init =
2198 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF;
2199
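/*
 * Parse the "spectre_bhi=" boot option: "off", "on", or "vmexit"
 * (clear the branch history buffer on VM exit only). Unknown values
 * are reported and ignored.
 */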
2200 static int __init spectre_bhi_parse_cmdline(char *str)
2201 {
2202 if (!str)
2203 return -EINVAL;
2204
2205 if (!strcmp(str, "off"))
2206 bhi_mitigation = BHI_MITIGATION_OFF;
2207 else if (!strcmp(str, "on"))
2208 bhi_mitigation = BHI_MITIGATION_ON;
2209 else if (!strcmp(str, "vmexit"))
2210 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2211 else
2212 pr_err("Ignoring unknown spectre_bhi option (%s)", str);
2213
2214 return 0;
2215 }
2216 early_param("spectre_bhi", spectre_bhi_parse_cmdline);
2217
2218 static void __init bhi_select_mitigation(void)
2219 {
2220 if (!boot_cpu_has(X86_BUG_BHI))
2221 bhi_mitigation = BHI_MITIGATION_OFF;
2222
2223 if (bhi_mitigation != BHI_MITIGATION_AUTO)
2224 return;
2225
2226 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) {
2227 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
2228 bhi_mitigation = BHI_MITIGATION_ON;
2229 else
2230 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2231 } else {
2232 bhi_mitigation = BHI_MITIGATION_OFF;
2233 }
2234 }
2235
2236 static void __init bhi_update_mitigation(void)
2237 {
2238 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE)
2239 bhi_mitigation = BHI_MITIGATION_OFF;
2240 }
2241
2242 static void __init bhi_apply_mitigation(void)
2243 {
2244 if (bhi_mitigation == BHI_MITIGATION_OFF)
2245 return;
2246
2247 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
2248 if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
2249 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
2250 spec_ctrl_disable_kernel_rrsba();
2251 if (rrsba_disabled)
2252 return;
2253 }
2254
2255 if (!IS_ENABLED(CONFIG_X86_64))
2256 return;
2257
2258 /* Mitigate in hardware if supported */
2259 if (spec_ctrl_bhi_dis())
2260 return;
2261
2262 if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
2263 pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
2264 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2265 return;
2266 }
2267
2268 pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
2269 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
2270 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2271 }
2272
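/*
 * Validate the spectre_v2= selection against the kernel configuration
 * and CPU capabilities, falling back to AUTO whenever the request
 * cannot be honoured, then translate the command into a SPECTRE_V2_*
 * mitigation mode.
 */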
2273 static void __init spectre_v2_select_mitigation(void)
2274 {
2275 if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE ||
2276 spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2277 spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
2278 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2279 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2280 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
2281 pr_err("RETPOLINE selected but not compiled in. Switching to AUTO select\n");
2282 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2283 }
2284
2285 if ((spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS ||
2286 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2287 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2288 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2289 pr_err("EIBRS selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n");
2290 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2291 }
2292
2293 if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2294 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
2295 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
2296 pr_err("LFENCE selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n");
2297 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2298 }
2299
2300 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
2301 pr_err("IBRS selected but not compiled in. Switching to AUTO select\n");
2302 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2303 }
2304
2305 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
2306 pr_err("IBRS selected but not Intel CPU. Switching to AUTO select\n");
2307 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2308 }
2309
2310 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
2311 pr_err("IBRS selected but CPU doesn't have IBRS. Switching to AUTO select\n");
2312 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2313 }
2314
2315 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
2316 pr_err("IBRS selected but running as XenPV guest. Switching to AUTO select\n");
2317 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2318 }
2319
2320 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) {
2321 spectre_v2_cmd = SPECTRE_V2_CMD_NONE;
2322 return;
2323 }
2324
2325 switch (spectre_v2_cmd) {
2326 case SPECTRE_V2_CMD_NONE:
2327 return;
2328
2329 case SPECTRE_V2_CMD_AUTO:
2330 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2))
2331 break;
2332 fallthrough;
2333 case SPECTRE_V2_CMD_FORCE:
2334 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2335 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2336 break;
2337 }
2338
2339 spectre_v2_enabled = spectre_v2_select_retpoline();
2340 break;
2341
2342 case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
2343 pr_err(SPECTRE_V2_LFENCE_MSG);
2344 spectre_v2_enabled = SPECTRE_V2_LFENCE;
2345 break;
2346
2347 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
2348 spectre_v2_enabled = SPECTRE_V2_RETPOLINE;
2349 break;
2350
2351 case SPECTRE_V2_CMD_RETPOLINE:
2352 spectre_v2_enabled = spectre_v2_select_retpoline();
2353 break;
2354
2355 case SPECTRE_V2_CMD_IBRS:
2356 spectre_v2_enabled = SPECTRE_V2_IBRS;
2357 break;
2358
2359 case SPECTRE_V2_CMD_EIBRS:
2360 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2361 break;
2362
2363 case SPECTRE_V2_CMD_EIBRS_LFENCE:
2364 spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE;
2365 break;
2366
2367 case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
2368 spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE;
2369 break;
2370 }
2371 }
2372
2373 static void __init spectre_v2_update_mitigation(void)
2374 {
2375 if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO &&
2376 !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) {
2377 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
2378 boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2379 retbleed_mitigation != RETBLEED_MITIGATION_NONE &&
2380 retbleed_mitigation != RETBLEED_MITIGATION_STUFF &&
2381 boot_cpu_has(X86_FEATURE_IBRS) &&
2382 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
2383 spectre_v2_enabled = SPECTRE_V2_IBRS;
2384 }
2385 }
2386
2387 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2388 pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]);
2389 }
2390
2391 static void __init spectre_v2_apply_mitigation(void)
2392 {
2393 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2394 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
2395
2396 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2397 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
2398 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
2399 } else {
2400 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
2401 update_spec_ctrl(x86_spec_ctrl_base);
2402 }
2403 }
2404
2405 switch (spectre_v2_enabled) {
2406 case SPECTRE_V2_NONE:
2407 return;
2408
2409 case SPECTRE_V2_EIBRS:
2410 break;
2411
2412 case SPECTRE_V2_IBRS:
2413 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
2414 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
2415 pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
2416 break;
2417
2418 case SPECTRE_V2_LFENCE:
2419 case SPECTRE_V2_EIBRS_LFENCE:
2420 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
2421 fallthrough;
2422
2423 case SPECTRE_V2_RETPOLINE:
2424 case SPECTRE_V2_EIBRS_RETPOLINE:
2425 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
2426 break;
2427 }
2428
2429 /*
2430 * Disable alternate RSB predictions in the kernel when indirect CALLs
2431 * and JMPs get protection against BHI and Intramode-BTI, but RET
2432 * prediction from a non-RSB predictor is still a risk.
2433 */
2434 if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE ||
2435 spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE ||
2436 spectre_v2_enabled == SPECTRE_V2_RETPOLINE)
2437 spec_ctrl_disable_kernel_rrsba();
2438
2439 spectre_v2_select_rsb_mitigation(spectre_v2_enabled);
2440
2441 /*
2442 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
2443 * and Enhanced IBRS protect firmware too, so enable IBRS around
2444 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
2445 * otherwise enabled.
2446 *
2447 * Use "spectre_v2_enabled" to check Enhanced IBRS instead of
2448 * boot_cpu_has(): the user might have selected retpoline on the kernel
2449 * command line, and if the CPU supports Enhanced IBRS the kernel would
2450 * then unintentionally fail to enable IBRS around firmware calls.
2451 */
2452 if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2453 boot_cpu_has(X86_FEATURE_IBPB) &&
2454 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2455 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
2456
2457 if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) {
2458 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
2459 pr_info("Enabling Speculation Barrier for firmware calls\n");
2460 }
2461
2462 } else if (boot_cpu_has(X86_FEATURE_IBRS) &&
2463 !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2464 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
2465 pr_info("Enabling Restricted Speculation for firmware calls\n");
2466 }
2467 }
2468
2469 static void update_stibp_msr(void * __unused)
2470 {
2471 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
2472 update_spec_ctrl(val);
2473 }
2474
2475 /* Update x86_spec_ctrl_base in case SMT state changed. */
2476 static void update_stibp_strict(void)
2477 {
2478 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
2479
2480 if (sched_smt_active())
2481 mask |= SPEC_CTRL_STIBP;
2482
2483 if (mask == x86_spec_ctrl_base)
2484 return;
2485
2486 pr_info("Update user space SMT mitigation: STIBP %s\n",
2487 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
2488 x86_spec_ctrl_base = mask;
2489 on_each_cpu(update_stibp_msr, NULL, 1);
2490 }
2491
2492 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
2493 static void update_indir_branch_cond(void)
2494 {
2495 if (sched_smt_active())
2496 static_branch_enable(&switch_to_cond_stibp);
2497 else
2498 static_branch_disable(&switch_to_cond_stibp);
2499 }
2500
2501 #undef pr_fmt
2502 #define pr_fmt(fmt) fmt
2503
2504 /* Update the static key controlling the MDS CPU buffer clear in idle */
2505 static void update_mds_branch_idle(void)
2506 {
2507 /*
2508 * Enable the idle clearing if SMT is active on CPUs which are
2509 * affected only by MSBDS and not any other MDS variant.
2510 *
2511 * The other variants cannot be mitigated when SMT is enabled, so
2512 * clearing the buffers on idle just to prevent the Store Buffer
2513 * repartitioning leak would be a window dressing exercise.
2514 */
2515 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
2516 return;
2517
2518 if (sched_smt_active()) {
2519 static_branch_enable(&cpu_buf_idle_clear);
2520 } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
2521 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
2522 static_branch_disable(&cpu_buf_idle_clear);
2523 }
2524 }
2525
2526 #undef pr_fmt
2527 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
2528
2529 static enum ssb_mitigation ssb_mode __ro_after_init =
2530 IS_ENABLED(CONFIG_MITIGATION_SSB) ? SPEC_STORE_BYPASS_AUTO : SPEC_STORE_BYPASS_NONE;
2531
2532 static const char * const ssb_strings[] = {
2533 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
2534 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
2535 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
2536 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
2537 };
2538
2539 static bool nossb __ro_after_init;
2540
2541 static int __init nossb_parse_cmdline(char *str)
2542 {
2543 nossb = true;
2544 ssb_mode = SPEC_STORE_BYPASS_NONE;
2545 return 0;
2546 }
2547 early_param("nospec_store_bypass_disable", nossb_parse_cmdline);
2548
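/*
 * Parse the "spec_store_bypass_disable=" boot option: "auto", "on",
 * "off", "prctl" or "seccomp" (the latter degrades to prctl when
 * CONFIG_SECCOMP is not set). A preceding "nospec_store_bypass_disable"
 * takes precedence.
 */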
2549 static int __init ssb_parse_cmdline(char *str)
2550 {
2551 if (!str)
2552 return -EINVAL;
2553
2554 if (nossb)
2555 return 0;
2556
2557 if (!strcmp(str, "auto"))
2558 ssb_mode = SPEC_STORE_BYPASS_AUTO;
2559 else if (!strcmp(str, "on"))
2560 ssb_mode = SPEC_STORE_BYPASS_DISABLE;
2561 else if (!strcmp(str, "off"))
2562 ssb_mode = SPEC_STORE_BYPASS_NONE;
2563 else if (!strcmp(str, "prctl"))
2564 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2565 else if (!strcmp(str, "seccomp"))
2566 ssb_mode = IS_ENABLED(CONFIG_SECCOMP) ?
2567 SPEC_STORE_BYPASS_SECCOMP : SPEC_STORE_BYPASS_PRCTL;
2568 else
2569 pr_err("Ignoring unknown spec_store_bypass_disable option (%s).\n",
2570 str);
2571
2572 return 0;
2573 }
2574 early_param("spec_store_bypass_disable", ssb_parse_cmdline);
2575
2576 static void __init ssb_select_mitigation(void)
2577 {
2578 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) {
2579 ssb_mode = SPEC_STORE_BYPASS_NONE;
2580 return;
2581 }
2582
2583 if (ssb_mode == SPEC_STORE_BYPASS_AUTO) {
2584 if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS))
2585 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2586 else
2587 ssb_mode = SPEC_STORE_BYPASS_NONE;
2588 }
2589
2590 if (!boot_cpu_has(X86_FEATURE_SSBD))
2591 ssb_mode = SPEC_STORE_BYPASS_NONE;
2592
2593 pr_info("%s\n", ssb_strings[ssb_mode]);
2594 }
2595
2596 static void __init ssb_apply_mitigation(void)
2597 {
2598 /*
2599 * We have three CPU feature flags that are in play here:
2600 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
2601 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
2602 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
2603 */
2604 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
2605 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
2606 /*
2607 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
2608 * use a completely different MSR and bit dependent on family.
2609 */
2610 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
2611 !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
2612 x86_amd_ssb_disable();
2613 } else {
2614 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
2615 update_spec_ctrl(x86_spec_ctrl_base);
2616 }
2617 }
2618 }
2619
2620 #undef pr_fmt
2621 #define pr_fmt(fmt) "Speculation prctl: " fmt
2622
2623 static void task_update_spec_tif(struct task_struct *tsk)
2624 {
2625 /* Force the update of the real TIF bits */
2626 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
2627
2628 /*
2629 * Immediately update the speculation control MSRs for the current
2630 * task, but for a non-current task delay setting the CPU
2631 * mitigation until it is scheduled next.
2632 *
2633 * This can only happen for SECCOMP mitigation. For PRCTL it's
2634 * always the current task.
2635 */
2636 if (tsk == current)
2637 speculation_ctrl_update_current();
2638 }
2639
2640 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
2641 {
2642
2643 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2644 return -EPERM;
2645
2646 switch (ctrl) {
2647 case PR_SPEC_ENABLE:
2648 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2649 return 0;
2650 case PR_SPEC_DISABLE:
2651 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2652 return 0;
2653 default:
2654 return -ERANGE;
2655 }
2656 }
2657
2658 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
2659 {
2660 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
2661 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
2662 return -ENXIO;
2663
2664 switch (ctrl) {
2665 case PR_SPEC_ENABLE:
2666 /* If speculation is force disabled, enable is not allowed */
2667 if (task_spec_ssb_force_disable(task))
2668 return -EPERM;
2669 task_clear_spec_ssb_disable(task);
2670 task_clear_spec_ssb_noexec(task);
2671 task_update_spec_tif(task);
2672 break;
2673 case PR_SPEC_DISABLE:
2674 task_set_spec_ssb_disable(task);
2675 task_clear_spec_ssb_noexec(task);
2676 task_update_spec_tif(task);
2677 break;
2678 case PR_SPEC_FORCE_DISABLE:
2679 task_set_spec_ssb_disable(task);
2680 task_set_spec_ssb_force_disable(task);
2681 task_clear_spec_ssb_noexec(task);
2682 task_update_spec_tif(task);
2683 break;
2684 case PR_SPEC_DISABLE_NOEXEC:
2685 if (task_spec_ssb_force_disable(task))
2686 return -EPERM;
2687 task_set_spec_ssb_disable(task);
2688 task_set_spec_ssb_noexec(task);
2689 task_update_spec_tif(task);
2690 break;
2691 default:
2692 return -ERANGE;
2693 }
2694 return 0;
2695 }
2696
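/*
 * True when either IBPB or STIBP is in a per-task (prctl/seccomp) mode,
 * i.e. when indirect branch speculation can be controlled from
 * userspace.
 */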
2697 static bool is_spec_ib_user_controlled(void)
2698 {
2699 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2700 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2701 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2702 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2703 }
2704
2705 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2706 {
2707 switch (ctrl) {
2708 case PR_SPEC_ENABLE:
2709 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2710 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2711 return 0;
2712
2713 /*
2714 * With strict mode for both IBPB and STIBP, the instruction
2715 * code paths avoid checking this task flag and instead
2716 * unconditionally run the instruction. However, STIBP and IBPB
2717 * are independent and either can be set to conditionally
2718 * enabled regardless of the mode of the other.
2719 *
2720 * If either is set to conditional, allow the task flag to be
2721 * updated, unless it was force-disabled by a previous prctl
2722 * call. Currently, this is possible on an AMD CPU which has the
2723 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2724 * kernel is booted with 'spectre_v2_user=seccomp', then
2725 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2726 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2727 */
2728 if (!is_spec_ib_user_controlled() ||
2729 task_spec_ib_force_disable(task))
2730 return -EPERM;
2731
2732 task_clear_spec_ib_disable(task);
2733 task_update_spec_tif(task);
2734 break;
2735 case PR_SPEC_DISABLE:
2736 case PR_SPEC_FORCE_DISABLE:
2737 /*
2738 * Indirect branch speculation is always allowed when
2739 * mitigation is force disabled.
2740 */
2741 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2742 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2743 return -EPERM;
2744
2745 if (!is_spec_ib_user_controlled())
2746 return 0;
2747
2748 task_set_spec_ib_disable(task);
2749 if (ctrl == PR_SPEC_FORCE_DISABLE)
2750 task_set_spec_ib_force_disable(task);
2751 task_update_spec_tif(task);
2752 if (task == current)
2753 indirect_branch_prediction_barrier();
2754 break;
2755 default:
2756 return -ERANGE;
2757 }
2758 return 0;
2759 }
2760
2761 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2762 unsigned long ctrl)
2763 {
2764 switch (which) {
2765 case PR_SPEC_STORE_BYPASS:
2766 return ssb_prctl_set(task, ctrl);
2767 case PR_SPEC_INDIRECT_BRANCH:
2768 return ib_prctl_set(task, ctrl);
2769 case PR_SPEC_L1D_FLUSH:
2770 return l1d_flush_prctl_set(task, ctrl);
2771 default:
2772 return -ENODEV;
2773 }
2774 }
2775
2776 #ifdef CONFIG_SECCOMP
2777 void arch_seccomp_spec_mitigate(struct task_struct *task)
2778 {
2779 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2780 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2781 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2782 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2783 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2784 }
2785 #endif
2786
2787 static int l1d_flush_prctl_get(struct task_struct *task)
2788 {
2789 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2790 return PR_SPEC_FORCE_DISABLE;
2791
2792 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2793 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2794 else
2795 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2796 }
2797
2798 static int ssb_prctl_get(struct task_struct *task)
2799 {
2800 switch (ssb_mode) {
2801 case SPEC_STORE_BYPASS_NONE:
2802 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2803 return PR_SPEC_ENABLE;
2804 return PR_SPEC_NOT_AFFECTED;
2805 case SPEC_STORE_BYPASS_DISABLE:
2806 return PR_SPEC_DISABLE;
2807 case SPEC_STORE_BYPASS_SECCOMP:
2808 case SPEC_STORE_BYPASS_PRCTL:
2809 case SPEC_STORE_BYPASS_AUTO:
2810 if (task_spec_ssb_force_disable(task))
2811 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2812 if (task_spec_ssb_noexec(task))
2813 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2814 if (task_spec_ssb_disable(task))
2815 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2816 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2817 }
2818 BUG();
2819 }
2820
2821 static int ib_prctl_get(struct task_struct *task)
2822 {
2823 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2824 return PR_SPEC_NOT_AFFECTED;
2825
2826 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2827 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2828 return PR_SPEC_ENABLE;
2829 else if (is_spec_ib_user_controlled()) {
2830 if (task_spec_ib_force_disable(task))
2831 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2832 if (task_spec_ib_disable(task))
2833 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2834 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2835 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2836 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2837 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2838 return PR_SPEC_DISABLE;
2839 else
2840 return PR_SPEC_NOT_AFFECTED;
2841 }
2842
2843 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2844 {
2845 switch (which) {
2846 case PR_SPEC_STORE_BYPASS:
2847 return ssb_prctl_get(task);
2848 case PR_SPEC_INDIRECT_BRANCH:
2849 return ib_prctl_get(task);
2850 case PR_SPEC_L1D_FLUSH:
2851 return l1d_flush_prctl_get(task);
2852 default:
2853 return -ENODEV;
2854 }
2855 }
2856
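/*
 * Sync a freshly brought-up AP with the SPEC_CTRL base value and the
 * AMD SSB setting chosen during boot.
 */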
2857 void x86_spec_ctrl_setup_ap(void)
2858 {
2859 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2860 update_spec_ctrl(x86_spec_ctrl_base);
2861
2862 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2863 x86_amd_ssb_disable();
2864 }
2865
2866 bool itlb_multihit_kvm_mitigation;
2867 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
2868
2869 #undef pr_fmt
2870 #define pr_fmt(fmt) "L1TF: " fmt
2871
2872 /* Default mitigation for L1TF-affected CPUs */
2873 enum l1tf_mitigations l1tf_mitigation __ro_after_init =
2874 IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF;
2875 #if IS_ENABLED(CONFIG_KVM_INTEL)
2876 EXPORT_SYMBOL_GPL(l1tf_mitigation);
2877 #endif
2878 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2879 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
2880
2881 /*
2882 * These CPUs all support 44 bits of physical address space internally in
2883 * the cache, but CPUID can report a smaller number of physical address bits.
2884 *
2885 * The L1TF mitigation uses the topmost address bit for the inversion of
2886 * non-present PTEs. When the installed memory reaches into the topmost
2887 * address bit due to memory holes, which has been observed on machines
2888 * that report 36 physical address bits and have 32G RAM installed, the
2889 * mitigation range check in l1tf_apply_mitigation() triggers. This is a
2890 * false positive because the mitigation is still possible given that the
2891 * cache uses 44 bits internally. Use the cache bits instead of the
2892 * reported physical bits, and adjust them on the affected machines to 44
2893 * if the reported bits are less than 44.
2894 */
2895 static void override_cache_bits(struct cpuinfo_x86 *c)
2896 {
2897 if (c->x86 != 6)
2898 return;
2899
2900 switch (c->x86_vfm) {
2901 case INTEL_NEHALEM:
2902 case INTEL_WESTMERE:
2903 case INTEL_SANDYBRIDGE:
2904 case INTEL_IVYBRIDGE:
2905 case INTEL_HASWELL:
2906 case INTEL_HASWELL_L:
2907 case INTEL_HASWELL_G:
2908 case INTEL_BROADWELL:
2909 case INTEL_BROADWELL_G:
2910 case INTEL_SKYLAKE_L:
2911 case INTEL_SKYLAKE:
2912 case INTEL_KABYLAKE_L:
2913 case INTEL_KABYLAKE:
2914 if (c->x86_cache_bits < 44)
2915 c->x86_cache_bits = 44;
2916 break;
2917 }
2918 }
2919
2920 static void __init l1tf_select_mitigation(void)
2921 {
2922 if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
2923 l1tf_mitigation = L1TF_MITIGATION_OFF;
2924 return;
2925 }
2926
2927 if (l1tf_mitigation != L1TF_MITIGATION_AUTO)
2928 return;
2929
2930 if (!should_mitigate_vuln(X86_BUG_L1TF)) {
2931 l1tf_mitigation = L1TF_MITIGATION_OFF;
2932 return;
2933 }
2934
2935 if (smt_mitigations == SMT_MITIGATIONS_ON)
2936 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2937 else
2938 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2939 }
2940
2941 static void __init l1tf_apply_mitigation(void)
2942 {
2943 u64 half_pa;
2944
2945 if (!boot_cpu_has_bug(X86_BUG_L1TF))
2946 return;
2947
2948 override_cache_bits(&boot_cpu_data);
2949
2950 switch (l1tf_mitigation) {
2951 case L1TF_MITIGATION_OFF:
2952 case L1TF_MITIGATION_FLUSH_NOWARN:
2953 case L1TF_MITIGATION_FLUSH:
2954 case L1TF_MITIGATION_AUTO:
2955 break;
2956 case L1TF_MITIGATION_FLUSH_NOSMT:
2957 case L1TF_MITIGATION_FULL:
2958 cpu_smt_disable(false);
2959 break;
2960 case L1TF_MITIGATION_FULL_FORCE:
2961 cpu_smt_disable(true);
2962 break;
2963 }
2964
2965 #if CONFIG_PGTABLE_LEVELS == 2
2966 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
2967 return;
2968 #endif
2969
2970 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
2971 if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
2972 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
2973 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
2974 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
2975 half_pa);
2976 pr_info("However, doing so will make a part of your RAM unusable.\n");
2977 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
2978 return;
2979 }
2980
2981 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
2982 }
2983
2984 static int __init l1tf_cmdline(char *str)
2985 {
2986 if (!boot_cpu_has_bug(X86_BUG_L1TF))
2987 return 0;
2988
2989 if (!str)
2990 return -EINVAL;
2991
2992 if (!strcmp(str, "off"))
2993 l1tf_mitigation = L1TF_MITIGATION_OFF;
2994 else if (!strcmp(str, "flush,nowarn"))
2995 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
2996 else if (!strcmp(str, "flush"))
2997 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2998 else if (!strcmp(str, "flush,nosmt"))
2999 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
3000 else if (!strcmp(str, "full"))
3001 l1tf_mitigation = L1TF_MITIGATION_FULL;
3002 else if (!strcmp(str, "full,force"))
3003 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
3004
3005 return 0;
3006 }
3007 early_param("l1tf", l1tf_cmdline);
3008
3009 #undef pr_fmt
3010 #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt
3011
3012 static const char * const srso_strings[] = {
3013 [SRSO_MITIGATION_NONE] = "Vulnerable",
3014 [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
3015 [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
3016 [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
3017 [SRSO_MITIGATION_NOSMT] = "Mitigation: SMT disabled",
3018 [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
3019 [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
3020 [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
3021 [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
3022 };
3023
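/*
 * Parse the "spec_rstack_overflow=" boot option: "off", "microcode",
 * "safe-ret", "ibpb" or "ibpb-vmexit". Unknown values are reported and
 * ignored.
 */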
3024 static int __init srso_parse_cmdline(char *str)
3025 {
3026 if (!str)
3027 return -EINVAL;
3028
3029 if (!strcmp(str, "off"))
3030 srso_mitigation = SRSO_MITIGATION_NONE;
3031 else if (!strcmp(str, "microcode"))
3032 srso_mitigation = SRSO_MITIGATION_MICROCODE;
3033 else if (!strcmp(str, "safe-ret"))
3034 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
3035 else if (!strcmp(str, "ibpb"))
3036 srso_mitigation = SRSO_MITIGATION_IBPB;
3037 else if (!strcmp(str, "ibpb-vmexit"))
3038 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
3039 else
3040 pr_err("Ignoring unknown SRSO option (%s).", str);
3041
3042 return 0;
3043 }
3044 early_param("spec_rstack_overflow", srso_parse_cmdline);
3045
3046 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
3047
3048 static void __init srso_select_mitigation(void)
3049 {
3050 if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
3051 srso_mitigation = SRSO_MITIGATION_NONE;
3052 return;
3053 }
3054
3055 if (srso_mitigation == SRSO_MITIGATION_AUTO) {
3056 /*
3057 * Use safe-RET if user->kernel or guest->host protection is
3058 * required. Otherwise the 'microcode' mitigation is sufficient
3059 * to protect the user->user and guest->guest vectors.
3060 */
3061 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
3062 (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) &&
3063 !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) {
3064 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
3065 } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
3066 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
3067 srso_mitigation = SRSO_MITIGATION_MICROCODE;
3068 } else {
3069 srso_mitigation = SRSO_MITIGATION_NONE;
3070 return;
3071 }
3072 }
3073
3074 /* Zen1/2 with SMT off aren't vulnerable to SRSO. */
3075 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
3076 srso_mitigation = SRSO_MITIGATION_NOSMT;
3077 return;
3078 }
3079
3080 if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) {
3081 pr_warn("IBPB-extending microcode not applied!\n");
3082 pr_warn(SRSO_NOTICE);
3083
3084 /*
3085 * Safe-RET provides partial mitigation even without the microcode,
3086 * but the other mitigation options provide no protection at all
3087 * without it.
3088 */
3089 if (srso_mitigation == SRSO_MITIGATION_SAFE_RET)
3090 srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
3091 else
3092 srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
3093 }
3094
3095 switch (srso_mitigation) {
3096 case SRSO_MITIGATION_SAFE_RET:
3097 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3098 if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
3099 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
3100 goto ibpb_on_vmexit;
3101 }
3102
3103 if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
3104 pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
3105 srso_mitigation = SRSO_MITIGATION_NONE;
3106 }
3107 break;
3108 ibpb_on_vmexit:
3109 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3110 if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
3111 pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
3112 srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
3113 break;
3114 }
3115 fallthrough;
3116 case SRSO_MITIGATION_IBPB:
3117 if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
3118 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
3119 srso_mitigation = SRSO_MITIGATION_NONE;
3120 }
3121 break;
3122 default:
3123 break;
3124 }
3125 }
3126
3127 static void __init srso_update_mitigation(void)
3128 {
3129 if (!boot_cpu_has_bug(X86_BUG_SRSO))
3130 return;
3131
3132 /* If retbleed is using IBPB, that works for SRSO as well */
3133 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB &&
3134 boot_cpu_has(X86_FEATURE_IBPB_BRTYPE))
3135 srso_mitigation = SRSO_MITIGATION_IBPB;
3136
3137 pr_info("%s\n", srso_strings[srso_mitigation]);
3138 }
3139
3140 static void __init srso_apply_mitigation(void)
3141 {
3142 /*
3143 * Clear the feature flag if this mitigation is not selected, as that
3144 * feature flag controls the BpSpecReduce MSR bit toggling in KVM.
3145 */
3146 if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
3147 setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
3148
3149 if (srso_mitigation == SRSO_MITIGATION_NONE) {
3150 if (boot_cpu_has(X86_FEATURE_SBPB))
3151 x86_pred_cmd = PRED_CMD_SBPB;
3152 return;
3153 }
3154
3155 switch (srso_mitigation) {
3156 case SRSO_MITIGATION_SAFE_RET:
3157 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3158 /*
3159 * Enable the return thunk for generated code
3160 * like ftrace, static_call, etc.
3161 */
3162 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
3163 setup_force_cpu_cap(X86_FEATURE_UNRET);
3164
3165 if (boot_cpu_data.x86 == 0x19) {
3166 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
3167 set_return_thunk(srso_alias_return_thunk);
3168 } else {
3169 setup_force_cpu_cap(X86_FEATURE_SRSO);
3170 set_return_thunk(srso_return_thunk);
3171 }
3172 break;
3173 case SRSO_MITIGATION_IBPB:
3174 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
3175 /*
3176 * IBPB on entry already obviates the need for
3177 * software-based untraining so clear those in case some
3178 * other mitigation like Retbleed has selected them.
3179 */
3180 setup_clear_cpu_cap(X86_FEATURE_UNRET);
3181 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
3182 fallthrough;
3183 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3184 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
3185 /*
3186 * There is no need for RSB filling: entry_ibpb() ensures
3187 * all predictions, including the RSB, are invalidated,
3188 * regardless of IBPB implementation.
3189 */
3190 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
3191 break;
3192 default:
3193 break;
3194 }
3195 }
3196
3197 #undef pr_fmt
3198 #define pr_fmt(fmt) "VMSCAPE: " fmt
3199
3200 enum vmscape_mitigations {
3201 VMSCAPE_MITIGATION_NONE,
3202 VMSCAPE_MITIGATION_AUTO,
3203 VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
3204 VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
3205 };
3206
3207 static const char * const vmscape_strings[] = {
3208 [VMSCAPE_MITIGATION_NONE] = "Vulnerable",
3209 /* [VMSCAPE_MITIGATION_AUTO] */
3210 [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER] = "Mitigation: IBPB before exit to userspace",
3211 [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT",
3212 };
3213
3214 static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
3215 IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;
3216
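/*
 * Parse the "vmscape=" boot option: "off", "ibpb" (IBPB before exiting
 * to userspace) or "force" (treat the CPU as affected and let the
 * automatic selection run). Unknown values are reported and ignored.
 */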
3217 static int __init vmscape_parse_cmdline(char *str)
3218 {
3219 if (!str)
3220 return -EINVAL;
3221
3222 if (!strcmp(str, "off")) {
3223 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3224 } else if (!strcmp(str, "ibpb")) {
3225 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
3226 } else if (!strcmp(str, "force")) {
3227 setup_force_cpu_bug(X86_BUG_VMSCAPE);
3228 vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
3229 } else {
3230 pr_err("Ignoring unknown vmscape=%s option.\n", str);
3231 }
3232
3233 return 0;
3234 }
3235 early_param("vmscape", vmscape_parse_cmdline);
3236
3237 static void __init vmscape_select_mitigation(void)
3238 {
3239 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
3240 !boot_cpu_has(X86_FEATURE_IBPB)) {
3241 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3242 return;
3243 }
3244
3245 if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO) {
3246 if (should_mitigate_vuln(X86_BUG_VMSCAPE))
3247 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
3248 else
3249 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3250 }
3251 }
3252
3253 static void __init vmscape_update_mitigation(void)
3254 {
3255 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE))
3256 return;
3257
3258 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
3259 srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
3260 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;
3261
3262 pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
3263 }
3264
3265 static void __init vmscape_apply_mitigation(void)
3266 {
3267 if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
3268 setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
3269 }
3270
3271 #undef pr_fmt
3272 #define pr_fmt(fmt) fmt
3273
3274 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
3275 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
3276 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
3277 #define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n"
3278
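/*
 * Re-evaluate the SMT-dependent parts of the mitigations (STIBP, the
 * MDS/TAA/MMIO/TSA buffer clearing and the VMSCAPE warning) when the
 * SMT state of the system changes.
 */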
3279 void cpu_bugs_smt_update(void)
3280 {
3281 mutex_lock(&spec_ctrl_mutex);
3282
3283 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
3284 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
3285 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
3286
3287 switch (spectre_v2_user_stibp) {
3288 case SPECTRE_V2_USER_NONE:
3289 break;
3290 case SPECTRE_V2_USER_STRICT:
3291 case SPECTRE_V2_USER_STRICT_PREFERRED:
3292 update_stibp_strict();
3293 break;
3294 case SPECTRE_V2_USER_PRCTL:
3295 case SPECTRE_V2_USER_SECCOMP:
3296 update_indir_branch_cond();
3297 break;
3298 }
3299
3300 switch (mds_mitigation) {
3301 case MDS_MITIGATION_FULL:
3302 case MDS_MITIGATION_AUTO:
3303 case MDS_MITIGATION_VMWERV:
3304 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
3305 pr_warn_once(MDS_MSG_SMT);
3306 update_mds_branch_idle();
3307 break;
3308 case MDS_MITIGATION_OFF:
3309 break;
3310 }
3311
3312 switch (taa_mitigation) {
3313 case TAA_MITIGATION_VERW:
3314 case TAA_MITIGATION_AUTO:
3315 case TAA_MITIGATION_UCODE_NEEDED:
3316 if (sched_smt_active())
3317 pr_warn_once(TAA_MSG_SMT);
3318 break;
3319 case TAA_MITIGATION_TSX_DISABLED:
3320 case TAA_MITIGATION_OFF:
3321 break;
3322 }
3323
3324 switch (mmio_mitigation) {
3325 case MMIO_MITIGATION_VERW:
3326 case MMIO_MITIGATION_AUTO:
3327 case MMIO_MITIGATION_UCODE_NEEDED:
3328 if (sched_smt_active())
3329 pr_warn_once(MMIO_MSG_SMT);
3330 break;
3331 case MMIO_MITIGATION_OFF:
3332 break;
3333 }
3334
3335 switch (tsa_mitigation) {
3336 case TSA_MITIGATION_USER_KERNEL:
3337 case TSA_MITIGATION_VM:
3338 case TSA_MITIGATION_AUTO:
3339 case TSA_MITIGATION_FULL:
3340 /*
3341 * TSA-SQ can potentially lead to info leakage between
3342 * SMT threads.
3343 */
3344 if (sched_smt_active())
3345 static_branch_enable(&cpu_buf_idle_clear);
3346 else
3347 static_branch_disable(&cpu_buf_idle_clear);
3348 break;
3349 case TSA_MITIGATION_NONE:
3350 case TSA_MITIGATION_UCODE_NEEDED:
3351 break;
3352 }
3353
3354 switch (vmscape_mitigation) {
3355 case VMSCAPE_MITIGATION_NONE:
3356 case VMSCAPE_MITIGATION_AUTO:
3357 break;
3358 case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT:
3359 case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER:
3360 /*
3361 * Hypervisors can be attacked across threads; warn for SMT when
3362 * STIBP is not already enabled system-wide.
3363 *
3364 * Intel eIBRS (!AUTOIBRS) implies STIBP on.
3365 */
3366 if (!sched_smt_active() ||
3367 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
3368 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
3369 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
3370 !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
3371 break;
3372 pr_warn_once(VMSCAPE_MSG_SMT);
3373 break;
3374 }
3375
3376 mutex_unlock(&spec_ctrl_mutex);
3377 }
3378
3379 #ifdef CONFIG_SYSFS
3380
3381 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
3382
3383 #if IS_ENABLED(CONFIG_KVM_INTEL)
3384 static const char * const l1tf_vmx_states[] = {
3385 [VMENTER_L1D_FLUSH_AUTO] = "auto",
3386 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
3387 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
3388 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
3389 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
3390 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
3391 };
3392
3393 static ssize_t l1tf_show_state(char *buf)
3394 {
3395 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
3396 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3397
3398 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
3399 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
3400 sched_smt_active())) {
3401 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
3402 l1tf_vmx_states[l1tf_vmx_mitigation]);
3403 }
3404
3405 return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
3406 l1tf_vmx_states[l1tf_vmx_mitigation],
3407 sched_smt_active() ? "vulnerable" : "disabled");
3408 }
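/*
 * Example output, assuming the corresponding sysfs file under
 * /sys/devices/system/cpu/vulnerabilities/ is "l1tf", as produced by the
 * three format strings above depending on l1tf_vmx_mitigation and SMT state:
 *
 *	Mitigation: PTE Inversion
 *	Mitigation: PTE Inversion; VMX: EPT disabled
 *	Mitigation: PTE Inversion; VMX: conditional cache flushes, SMT disabled
 */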
3409
3410 static ssize_t itlb_multihit_show_state(char *buf)
3411 {
3412 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
3413 !boot_cpu_has(X86_FEATURE_VMX))
3414 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
3415 else if (!(cr4_read_shadow() & X86_CR4_VMXE))
3416 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
3417 else if (itlb_multihit_kvm_mitigation)
3418 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
3419 else
3420 return sysfs_emit(buf, "KVM: Vulnerable\n");
3421 }
3422 #else
3423 static ssize_t l1tf_show_state(char *buf)
3424 {
3425 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3426 }
3427
3428 static ssize_t itlb_multihit_show_state(char *buf)
3429 {
3430 return sysfs_emit(buf, "Processor vulnerable\n");
3431 }
3432 #endif
3433
3434 static ssize_t mds_show_state(char *buf)
3435 {
3436 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3437 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3438 mds_strings[mds_mitigation]);
3439 }
3440
3441 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
3442 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3443 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
3444 sched_smt_active() ? "mitigated" : "disabled"));
3445 }
3446
3447 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3448 sched_smt_active() ? "vulnerable" : "disabled");
3449 }
3450
3451 static ssize_t tsx_async_abort_show_state(char *buf)
3452 {
3453 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
3454 (taa_mitigation == TAA_MITIGATION_OFF))
3455 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
3456
3457 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3458 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3459 taa_strings[taa_mitigation]);
3460 }
3461
3462 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
3463 sched_smt_active() ? "vulnerable" : "disabled");
3464 }
3465
3466 static ssize_t mmio_stale_data_show_state(char *buf)
3467 {
3468 if (mmio_mitigation == MMIO_MITIGATION_OFF)
3469 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
3470
3471 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3472 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3473 mmio_strings[mmio_mitigation]);
3474 }
3475
3476 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
3477 sched_smt_active() ? "vulnerable" : "disabled");
3478 }
3479
3480 static ssize_t rfds_show_state(char *buf)
3481 {
3482 return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
3483 }
3484
3485 static ssize_t old_microcode_show_state(char *buf)
3486 {
3487 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
3488 		return sysfs_emit(buf, "Unknown: running under hypervisor\n");
3489
3490 return sysfs_emit(buf, "Vulnerable\n");
3491 }
3492
3493 static ssize_t its_show_state(char *buf)
3494 {
3495 return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
3496 }
3497
3498 static char *stibp_state(void)
3499 {
3500 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
3501 !boot_cpu_has(X86_FEATURE_AUTOIBRS))
3502 return "";
3503
3504 switch (spectre_v2_user_stibp) {
3505 case SPECTRE_V2_USER_NONE:
3506 return "; STIBP: disabled";
3507 case SPECTRE_V2_USER_STRICT:
3508 return "; STIBP: forced";
3509 case SPECTRE_V2_USER_STRICT_PREFERRED:
3510 return "; STIBP: always-on";
3511 case SPECTRE_V2_USER_PRCTL:
3512 case SPECTRE_V2_USER_SECCOMP:
3513 if (static_key_enabled(&switch_to_cond_stibp))
3514 return "; STIBP: conditional";
3515 }
3516 return "";
3517 }
3518
3519 static char *ibpb_state(void)
3520 {
3521 if (boot_cpu_has(X86_FEATURE_IBPB)) {
3522 if (static_key_enabled(&switch_mm_always_ibpb))
3523 return "; IBPB: always-on";
3524 if (static_key_enabled(&switch_mm_cond_ibpb))
3525 return "; IBPB: conditional";
3526 return "; IBPB: disabled";
3527 }
3528 return "";
3529 }
3530
3531 static char *pbrsb_eibrs_state(void)
3532 {
3533 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
3534 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
3535 boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
3536 return "; PBRSB-eIBRS: SW sequence";
3537 else
3538 return "; PBRSB-eIBRS: Vulnerable";
3539 } else {
3540 return "; PBRSB-eIBRS: Not affected";
3541 }
3542 }
3543
3544 static const char *spectre_bhi_state(void)
3545 {
3546 if (!boot_cpu_has_bug(X86_BUG_BHI))
3547 return "; BHI: Not affected";
3548 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
3549 return "; BHI: BHI_DIS_S";
3550 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
3551 return "; BHI: SW loop, KVM: SW loop";
3552 else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
3553 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
3554 rrsba_disabled)
3555 return "; BHI: Retpoline";
3556 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT))
3557 return "; BHI: Vulnerable, KVM: SW loop";
3558
3559 return "; BHI: Vulnerable";
3560 }
3561
3562 static ssize_t spectre_v2_show_state(char *buf)
3563 {
3564 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
3565 return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
3566
3567 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
3568 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
3569 return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
3570
3571 return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
3572 spectre_v2_strings[spectre_v2_enabled],
3573 ibpb_state(),
3574 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
3575 stibp_state(),
3576 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
3577 pbrsb_eibrs_state(),
3578 spectre_bhi_state(),
3579 /* this should always be at the end */
3580 spectre_v2_module_string());
3581 }
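/*
 * Example of the composed line above, with conditional IBPB/STIBP and no
 * eIBRS; the leading mitigation name comes from spectre_v2_strings[] (not
 * shown here) and empty sub-states simply drop out of the output:
 *
 *	<spectre_v2 mitigation>; IBPB: conditional; STIBP: conditional; RSB filling; PBRSB-eIBRS: Not affected; BHI: Retpoline
 */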
3582
3583 static ssize_t srbds_show_state(char *buf)
3584 {
3585 return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
3586 }
3587
3588 static ssize_t retbleed_show_state(char *buf)
3589 {
3590 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
3591 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
3592 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
3593 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
3594 return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
3595
3596 return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
3597 !sched_smt_active() ? "disabled" :
3598 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
3599 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
3600 "enabled with STIBP protection" : "vulnerable");
3601 }
3602
3603 return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
3604 }
3605
3606 static ssize_t srso_show_state(char *buf)
3607 {
3608 return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
3609 }
3610
3611 static ssize_t gds_show_state(char *buf)
3612 {
3613 return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
3614 }
3615
3616 static ssize_t tsa_show_state(char *buf)
3617 {
3618 return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
3619 }
3620
3621 static ssize_t vmscape_show_state(char *buf)
3622 {
3623 return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
3624 }
3625
3626 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
3627 char *buf, unsigned int bug)
3628 {
3629 if (!boot_cpu_has_bug(bug))
3630 return sysfs_emit(buf, "Not affected\n");
3631
3632 switch (bug) {
3633 case X86_BUG_CPU_MELTDOWN:
3634 if (boot_cpu_has(X86_FEATURE_PTI))
3635 return sysfs_emit(buf, "Mitigation: PTI\n");
3636
3637 if (hypervisor_is_type(X86_HYPER_XEN_PV))
3638 return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
3639
3640 break;
3641
3642 case X86_BUG_SPECTRE_V1:
3643 return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
3644
3645 case X86_BUG_SPECTRE_V2:
3646 return spectre_v2_show_state(buf);
3647
3648 case X86_BUG_SPEC_STORE_BYPASS:
3649 return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
3650
3651 case X86_BUG_L1TF:
3652 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
3653 return l1tf_show_state(buf);
3654 break;
3655
3656 case X86_BUG_MDS:
3657 return mds_show_state(buf);
3658
3659 case X86_BUG_TAA:
3660 return tsx_async_abort_show_state(buf);
3661
3662 case X86_BUG_ITLB_MULTIHIT:
3663 return itlb_multihit_show_state(buf);
3664
3665 case X86_BUG_SRBDS:
3666 return srbds_show_state(buf);
3667
3668 case X86_BUG_MMIO_STALE_DATA:
3669 return mmio_stale_data_show_state(buf);
3670
3671 case X86_BUG_RETBLEED:
3672 return retbleed_show_state(buf);
3673
3674 case X86_BUG_SRSO:
3675 return srso_show_state(buf);
3676
3677 case X86_BUG_GDS:
3678 return gds_show_state(buf);
3679
3680 case X86_BUG_RFDS:
3681 return rfds_show_state(buf);
3682
3683 case X86_BUG_OLD_MICROCODE:
3684 return old_microcode_show_state(buf);
3685
3686 case X86_BUG_ITS:
3687 return its_show_state(buf);
3688
3689 case X86_BUG_TSA:
3690 return tsa_show_state(buf);
3691
3692 case X86_BUG_VMSCAPE:
3693 return vmscape_show_state(buf);
3694
3695 default:
3696 break;
3697 }
3698
3699 return sysfs_emit(buf, "Vulnerable\n");
3700 }
3701
3702 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
3703 {
3704 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
3705 }
3706
3707 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
3708 {
3709 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
3710 }
3711
3712 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
3713 {
3714 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
3715 }
3716
3717 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
3718 {
3719 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
3720 }
3721
3722 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
3723 {
3724 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
3725 }
3726
3727 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
3728 {
3729 return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
3730 }
3731
3732 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
3733 {
3734 return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
3735 }
3736
3737 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
3738 {
3739 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
3740 }
3741
3742 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
3743 {
3744 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
3745 }
3746
3747 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
3748 {
3749 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
3750 }
3751
3752 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
3753 {
3754 return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
3755 }
3756
3757 ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
3758 {
3759 return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
3760 }
3761
3762 ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
3763 {
3764 return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
3765 }
3766
3767 ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
3768 {
3769 return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
3770 }
3771
3772 ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
3773 {
3774 return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
3775 }
3776
3777 ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
3778 {
3779 return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
3780 }
3781
3782 ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
3783 {
3784 return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
3785 }
3786
3787 ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
3788 {
3789 return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
3790 }
3791 #endif
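/*
 * Usage sketch, not part of the build: the cpu_show_*() handlers above back
 * the per-vulnerability files under /sys/devices/system/cpu/vulnerabilities/.
 * A minimal userspace reader (compiled separately, against libc) could be:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	// prints the line built by spectre_v2_show_state()
 *		fclose(f);
 *		return 0;
 *	}
 */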
3792
3793 void __warn_thunk(void)
3794 {
3795 WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
3796 }
3797