1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1994 Linus Torvalds
4 *
5 * Cyrix stuff, June 1998 by:
6 * - Rafael R. Reilova (moved everything from head.S),
7 * <rreilova@ececs.uc.edu>
8 * - Channing Corn (tests & fixes),
9 * - Andrew D. Balsa (code cleanup).
10 */
11 #include <linux/init.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/nospec.h>
15 #include <linux/prctl.h>
16 #include <linux/sched/smt.h>
17 #include <linux/pgtable.h>
18 #include <linux/bpf.h>
19 #include <linux/kvm_types.h>
20
21 #include <asm/spec-ctrl.h>
22 #include <asm/cmdline.h>
23 #include <asm/bugs.h>
24 #include <asm/processor.h>
25 #include <asm/processor-flags.h>
26 #include <asm/fpu/api.h>
27 #include <asm/msr.h>
28 #include <asm/vmx.h>
29 #include <asm/paravirt.h>
30 #include <asm/cpu_device_id.h>
31 #include <asm/e820/api.h>
32 #include <asm/hypervisor.h>
33 #include <asm/tlbflush.h>
34 #include <asm/cpu.h>
35
36 #include "cpu.h"
37
38 /*
39 * Speculation Vulnerability Handling
40 *
41 * Each vulnerability is handled with the following functions:
42 * <vuln>_select_mitigation() -- Selects a mitigation to use. This should
43 * take into account all relevant command line
44 * options.
45 * <vuln>_update_mitigation() -- This is called after all vulnerabilities have
46 * selected a mitigation, in case the selection
47 * may want to change based on other choices
48 * made. This function is optional.
49 * <vuln>_apply_mitigation() -- Enable the selected mitigation.
50 *
51 * The compile-time mitigation in all cases should be AUTO. An explicit
52 * command-line option can override AUTO. If no such option is
53 * provided, <vuln>_select_mitigation() will override AUTO to the best
54 * mitigation option.
55 */
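
/*
 * Illustrative sketch (not part of the build): how a hypothetical
 * vulnerability "foo" would hook into the three-phase scheme above.
 * The foo_* names, FOO_MITIGATION_* states and X86_BUG_FOO /
 * X86_FEATURE_FOO_MITIGATION are assumptions for illustration only.
 *
 *	static void __init foo_select_mitigation(void)
 *	{
 *		if (foo_mitigation == FOO_MITIGATION_AUTO)
 *			foo_mitigation = should_mitigate_vuln(X86_BUG_FOO) ?
 *					 FOO_MITIGATION_FULL : FOO_MITIGATION_OFF;
 *	}
 *
 *	static void __init foo_update_mitigation(void)
 *	{
 *		// optional: revisit the choice based on other selections
 *	}
 *
 *	static void __init foo_apply_mitigation(void)
 *	{
 *		if (foo_mitigation == FOO_MITIGATION_FULL)
 *			setup_force_cpu_cap(X86_FEATURE_FOO_MITIGATION);
 *	}
 */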
56
57 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
58 u64 x86_spec_ctrl_base;
59
60 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
61 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
62 EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
63
64 /*
65 * Set when the CPU has run a potentially malicious guest. An IBPB will
66 * be needed before running userspace. That IBPB will flush the branch
67 * predictor contents.
68 */
69 DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
70 EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);
71
72 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
73
74 static u64 __ro_after_init x86_arch_cap_msr;
75
76 static DEFINE_MUTEX(spec_ctrl_mutex);
77
78 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
79
80 static void __init set_return_thunk(void *thunk)
81 {
82 x86_return_thunk = thunk;
83
84 pr_info("active return thunk: %ps\n", thunk);
85 }
86
87 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
88 static void update_spec_ctrl(u64 val)
89 {
90 this_cpu_write(x86_spec_ctrl_current, val);
91 wrmsrq(MSR_IA32_SPEC_CTRL, val);
92 }
93
94 /*
95 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
96 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
97 */
98 void update_spec_ctrl_cond(u64 val)
99 {
100 if (this_cpu_read(x86_spec_ctrl_current) == val)
101 return;
102
103 this_cpu_write(x86_spec_ctrl_current, val);
104
105 /*
106 * When KERNEL_IBRS is enabled this MSR is written on return-to-user;
107 * unless forced, the update can be delayed until that time.
108 */
109 if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
110 wrmsrq(MSR_IA32_SPEC_CTRL, val);
111 }
112
113 noinstr u64 spec_ctrl_current(void)
114 {
115 return this_cpu_read(x86_spec_ctrl_current);
116 }
117 EXPORT_SYMBOL_GPL(spec_ctrl_current);
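
/*
 * Usage sketch (simplified, for illustration): the context-switch path
 * computes the next task's SPEC_CTRL value from x86_spec_ctrl_base plus
 * the per-task STIBP/SSBD bits and hands it to update_spec_ctrl_cond(),
 * roughly:
 *
 *	u64 msr = x86_spec_ctrl_base | stibp_tif_to_spec_ctrl(tifn);
 *
 *	update_spec_ctrl_cond(msr);
 *
 * stibp_tif_to_spec_ctrl() is named here for illustration; the real
 * logic lives in __speculation_ctrl_update().
 */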
118
119 /*
120 * AMD specific MSR info for Speculative Store Bypass control.
121 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
122 */
123 u64 __ro_after_init x86_amd_ls_cfg_base;
124 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
125
126 /* Control conditional STIBP in switch_to() */
127 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
128 /* Control conditional IBPB in switch_mm() */
129 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
130 /* Control unconditional IBPB in switch_mm() */
131 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
132
133 /* Control IBPB on vCPU load */
134 DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
135 EXPORT_SYMBOL_FOR_KVM(switch_vcpu_ibpb);
136
137 /* Control CPU buffer clear before idling (halt, mwait) */
138 DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
139 EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
140
141 /*
142 * Controls whether L1D flush based mitigations are enabled,
143 * based on HW features and the admin setting via boot parameter;
144 * defaults to false.
145 */
146 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
147
148 #undef pr_fmt
149 #define pr_fmt(fmt) "mitigations: " fmt
150
151 static void __init cpu_print_attack_vectors(void)
152 {
153 pr_info("Enabled attack vectors: ");
154
155 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
156 pr_cont("user_kernel, ");
157
158 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER))
159 pr_cont("user_user, ");
160
161 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
162 pr_cont("guest_host, ");
163
164 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST))
165 pr_cont("guest_guest, ");
166
167 pr_cont("SMT mitigations: ");
168
169 switch (smt_mitigations) {
170 case SMT_MITIGATIONS_OFF:
171 pr_cont("off\n");
172 break;
173 case SMT_MITIGATIONS_AUTO:
174 pr_cont("auto\n");
175 break;
176 case SMT_MITIGATIONS_ON:
177 pr_cont("on\n");
178 }
179 }
180
181 /*
182 * NOTE: This function is *only* called for SVM, since Intel uses
183 * MSR_IA32_SPEC_CTRL for SSBD.
184 */
185 void
186 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
187 {
188 u64 guestval, hostval;
189 struct thread_info *ti = current_thread_info();
190
191 /*
192 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
193 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
194 */
195 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
196 !static_cpu_has(X86_FEATURE_VIRT_SSBD))
197 return;
198
199 /*
200 * If the host has SSBD mitigation enabled, force it in the host's
201 * virtual MSR value. If it's not permanently enabled, evaluate
202 * current's TIF_SSBD thread flag.
203 */
204 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
205 hostval = SPEC_CTRL_SSBD;
206 else
207 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
208
209 /* Sanitize the guest value */
210 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
211
212 if (hostval != guestval) {
213 unsigned long tif;
214
215 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
216 ssbd_spec_ctrl_to_tif(hostval);
217
218 speculation_ctrl_update(tif);
219 }
220 }
221 EXPORT_SYMBOL_FOR_KVM(x86_virt_spec_ctrl);
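
/*
 * Call-site sketch (hypothetical, for illustration only): an SVM caller
 * would switch the SSBD state around guest entry/exit roughly as follows,
 * where guest_virt_spec_ctrl is whatever the guest last wrote:
 *
 *	x86_virt_spec_ctrl(guest_virt_spec_ctrl, true);   // before VMRUN
 *	... run the guest ...
 *	x86_virt_spec_ctrl(guest_virt_spec_ctrl, false);  // after #VMEXIT
 */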
222
223 static void x86_amd_ssb_disable(void)
224 {
225 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
226
227 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
228 wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
229 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
230 wrmsrq(MSR_AMD64_LS_CFG, msrval);
231 }
232
233 #undef pr_fmt
234 #define pr_fmt(fmt) "MDS: " fmt
235
236 /*
237 * Returns true if vulnerability should be mitigated based on the
238 * selected attack vector controls.
239 *
240 * See Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
241 */
242 static bool __init should_mitigate_vuln(unsigned int bug)
243 {
244 switch (bug) {
245 /*
246 * The only runtime-selected spectre_v1 mitigations in the kernel are
247 * related to SWAPGS protection on kernel entry. Therefore, protection
248 * is only required for the user->kernel attack vector.
249 */
250 case X86_BUG_SPECTRE_V1:
251 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL);
252
253 case X86_BUG_SPECTRE_V2:
254 case X86_BUG_RETBLEED:
255 case X86_BUG_L1TF:
256 case X86_BUG_ITS:
257 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
258 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
259
260 case X86_BUG_SPECTRE_V2_USER:
261 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
262 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
263
264 /*
265 * All the vulnerabilities below allow potentially leaking data
266 * across address spaces. Therefore, mitigation is required for
267 * any of these 4 attack vectors.
268 */
269 case X86_BUG_MDS:
270 case X86_BUG_TAA:
271 case X86_BUG_MMIO_STALE_DATA:
272 case X86_BUG_RFDS:
273 case X86_BUG_SRBDS:
274 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
275 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
276 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
277 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST);
278
279 case X86_BUG_GDS:
280 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
281 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
282 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
283 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
284 (smt_mitigations != SMT_MITIGATIONS_OFF);
285
286 case X86_BUG_SPEC_STORE_BYPASS:
287 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER);
288
289 case X86_BUG_VMSCAPE:
290 return cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
291
292 default:
293 WARN(1, "Unknown bug %x\n", bug);
294 return false;
295 }
296 }
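
/*
 * Example (illustrative): with only the user->kernel attack vector enabled,
 *
 *	should_mitigate_vuln(X86_BUG_SPECTRE_V1)      -> true
 *	should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER) -> false
 *	should_mitigate_vuln(X86_BUG_MDS)             -> true
 *
 * since MDS can leak across any of the four vectors, while spectre_v2_user
 * only matters for the user->user and guest->guest vectors.
 */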
297
298 /* Default mitigation for MDS-affected CPUs */
299 static enum mds_mitigations mds_mitigation __ro_after_init =
300 IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
301 static bool mds_nosmt __ro_after_init = false;
302
303 static const char * const mds_strings[] = {
304 [MDS_MITIGATION_OFF] = "Vulnerable",
305 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
306 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
307 };
308
309 enum taa_mitigations {
310 TAA_MITIGATION_OFF,
311 TAA_MITIGATION_AUTO,
312 TAA_MITIGATION_UCODE_NEEDED,
313 TAA_MITIGATION_VERW,
314 TAA_MITIGATION_TSX_DISABLED,
315 };
316
317 /* Default mitigation for TAA-affected CPUs */
318 static enum taa_mitigations taa_mitigation __ro_after_init =
319 IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;
320
321 enum mmio_mitigations {
322 MMIO_MITIGATION_OFF,
323 MMIO_MITIGATION_AUTO,
324 MMIO_MITIGATION_UCODE_NEEDED,
325 MMIO_MITIGATION_VERW,
326 };
327
328 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
329 static enum mmio_mitigations mmio_mitigation __ro_after_init =
330 IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;
331
332 enum rfds_mitigations {
333 RFDS_MITIGATION_OFF,
334 RFDS_MITIGATION_AUTO,
335 RFDS_MITIGATION_VERW,
336 RFDS_MITIGATION_UCODE_NEEDED,
337 };
338
339 /* Default mitigation for Register File Data Sampling */
340 static enum rfds_mitigations rfds_mitigation __ro_after_init =
341 IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
342
343 /*
344 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing on exit to
345 * userspace *and* on entry to KVM guests.
346 */
347 static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
348
349 static void __init mds_select_mitigation(void)
350 {
351 if (!boot_cpu_has_bug(X86_BUG_MDS)) {
352 mds_mitigation = MDS_MITIGATION_OFF;
353 return;
354 }
355
356 if (mds_mitigation == MDS_MITIGATION_AUTO) {
357 if (should_mitigate_vuln(X86_BUG_MDS))
358 mds_mitigation = MDS_MITIGATION_FULL;
359 else
360 mds_mitigation = MDS_MITIGATION_OFF;
361 }
362
363 if (mds_mitigation == MDS_MITIGATION_OFF)
364 return;
365
366 verw_clear_cpu_buf_mitigation_selected = true;
367 }
368
369 static void __init mds_update_mitigation(void)
370 {
371 if (!boot_cpu_has_bug(X86_BUG_MDS))
372 return;
373
374 /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
375 if (verw_clear_cpu_buf_mitigation_selected)
376 mds_mitigation = MDS_MITIGATION_FULL;
377
378 if (mds_mitigation == MDS_MITIGATION_FULL) {
379 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
380 mds_mitigation = MDS_MITIGATION_VMWERV;
381 }
382
383 pr_info("%s\n", mds_strings[mds_mitigation]);
384 }
385
386 static void __init mds_apply_mitigation(void)
387 {
388 if (mds_mitigation == MDS_MITIGATION_FULL ||
389 mds_mitigation == MDS_MITIGATION_VMWERV) {
390 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
391 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
392 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
393 (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
394 cpu_smt_disable(false);
395 }
396 }
397
398 static int __init mds_cmdline(char *str)
399 {
400 if (!boot_cpu_has_bug(X86_BUG_MDS))
401 return 0;
402
403 if (!str)
404 return -EINVAL;
405
406 if (!strcmp(str, "off"))
407 mds_mitigation = MDS_MITIGATION_OFF;
408 else if (!strcmp(str, "full"))
409 mds_mitigation = MDS_MITIGATION_FULL;
410 else if (!strcmp(str, "full,nosmt")) {
411 mds_mitigation = MDS_MITIGATION_FULL;
412 mds_nosmt = true;
413 }
414
415 return 0;
416 }
417 early_param("mds", mds_cmdline);
418
419 #undef pr_fmt
420 #define pr_fmt(fmt) "TAA: " fmt
421
422 static bool taa_nosmt __ro_after_init;
423
424 static const char * const taa_strings[] = {
425 [TAA_MITIGATION_OFF] = "Vulnerable",
426 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
427 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
428 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
429 };
430
431 static bool __init taa_vulnerable(void)
432 {
433 return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
434 }
435
436 static void __init taa_select_mitigation(void)
437 {
438 if (!boot_cpu_has_bug(X86_BUG_TAA)) {
439 taa_mitigation = TAA_MITIGATION_OFF;
440 return;
441 }
442
443 /* TSX previously disabled by tsx=off */
444 if (!boot_cpu_has(X86_FEATURE_RTM)) {
445 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
446 return;
447 }
448
449 /* Microcode will be checked in taa_update_mitigation(). */
450 if (taa_mitigation == TAA_MITIGATION_AUTO) {
451 if (should_mitigate_vuln(X86_BUG_TAA))
452 taa_mitigation = TAA_MITIGATION_VERW;
453 else
454 taa_mitigation = TAA_MITIGATION_OFF;
455 }
456
457 if (taa_mitigation != TAA_MITIGATION_OFF)
458 verw_clear_cpu_buf_mitigation_selected = true;
459 }
460
461 static void __init taa_update_mitigation(void)
462 {
463 if (!taa_vulnerable())
464 return;
465
466 if (verw_clear_cpu_buf_mitigation_selected)
467 taa_mitigation = TAA_MITIGATION_VERW;
468
469 if (taa_mitigation == TAA_MITIGATION_VERW) {
470 /* Check if the requisite ucode is available. */
471 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
472 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
473
474 /*
475 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
476 * A microcode update fixes this behavior to clear CPU buffers. It also
477 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
478 * ARCH_CAP_TSX_CTRL_MSR bit.
479 *
480 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
481 * update is required.
482 */
483 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
484 !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
485 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
486 }
487
488 pr_info("%s\n", taa_strings[taa_mitigation]);
489 }
490
491 static void __init taa_apply_mitigation(void)
492 {
493 if (taa_mitigation == TAA_MITIGATION_VERW ||
494 taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
495 /*
496 * TSX is enabled, select alternate mitigation for TAA which is
497 * the same as MDS. Enable MDS static branch to clear CPU buffers.
498 *
499 * For guests that can't determine whether the correct microcode is
500 * present on host, enable the mitigation for UCODE_NEEDED as well.
501 */
502 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
503 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
504
505 if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
506 cpu_smt_disable(false);
507 }
508 }
509
510 static int __init tsx_async_abort_parse_cmdline(char *str)
511 {
512 if (!boot_cpu_has_bug(X86_BUG_TAA))
513 return 0;
514
515 if (!str)
516 return -EINVAL;
517
518 if (!strcmp(str, "off")) {
519 taa_mitigation = TAA_MITIGATION_OFF;
520 } else if (!strcmp(str, "full")) {
521 taa_mitigation = TAA_MITIGATION_VERW;
522 } else if (!strcmp(str, "full,nosmt")) {
523 taa_mitigation = TAA_MITIGATION_VERW;
524 taa_nosmt = true;
525 }
526
527 return 0;
528 }
529 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
530
531 #undef pr_fmt
532 #define pr_fmt(fmt) "MMIO Stale Data: " fmt
533
534 static bool mmio_nosmt __ro_after_init = false;
535
536 static const char * const mmio_strings[] = {
537 [MMIO_MITIGATION_OFF] = "Vulnerable",
538 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
539 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
540 };
541
542 static void __init mmio_select_mitigation(void)
543 {
544 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
545 mmio_mitigation = MMIO_MITIGATION_OFF;
546 return;
547 }
548
549 /* Microcode will be checked in mmio_update_mitigation(). */
550 if (mmio_mitigation == MMIO_MITIGATION_AUTO) {
551 if (should_mitigate_vuln(X86_BUG_MMIO_STALE_DATA))
552 mmio_mitigation = MMIO_MITIGATION_VERW;
553 else
554 mmio_mitigation = MMIO_MITIGATION_OFF;
555 }
556
557 if (mmio_mitigation == MMIO_MITIGATION_OFF)
558 return;
559
560 /*
561 * Enable CPU buffer clear mitigation for host and VMM, if also affected
562 * by MDS or TAA.
563 */
564 if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
565 verw_clear_cpu_buf_mitigation_selected = true;
566 }
567
568 static void __init mmio_update_mitigation(void)
569 {
570 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
571 return;
572
573 if (verw_clear_cpu_buf_mitigation_selected)
574 mmio_mitigation = MMIO_MITIGATION_VERW;
575
576 if (mmio_mitigation == MMIO_MITIGATION_VERW) {
577 /*
578 * Check if the system has the right microcode.
579 *
580 * CPU Fill buffer clear mitigation is enumerated by either an explicit
581 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
582 * affected systems.
583 */
584 if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
585 (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
586 boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
587 !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
588 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
589 }
590
591 pr_info("%s\n", mmio_strings[mmio_mitigation]);
592 }
593
594 static void __init mmio_apply_mitigation(void)
595 {
596 if (mmio_mitigation == MMIO_MITIGATION_OFF)
597 return;
598
599 /*
600 * Only enable the MMIO-specific VMM mitigation if the generic CPU
601 * buffer clear mitigation is not already in use.
602 */
603 if (verw_clear_cpu_buf_mitigation_selected) {
604 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
605 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
606 } else {
607 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO);
608 }
609
610 /*
611 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
612 * be propagated to uncore buffers, clearing the Fill buffers on idle
613 * is required irrespective of SMT state.
614 */
615 if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
616 static_branch_enable(&cpu_buf_idle_clear);
617
618 if (mmio_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
619 cpu_smt_disable(false);
620 }
621
622 static int __init mmio_stale_data_parse_cmdline(char *str)
623 {
624 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
625 return 0;
626
627 if (!str)
628 return -EINVAL;
629
630 if (!strcmp(str, "off")) {
631 mmio_mitigation = MMIO_MITIGATION_OFF;
632 } else if (!strcmp(str, "full")) {
633 mmio_mitigation = MMIO_MITIGATION_VERW;
634 } else if (!strcmp(str, "full,nosmt")) {
635 mmio_mitigation = MMIO_MITIGATION_VERW;
636 mmio_nosmt = true;
637 }
638
639 return 0;
640 }
641 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
642
643 #undef pr_fmt
644 #define pr_fmt(fmt) "Register File Data Sampling: " fmt
645
646 static const char * const rfds_strings[] = {
647 [RFDS_MITIGATION_OFF] = "Vulnerable",
648 [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
649 [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
650 };
651
652 static inline bool __init verw_clears_cpu_reg_file(void)
653 {
654 return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
655 }
656
657 static void __init rfds_select_mitigation(void)
658 {
659 if (!boot_cpu_has_bug(X86_BUG_RFDS)) {
660 rfds_mitigation = RFDS_MITIGATION_OFF;
661 return;
662 }
663
664 if (rfds_mitigation == RFDS_MITIGATION_AUTO) {
665 if (should_mitigate_vuln(X86_BUG_RFDS))
666 rfds_mitigation = RFDS_MITIGATION_VERW;
667 else
668 rfds_mitigation = RFDS_MITIGATION_OFF;
669 }
670
671 if (rfds_mitigation == RFDS_MITIGATION_OFF)
672 return;
673
674 if (verw_clears_cpu_reg_file())
675 verw_clear_cpu_buf_mitigation_selected = true;
676 }
677
678 static void __init rfds_update_mitigation(void)
679 {
680 if (!boot_cpu_has_bug(X86_BUG_RFDS))
681 return;
682
683 if (verw_clear_cpu_buf_mitigation_selected)
684 rfds_mitigation = RFDS_MITIGATION_VERW;
685
686 if (rfds_mitigation == RFDS_MITIGATION_VERW) {
687 if (!verw_clears_cpu_reg_file())
688 rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
689 }
690
691 pr_info("%s\n", rfds_strings[rfds_mitigation]);
692 }
693
694 static void __init rfds_apply_mitigation(void)
695 {
696 if (rfds_mitigation == RFDS_MITIGATION_VERW) {
697 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
698 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
699 }
700 }
701
702 static __init int rfds_parse_cmdline(char *str)
703 {
704 if (!str)
705 return -EINVAL;
706
707 if (!boot_cpu_has_bug(X86_BUG_RFDS))
708 return 0;
709
710 if (!strcmp(str, "off"))
711 rfds_mitigation = RFDS_MITIGATION_OFF;
712 else if (!strcmp(str, "on"))
713 rfds_mitigation = RFDS_MITIGATION_VERW;
714
715 return 0;
716 }
717 early_param("reg_file_data_sampling", rfds_parse_cmdline);
718
719 #undef pr_fmt
720 #define pr_fmt(fmt) "SRBDS: " fmt
721
722 enum srbds_mitigations {
723 SRBDS_MITIGATION_OFF,
724 SRBDS_MITIGATION_AUTO,
725 SRBDS_MITIGATION_UCODE_NEEDED,
726 SRBDS_MITIGATION_FULL,
727 SRBDS_MITIGATION_TSX_OFF,
728 SRBDS_MITIGATION_HYPERVISOR,
729 };
730
731 static enum srbds_mitigations srbds_mitigation __ro_after_init =
732 IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;
733
734 static const char * const srbds_strings[] = {
735 [SRBDS_MITIGATION_OFF] = "Vulnerable",
736 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
737 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
738 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
739 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
740 };
741
742 static bool srbds_off;
743
744 void update_srbds_msr(void)
745 {
746 u64 mcu_ctrl;
747
748 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
749 return;
750
751 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
752 return;
753
754 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
755 return;
756
757 /*
758 * An MDS_NO CPU for which SRBDS mitigation is not needed (TSX is
759 * disabled) may not have received the SRBDS MSR microcode.
760 */
761 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
762 return;
763
764 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
765
766 switch (srbds_mitigation) {
767 case SRBDS_MITIGATION_OFF:
768 case SRBDS_MITIGATION_TSX_OFF:
769 mcu_ctrl |= RNGDS_MITG_DIS;
770 break;
771 case SRBDS_MITIGATION_FULL:
772 mcu_ctrl &= ~RNGDS_MITG_DIS;
773 break;
774 default:
775 break;
776 }
777
778 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
779 }
780
781 static void __init srbds_select_mitigation(void)
782 {
783 if (!boot_cpu_has_bug(X86_BUG_SRBDS)) {
784 srbds_mitigation = SRBDS_MITIGATION_OFF;
785 return;
786 }
787
788 if (srbds_mitigation == SRBDS_MITIGATION_AUTO) {
789 if (should_mitigate_vuln(X86_BUG_SRBDS))
790 srbds_mitigation = SRBDS_MITIGATION_FULL;
791 else {
792 srbds_mitigation = SRBDS_MITIGATION_OFF;
793 return;
794 }
795 }
796
797 /*
798 * Check to see if this is one of the MDS_NO systems supporting TSX that
799 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
800 * by Processor MMIO Stale Data vulnerability.
801 */
802 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
803 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
804 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
805 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
806 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
807 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
808 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
809 else if (srbds_off)
810 srbds_mitigation = SRBDS_MITIGATION_OFF;
811
812 pr_info("%s\n", srbds_strings[srbds_mitigation]);
813 }
814
815 static void __init srbds_apply_mitigation(void)
816 {
817 update_srbds_msr();
818 }
819
820 static int __init srbds_parse_cmdline(char *str)
821 {
822 if (!str)
823 return -EINVAL;
824
825 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
826 return 0;
827
828 srbds_off = !strcmp(str, "off");
829 return 0;
830 }
831 early_param("srbds", srbds_parse_cmdline);
832
833 #undef pr_fmt
834 #define pr_fmt(fmt) "L1D Flush : " fmt
835
836 enum l1d_flush_mitigations {
837 L1D_FLUSH_OFF = 0,
838 L1D_FLUSH_ON,
839 };
840
841 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
842
843 static void __init l1d_flush_select_mitigation(void)
844 {
845 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
846 return;
847
848 static_branch_enable(&switch_mm_cond_l1d_flush);
849 pr_info("Conditional flush on switch_mm() enabled\n");
850 }
851
852 static int __init l1d_flush_parse_cmdline(char *str)
853 {
854 if (!strcmp(str, "on"))
855 l1d_flush_mitigation = L1D_FLUSH_ON;
856
857 return 0;
858 }
859 early_param("l1d_flush", l1d_flush_parse_cmdline);
860
861 #undef pr_fmt
862 #define pr_fmt(fmt) "GDS: " fmt
863
864 enum gds_mitigations {
865 GDS_MITIGATION_OFF,
866 GDS_MITIGATION_AUTO,
867 GDS_MITIGATION_UCODE_NEEDED,
868 GDS_MITIGATION_FORCE,
869 GDS_MITIGATION_FULL,
870 GDS_MITIGATION_FULL_LOCKED,
871 GDS_MITIGATION_HYPERVISOR,
872 };
873
874 static enum gds_mitigations gds_mitigation __ro_after_init =
875 IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;
876
877 static const char * const gds_strings[] = {
878 [GDS_MITIGATION_OFF] = "Vulnerable",
879 [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
880 [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode",
881 [GDS_MITIGATION_FULL] = "Mitigation: Microcode",
882 [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)",
883 [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
884 };
885
886 bool gds_ucode_mitigated(void)
887 {
888 return (gds_mitigation == GDS_MITIGATION_FULL ||
889 gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
890 }
891 EXPORT_SYMBOL_FOR_KVM(gds_ucode_mitigated);
892
893 void update_gds_msr(void)
894 {
895 u64 mcu_ctrl_after;
896 u64 mcu_ctrl;
897
898 switch (gds_mitigation) {
899 case GDS_MITIGATION_OFF:
900 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
901 mcu_ctrl |= GDS_MITG_DIS;
902 break;
903 case GDS_MITIGATION_FULL_LOCKED:
904 /*
905 * The LOCKED state comes from the boot CPU. APs might not have
906 * the same state. Make sure the mitigation is enabled on all
907 * CPUs.
908 */
909 case GDS_MITIGATION_FULL:
910 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
911 mcu_ctrl &= ~GDS_MITG_DIS;
912 break;
913 case GDS_MITIGATION_FORCE:
914 case GDS_MITIGATION_UCODE_NEEDED:
915 case GDS_MITIGATION_HYPERVISOR:
916 case GDS_MITIGATION_AUTO:
917 return;
918 }
919
920 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
921
922 /*
923 * Check to make sure that the WRMSR value was not ignored. Writes to
924 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
925 * processor was not.
926 */
927 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
928 WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
929 }
930
931 static void __init gds_select_mitigation(void)
932 {
933 u64 mcu_ctrl;
934
935 if (!boot_cpu_has_bug(X86_BUG_GDS))
936 return;
937
938 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
939 gds_mitigation = GDS_MITIGATION_HYPERVISOR;
940 return;
941 }
942
943 /* Will verify below that mitigation _can_ be disabled */
944 if (gds_mitigation == GDS_MITIGATION_AUTO) {
945 if (should_mitigate_vuln(X86_BUG_GDS))
946 gds_mitigation = GDS_MITIGATION_FULL;
947 else
948 gds_mitigation = GDS_MITIGATION_OFF;
949 }
950
951 /* No microcode */
952 if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
953 if (gds_mitigation != GDS_MITIGATION_FORCE)
954 gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
955 return;
956 }
957
958 /* Microcode has mitigation, use it */
959 if (gds_mitigation == GDS_MITIGATION_FORCE)
960 gds_mitigation = GDS_MITIGATION_FULL;
961
962 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
963 if (mcu_ctrl & GDS_MITG_LOCKED) {
964 if (gds_mitigation == GDS_MITIGATION_OFF)
965 pr_warn("Mitigation locked. Disable failed.\n");
966
967 /*
968 * The mitigation is selected from the boot CPU. All other CPUs
969 * _should_ have the same state. If the boot CPU isn't locked
970 * but others are then update_gds_msr() will WARN() of the state
971 * mismatch. If the boot CPU is locked update_gds_msr() will
972 * ensure the other CPUs have the mitigation enabled.
973 */
974 gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
975 }
976 }
977
978 static void __init gds_apply_mitigation(void)
979 {
980 if (!boot_cpu_has_bug(X86_BUG_GDS))
981 return;
982
983 /* Microcode is present */
984 if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
985 update_gds_msr();
986 else if (gds_mitigation == GDS_MITIGATION_FORCE) {
987 /*
988 * This only needs to be done on the boot CPU so do it
989 * here rather than in update_gds_msr()
990 */
991 setup_clear_cpu_cap(X86_FEATURE_AVX);
992 pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
993 }
994
995 pr_info("%s\n", gds_strings[gds_mitigation]);
996 }
997
998 static int __init gds_parse_cmdline(char *str)
999 {
1000 if (!str)
1001 return -EINVAL;
1002
1003 if (!boot_cpu_has_bug(X86_BUG_GDS))
1004 return 0;
1005
1006 if (!strcmp(str, "off"))
1007 gds_mitigation = GDS_MITIGATION_OFF;
1008 else if (!strcmp(str, "force"))
1009 gds_mitigation = GDS_MITIGATION_FORCE;
1010
1011 return 0;
1012 }
1013 early_param("gather_data_sampling", gds_parse_cmdline);
1014
1015 #undef pr_fmt
1016 #define pr_fmt(fmt) "Spectre V1 : " fmt
1017
1018 enum spectre_v1_mitigation {
1019 SPECTRE_V1_MITIGATION_NONE,
1020 SPECTRE_V1_MITIGATION_AUTO,
1021 };
1022
1023 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
1024 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
1025 SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;
1026
1027 static const char * const spectre_v1_strings[] = {
1028 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
1029 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
1030 };
1031
1032 /*
1033 * Does SMAP provide full mitigation against speculative kernel access to
1034 * userspace?
1035 */
1036 static bool smap_works_speculatively(void)
1037 {
1038 if (!boot_cpu_has(X86_FEATURE_SMAP))
1039 return false;
1040
1041 /*
1042 * On CPUs which are vulnerable to Meltdown, SMAP does not
1043 * prevent speculative access to user data in the L1 cache.
1044 * Consider SMAP to be non-functional as a mitigation on these
1045 * CPUs.
1046 */
1047 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
1048 return false;
1049
1050 return true;
1051 }
1052
1053 static void __init spectre_v1_select_mitigation(void)
1054 {
1055 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1056 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1057
1058 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V1))
1059 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1060 }
1061
1062 static void __init spectre_v1_apply_mitigation(void)
1063 {
1064 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
1065 return;
1066
1067 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
1068 /*
1069 * With Spectre v1, a user can speculatively control either
1070 * path of a conditional swapgs with a user-controlled GS
1071 * value. The mitigation is to add lfences to both code paths.
1072 *
1073 * If FSGSBASE is enabled, the user can put a kernel address in
1074 * GS, in which case SMAP provides no protection.
1075 *
1076 * If FSGSBASE is disabled, the user can only put a user space
1077 * address in GS. That makes an attack harder, but still
1078 * possible if there's no SMAP protection.
1079 */
1080 if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
1081 !smap_works_speculatively()) {
1082 /*
1083 * Mitigation can be provided from SWAPGS itself or
1084 * PTI as the CR3 write in the Meltdown mitigation
1085 * is serializing.
1086 *
1087 * If neither is there, mitigate with an LFENCE to
1088 * stop speculation through swapgs.
1089 */
1090 if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
1091 !boot_cpu_has(X86_FEATURE_PTI))
1092 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
1093
1094 /*
1095 * Enable lfences in the kernel entry (non-swapgs)
1096 * paths, to prevent user entry from speculatively
1097 * skipping swapgs.
1098 */
1099 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
1100 }
1101 }
1102
1103 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1104 }
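
/*
 * Entry-path sketch (macro names assumed, see arch/x86/entry/calling.h):
 * the capability bits forced above gate LFENCEs in the asm entry code,
 * conceptually:
 *
 *	swapgs
 *	FENCE_SWAPGS_USER_ENTRY		// lfence if FENCE_SWAPGS_USER is set
 *	...
 *	FENCE_SWAPGS_KERNEL_ENTRY	// lfence if FENCE_SWAPGS_KERNEL is set
 */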
1105
1106 static int __init nospectre_v1_cmdline(char *str)
1107 {
1108 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1109 return 0;
1110 }
1111 early_param("nospectre_v1", nospectre_v1_cmdline);
1112
1113 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
1114
1115 /* Depends on spectre_v2 mitigation selected already */
1116 static inline bool cdt_possible(enum spectre_v2_mitigation mode)
1117 {
1118 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) ||
1119 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE))
1120 return false;
1121
1122 if (mode == SPECTRE_V2_RETPOLINE ||
1123 mode == SPECTRE_V2_EIBRS_RETPOLINE)
1124 return true;
1125
1126 return false;
1127 }
1128
1129 #undef pr_fmt
1130 #define pr_fmt(fmt) "RETBleed: " fmt
1131
1132 enum its_mitigation {
1133 ITS_MITIGATION_OFF,
1134 ITS_MITIGATION_AUTO,
1135 ITS_MITIGATION_VMEXIT_ONLY,
1136 ITS_MITIGATION_ALIGNED_THUNKS,
1137 ITS_MITIGATION_RETPOLINE_STUFF,
1138 };
1139
1140 static enum its_mitigation its_mitigation __ro_after_init =
1141 IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF;
1142
1143 enum retbleed_mitigation {
1144 RETBLEED_MITIGATION_NONE,
1145 RETBLEED_MITIGATION_AUTO,
1146 RETBLEED_MITIGATION_UNRET,
1147 RETBLEED_MITIGATION_IBPB,
1148 RETBLEED_MITIGATION_IBRS,
1149 RETBLEED_MITIGATION_EIBRS,
1150 RETBLEED_MITIGATION_STUFF,
1151 };
1152
1153 static const char * const retbleed_strings[] = {
1154 [RETBLEED_MITIGATION_NONE] = "Vulnerable",
1155 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
1156 [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
1157 [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
1158 [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
1159 [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
1160 };
1161
1162 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
1163 IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;
1164
1165 static int __ro_after_init retbleed_nosmt = false;
1166
1167 enum srso_mitigation {
1168 SRSO_MITIGATION_NONE,
1169 SRSO_MITIGATION_AUTO,
1170 SRSO_MITIGATION_UCODE_NEEDED,
1171 SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
1172 SRSO_MITIGATION_MICROCODE,
1173 SRSO_MITIGATION_NOSMT,
1174 SRSO_MITIGATION_SAFE_RET,
1175 SRSO_MITIGATION_IBPB,
1176 SRSO_MITIGATION_IBPB_ON_VMEXIT,
1177 SRSO_MITIGATION_BP_SPEC_REDUCE,
1178 };
1179
1180 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
1181
1182 static int __init retbleed_parse_cmdline(char *str)
1183 {
1184 if (!str)
1185 return -EINVAL;
1186
1187 while (str) {
1188 char *next = strchr(str, ',');
1189 if (next) {
1190 *next = 0;
1191 next++;
1192 }
1193
1194 if (!strcmp(str, "off")) {
1195 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1196 } else if (!strcmp(str, "auto")) {
1197 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1198 } else if (!strcmp(str, "unret")) {
1199 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1200 } else if (!strcmp(str, "ibpb")) {
1201 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1202 } else if (!strcmp(str, "stuff")) {
1203 retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1204 } else if (!strcmp(str, "nosmt")) {
1205 retbleed_nosmt = true;
1206 } else if (!strcmp(str, "force")) {
1207 setup_force_cpu_bug(X86_BUG_RETBLEED);
1208 } else {
1209 pr_err("Ignoring unknown retbleed option (%s).", str);
1210 }
1211
1212 str = next;
1213 }
1214
1215 return 0;
1216 }
1217 early_param("retbleed", retbleed_parse_cmdline);
1218
1219 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
1220 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
1221
1222 static void __init retbleed_select_mitigation(void)
1223 {
1224 if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) {
1225 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1226 return;
1227 }
1228
1229 switch (retbleed_mitigation) {
1230 case RETBLEED_MITIGATION_UNRET:
1231 if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
1232 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1233 pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
1234 }
1235 break;
1236 case RETBLEED_MITIGATION_IBPB:
1237 if (!boot_cpu_has(X86_FEATURE_IBPB)) {
1238 pr_err("WARNING: CPU does not support IBPB.\n");
1239 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1240 } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
1241 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
1242 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1243 }
1244 break;
1245 case RETBLEED_MITIGATION_STUFF:
1246 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1247 pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
1248 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1249 } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1250 pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
1251 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1252 }
1253 break;
1254 default:
1255 break;
1256 }
1257
1258 if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
1259 return;
1260
1261 if (!should_mitigate_vuln(X86_BUG_RETBLEED)) {
1262 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1263 return;
1264 }
1265
1266 /* Intel mitigation selected in retbleed_update_mitigation() */
1267 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1268 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
1269 if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
1270 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1271 else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
1272 boot_cpu_has(X86_FEATURE_IBPB))
1273 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1274 else
1275 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1276 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1277 /* Final mitigation depends on spectre-v2 selection */
1278 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
1279 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1280 else if (boot_cpu_has(X86_FEATURE_IBRS))
1281 retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1282 else
1283 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1284 }
1285 }
1286
1287 static void __init retbleed_update_mitigation(void)
1288 {
1289 if (!boot_cpu_has_bug(X86_BUG_RETBLEED))
1290 return;
1291
1292 /* ITS can also enable stuffing */
1293 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF)
1294 retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1295
1296 /* If SRSO is using IBPB, that works for retbleed too */
1297 if (srso_mitigation == SRSO_MITIGATION_IBPB)
1298 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1299
1300 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF &&
1301 !cdt_possible(spectre_v2_enabled)) {
1302 pr_err("WARNING: retbleed=stuff depends on retpoline\n");
1303 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1304 }
1305
1306 /*
1307 * Let IBRS trump all on Intel, without affecting the effects of the
1308 * retbleed= cmdline option, except for call-depth-based stuffing.
1309 */
1310 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1311 switch (spectre_v2_enabled) {
1312 case SPECTRE_V2_IBRS:
1313 retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1314 break;
1315 case SPECTRE_V2_EIBRS:
1316 case SPECTRE_V2_EIBRS_RETPOLINE:
1317 case SPECTRE_V2_EIBRS_LFENCE:
1318 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1319 break;
1320 default:
1321 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) {
1322 if (retbleed_mitigation != RETBLEED_MITIGATION_NONE)
1323 pr_err(RETBLEED_INTEL_MSG);
1324
1325 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1326 }
1327 }
1328 }
1329
1330 pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
1331 }
1332
1333 static void __init retbleed_apply_mitigation(void)
1334 {
1335 bool mitigate_smt = false;
1336
1337 switch (retbleed_mitigation) {
1338 case RETBLEED_MITIGATION_NONE:
1339 return;
1340
1341 case RETBLEED_MITIGATION_UNRET:
1342 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1343 setup_force_cpu_cap(X86_FEATURE_UNRET);
1344
1345 set_return_thunk(retbleed_return_thunk);
1346
1347 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
1348 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
1349 pr_err(RETBLEED_UNTRAIN_MSG);
1350
1351 mitigate_smt = true;
1352 break;
1353
1354 case RETBLEED_MITIGATION_IBPB:
1355 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1356 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
1357 mitigate_smt = true;
1358
1359 /*
1360 * IBPB on entry already obviates the need for
1361 * software-based untraining so clear those in case some
1362 * other mitigation like SRSO has selected them.
1363 */
1364 setup_clear_cpu_cap(X86_FEATURE_UNRET);
1365 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
1366
1367 /*
1368 * There is no need for RSB filling: write_ibpb() ensures
1369 * all predictions, including the RSB, are invalidated,
1370 * regardless of IBPB implementation.
1371 */
1372 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1373
1374 break;
1375
1376 case RETBLEED_MITIGATION_STUFF:
1377 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1378 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1379
1380 set_return_thunk(call_depth_return_thunk);
1381 break;
1382
1383 default:
1384 break;
1385 }
1386
1387 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
1388 (retbleed_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
1389 cpu_smt_disable(false);
1390 }
1391
1392 #undef pr_fmt
1393 #define pr_fmt(fmt) "ITS: " fmt
1394
1395 static const char * const its_strings[] = {
1396 [ITS_MITIGATION_OFF] = "Vulnerable",
1397 [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
1398 [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
1399 [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
1400 };
1401
1402 static int __init its_parse_cmdline(char *str)
1403 {
1404 if (!str)
1405 return -EINVAL;
1406
1407 if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
1408 pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
1409 return 0;
1410 }
1411
1412 if (!strcmp(str, "off")) {
1413 its_mitigation = ITS_MITIGATION_OFF;
1414 } else if (!strcmp(str, "on")) {
1415 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1416 } else if (!strcmp(str, "force")) {
1417 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1418 setup_force_cpu_bug(X86_BUG_ITS);
1419 } else if (!strcmp(str, "vmexit")) {
1420 its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
1421 } else if (!strcmp(str, "stuff")) {
1422 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1423 } else {
1424 pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
1425 }
1426
1427 return 0;
1428 }
1429 early_param("indirect_target_selection", its_parse_cmdline);
1430
1431 static void __init its_select_mitigation(void)
1432 {
1433 if (!boot_cpu_has_bug(X86_BUG_ITS)) {
1434 its_mitigation = ITS_MITIGATION_OFF;
1435 return;
1436 }
1437
1438 if (its_mitigation == ITS_MITIGATION_AUTO) {
1439 if (should_mitigate_vuln(X86_BUG_ITS))
1440 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1441 else
1442 its_mitigation = ITS_MITIGATION_OFF;
1443 }
1444
1445 if (its_mitigation == ITS_MITIGATION_OFF)
1446 return;
1447
1448 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
1449 !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
1450 pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
1451 its_mitigation = ITS_MITIGATION_OFF;
1452 return;
1453 }
1454
1455 if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
1456 pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
1457 its_mitigation = ITS_MITIGATION_OFF;
1458 return;
1459 }
1460
1461 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1462 !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1463 pr_err("RSB stuff mitigation not supported, using default\n");
1464 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1465 }
1466
1467 if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY &&
1468 !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
1469 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1470 }
1471
1472 static void __init its_update_mitigation(void)
1473 {
1474 if (!boot_cpu_has_bug(X86_BUG_ITS))
1475 return;
1476
1477 switch (spectre_v2_enabled) {
1478 case SPECTRE_V2_NONE:
1479 if (its_mitigation != ITS_MITIGATION_OFF)
1480 pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
1481 its_mitigation = ITS_MITIGATION_OFF;
1482 break;
1483 case SPECTRE_V2_RETPOLINE:
1484 case SPECTRE_V2_EIBRS_RETPOLINE:
1485 /* Retpoline+CDT mitigates ITS */
1486 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
1487 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1488 break;
1489 case SPECTRE_V2_LFENCE:
1490 case SPECTRE_V2_EIBRS_LFENCE:
1491 pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
1492 its_mitigation = ITS_MITIGATION_OFF;
1493 break;
1494 default:
1495 break;
1496 }
1497
1498 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1499 !cdt_possible(spectre_v2_enabled))
1500 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1501
1502 pr_info("%s\n", its_strings[its_mitigation]);
1503 }
1504
1505 static void __init its_apply_mitigation(void)
1506 {
1507 switch (its_mitigation) {
1508 case ITS_MITIGATION_OFF:
1509 case ITS_MITIGATION_AUTO:
1510 case ITS_MITIGATION_VMEXIT_ONLY:
1511 break;
1512 case ITS_MITIGATION_ALIGNED_THUNKS:
1513 if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
1514 setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
1515
1516 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1517 set_return_thunk(its_return_thunk);
1518 break;
1519 case ITS_MITIGATION_RETPOLINE_STUFF:
1520 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1521 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1522 set_return_thunk(call_depth_return_thunk);
1523 break;
1524 }
1525 }
1526
1527 #undef pr_fmt
1528 #define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt
1529
1530 enum tsa_mitigations {
1531 TSA_MITIGATION_NONE,
1532 TSA_MITIGATION_AUTO,
1533 TSA_MITIGATION_UCODE_NEEDED,
1534 TSA_MITIGATION_USER_KERNEL,
1535 TSA_MITIGATION_VM,
1536 TSA_MITIGATION_FULL,
1537 };
1538
1539 static const char * const tsa_strings[] = {
1540 [TSA_MITIGATION_NONE] = "Vulnerable",
1541 [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
1542 [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
1543 [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
1544 [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
1545 };
1546
1547 static enum tsa_mitigations tsa_mitigation __ro_after_init =
1548 IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;
1549
1550 static int __init tsa_parse_cmdline(char *str)
1551 {
1552 if (!str)
1553 return -EINVAL;
1554
1555 if (!strcmp(str, "off"))
1556 tsa_mitigation = TSA_MITIGATION_NONE;
1557 else if (!strcmp(str, "on"))
1558 tsa_mitigation = TSA_MITIGATION_FULL;
1559 else if (!strcmp(str, "user"))
1560 tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1561 else if (!strcmp(str, "vm"))
1562 tsa_mitigation = TSA_MITIGATION_VM;
1563 else
1564 pr_err("Ignoring unknown tsa=%s option.\n", str);
1565
1566 return 0;
1567 }
1568 early_param("tsa", tsa_parse_cmdline);
1569
1570 static void __init tsa_select_mitigation(void)
1571 {
1572 if (!boot_cpu_has_bug(X86_BUG_TSA)) {
1573 tsa_mitigation = TSA_MITIGATION_NONE;
1574 return;
1575 }
1576
1577 if (tsa_mitigation == TSA_MITIGATION_AUTO) {
1578 bool vm = false, uk = false;
1579
1580 tsa_mitigation = TSA_MITIGATION_NONE;
1581
1582 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
1583 cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER)) {
1584 tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1585 uk = true;
1586 }
1587
1588 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
1589 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
1590 tsa_mitigation = TSA_MITIGATION_VM;
1591 vm = true;
1592 }
1593
1594 if (uk && vm)
1595 tsa_mitigation = TSA_MITIGATION_FULL;
1596 }
1597
1598 if (tsa_mitigation == TSA_MITIGATION_NONE)
1599 return;
1600
1601 if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
1602 tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
1603
1604 /*
1605 * No need to set verw_clear_cpu_buf_mitigation_selected - it
1606 * doesn't fit all cases here and it is not needed because this
1607 * is the only VERW-based mitigation on AMD.
1608 */
1609 pr_info("%s\n", tsa_strings[tsa_mitigation]);
1610 }
1611
1612 static void __init tsa_apply_mitigation(void)
1613 {
1614 switch (tsa_mitigation) {
1615 case TSA_MITIGATION_USER_KERNEL:
1616 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1617 break;
1618 case TSA_MITIGATION_VM:
1619 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1620 break;
1621 case TSA_MITIGATION_FULL:
1622 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1623 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1624 break;
1625 default:
1626 break;
1627 }
1628 }
1629
1630 #undef pr_fmt
1631 #define pr_fmt(fmt) "Spectre V2 : " fmt
1632
1633 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
1634 SPECTRE_V2_USER_NONE;
1635 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
1636 SPECTRE_V2_USER_NONE;
1637
1638 #ifdef CONFIG_MITIGATION_RETPOLINE
1639 static bool spectre_v2_bad_module;
1640
1641 bool retpoline_module_ok(bool has_retpoline)
1642 {
1643 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1644 return true;
1645
1646 pr_err("System may be vulnerable to spectre v2\n");
1647 spectre_v2_bad_module = true;
1648 return false;
1649 }
1650
1651 static inline const char *spectre_v2_module_string(void)
1652 {
1653 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1654 }
1655 #else
1656 static inline const char *spectre_v2_module_string(void) { return ""; }
1657 #endif
1658
1659 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1660 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1661 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1662 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1663
1664 #ifdef CONFIG_BPF_SYSCALL
1665 void unpriv_ebpf_notify(int new_state)
1666 {
1667 if (new_state)
1668 return;
1669
1670 /* Unprivileged eBPF is enabled */
1671
1672 switch (spectre_v2_enabled) {
1673 case SPECTRE_V2_EIBRS:
1674 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1675 break;
1676 case SPECTRE_V2_EIBRS_LFENCE:
1677 if (sched_smt_active())
1678 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1679 break;
1680 default:
1681 break;
1682 }
1683 }
1684 #endif
1685
1686 /* The kernel command line selection for spectre v2 */
1687 enum spectre_v2_mitigation_cmd {
1688 SPECTRE_V2_CMD_NONE,
1689 SPECTRE_V2_CMD_AUTO,
1690 SPECTRE_V2_CMD_FORCE,
1691 SPECTRE_V2_CMD_RETPOLINE,
1692 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1693 SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1694 SPECTRE_V2_CMD_EIBRS,
1695 SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1696 SPECTRE_V2_CMD_EIBRS_LFENCE,
1697 SPECTRE_V2_CMD_IBRS,
1698 };
1699
1700 static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init =
1701 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE;
1702
1703 enum spectre_v2_user_mitigation_cmd {
1704 SPECTRE_V2_USER_CMD_NONE,
1705 SPECTRE_V2_USER_CMD_AUTO,
1706 SPECTRE_V2_USER_CMD_FORCE,
1707 SPECTRE_V2_USER_CMD_PRCTL,
1708 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1709 SPECTRE_V2_USER_CMD_SECCOMP,
1710 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1711 };
1712
1713 static enum spectre_v2_user_mitigation_cmd spectre_v2_user_cmd __ro_after_init =
1714 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;
1715
1716 static const char * const spectre_v2_user_strings[] = {
1717 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
1718 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
1719 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
1720 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
1721 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
1722 };
1723
1724 static int __init spectre_v2_user_parse_cmdline(char *str)
1725 {
1726 if (!str)
1727 return -EINVAL;
1728
1729 if (!strcmp(str, "auto"))
1730 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_AUTO;
1731 else if (!strcmp(str, "off"))
1732 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_NONE;
1733 else if (!strcmp(str, "on"))
1734 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_FORCE;
1735 else if (!strcmp(str, "prctl"))
1736 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_PRCTL;
1737 else if (!strcmp(str, "prctl,ibpb"))
1738 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_PRCTL_IBPB;
1739 else if (!strcmp(str, "seccomp"))
1740 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_SECCOMP;
1741 else if (!strcmp(str, "seccomp,ibpb"))
1742 spectre_v2_user_cmd = SPECTRE_V2_USER_CMD_SECCOMP_IBPB;
1743 else
1744 pr_err("Ignoring unknown spectre_v2_user option (%s).", str);
1745
1746 return 0;
1747 }
1748 early_param("spectre_v2_user", spectre_v2_user_parse_cmdline);
1749
1750 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1751 {
1752 return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1753 }
1754
1755 static void __init spectre_v2_user_select_mitigation(void)
1756 {
1757 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1758 return;
1759
1760 switch (spectre_v2_user_cmd) {
1761 case SPECTRE_V2_USER_CMD_NONE:
1762 return;
1763 case SPECTRE_V2_USER_CMD_FORCE:
1764 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1765 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1766 break;
1767 case SPECTRE_V2_USER_CMD_AUTO:
1768 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2_USER))
1769 break;
1770 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1771 if (smt_mitigations == SMT_MITIGATIONS_OFF)
1772 break;
1773 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1774 break;
1775 case SPECTRE_V2_USER_CMD_PRCTL:
1776 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1777 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1778 break;
1779 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1780 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1781 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1782 break;
1783 case SPECTRE_V2_USER_CMD_SECCOMP:
1784 if (IS_ENABLED(CONFIG_SECCOMP))
1785 spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
1786 else
1787 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1788 spectre_v2_user_stibp = spectre_v2_user_ibpb;
1789 break;
1790 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1791 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1792 if (IS_ENABLED(CONFIG_SECCOMP))
1793 spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
1794 else
1795 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1796 break;
1797 }
1798
1799 /*
1800 * At this point, an STIBP mode other than "off" has been set.
1801 * If STIBP support is not being forced, check if STIBP always-on
1802 * is preferred.
1803 */
1804 if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1805 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
1806 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1807 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1808
1809 if (!boot_cpu_has(X86_FEATURE_IBPB))
1810 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1811
1812 if (!boot_cpu_has(X86_FEATURE_STIBP))
1813 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1814 }
1815
1816 static void __init spectre_v2_user_update_mitigation(void)
1817 {
1818 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1819 return;
1820
1821 /* The spectre_v2 cmd line can override spectre_v2_user options */
1822 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
1823 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1824 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1825 } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
1826 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1827 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1828 }
1829
1830 /*
1831 * If there is no STIBP, if Intel enhanced IBRS is enabled, or if SMT is
1832 * impossible, STIBP is not required.
1833 *
1834 * Intel's Enhanced IBRS also protects against cross-thread branch target
1835 * injection in user-mode as the IBRS bit remains always set which
1836 * implicitly enables cross-thread protections. However, in legacy IBRS
1837 * mode, the IBRS bit is set only on kernel entry and cleared on return
1838 * to userspace. AMD Automatic IBRS also does not protect userspace.
1839 * These modes therefore disable the implicit cross-thread protection,
1840 * so allow for STIBP to be selected in those cases.
1841 */
1842 if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1843 !cpu_smt_possible() ||
1844 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1845 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
1846 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1847 return;
1848 }
1849
1850 if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE &&
1851 (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
1852 retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) {
1853 if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT &&
1854 spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED)
1855 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
1856 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1857 }
1858 pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]);
1859 }
1860
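/*
 * Flip the static keys gating IBPB: always on vCPU switch, and on mm
 * switch either unconditionally (strict) or only for tasks that asked
 * for it (prctl/seccomp).
 */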
1861 static void __init spectre_v2_user_apply_mitigation(void)
1862 {
1863 /* Initialize Indirect Branch Prediction Barrier */
1864 if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) {
1865 static_branch_enable(&switch_vcpu_ibpb);
1866
1867 switch (spectre_v2_user_ibpb) {
1868 case SPECTRE_V2_USER_STRICT:
1869 static_branch_enable(&switch_mm_always_ibpb);
1870 break;
1871 case SPECTRE_V2_USER_PRCTL:
1872 case SPECTRE_V2_USER_SECCOMP:
1873 static_branch_enable(&switch_mm_cond_ibpb);
1874 break;
1875 default:
1876 break;
1877 }
1878
1879 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
1880 static_key_enabled(&switch_mm_always_ibpb) ?
1881 "always-on" : "conditional");
1882 }
1883 }
1884
1885 static const char * const spectre_v2_strings[] = {
1886 [SPECTRE_V2_NONE] = "Vulnerable",
1887 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
1888 [SPECTRE_V2_LFENCE] = "Vulnerable: LFENCE",
1889 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
1890 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
1891 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
1892 [SPECTRE_V2_IBRS] = "Mitigation: IBRS",
1893 };
1894
1895 static bool nospectre_v2 __ro_after_init;
1896
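/*
 * "nospectre_v2" disables the mitigation and takes precedence over any
 * spectre_v2= option, regardless of ordering on the command line.
 */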
1897 static int __init nospectre_v2_parse_cmdline(char *str)
1898 {
1899 nospectre_v2 = true;
1900 spectre_v2_cmd = SPECTRE_V2_CMD_NONE;
1901 return 0;
1902 }
1903 early_param("nospectre_v2", nospectre_v2_parse_cmdline);
1904
1905 static int __init spectre_v2_parse_cmdline(char *str)
1906 {
1907 if (!str)
1908 return -EINVAL;
1909
1910 if (nospectre_v2)
1911 return 0;
1912
1913 if (!strcmp(str, "off")) {
1914 spectre_v2_cmd = SPECTRE_V2_CMD_NONE;
1915 } else if (!strcmp(str, "on")) {
1916 spectre_v2_cmd = SPECTRE_V2_CMD_FORCE;
1917 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1918 setup_force_cpu_bug(X86_BUG_SPECTRE_V2_USER);
1919 } else if (!strcmp(str, "retpoline")) {
1920 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE;
1921 } else if (!strcmp(str, "retpoline,amd") ||
1922 !strcmp(str, "retpoline,lfence")) {
1923 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE_LFENCE;
1924 } else if (!strcmp(str, "retpoline,generic")) {
1925 spectre_v2_cmd = SPECTRE_V2_CMD_RETPOLINE_GENERIC;
1926 } else if (!strcmp(str, "eibrs")) {
1927 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS;
1928 } else if (!strcmp(str, "eibrs,lfence")) {
1929 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS_LFENCE;
1930 } else if (!strcmp(str, "eibrs,retpoline")) {
1931 spectre_v2_cmd = SPECTRE_V2_CMD_EIBRS_RETPOLINE;
1932 } else if (!strcmp(str, "auto")) {
1933 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
1934 } else if (!strcmp(str, "ibrs")) {
1935 spectre_v2_cmd = SPECTRE_V2_CMD_IBRS;
1936 } else {
1937 pr_err("Ignoring unknown spectre_v2 option (%s).", str);
1938 }
1939
1940 return 0;
1941 }
1942 early_param("spectre_v2", spectre_v2_parse_cmdline);
1943
1944 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
1945 {
1946 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
1947 pr_err("Kernel not compiled with retpoline; no mitigation available!");
1948 return SPECTRE_V2_NONE;
1949 }
1950
1951 return SPECTRE_V2_RETPOLINE;
1952 }
1953
1954 static bool __ro_after_init rrsba_disabled;
1955
1956 /* Disable in-kernel use of non-RSB RET predictors */
1957 static void __init spec_ctrl_disable_kernel_rrsba(void)
1958 {
1959 if (rrsba_disabled)
1960 return;
1961
1962 if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
1963 rrsba_disabled = true;
1964 return;
1965 }
1966
1967 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
1968 return;
1969
1970 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
1971 update_spec_ctrl(x86_spec_ctrl_base);
1972 rrsba_disabled = true;
1973 }
1974
1975 static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
1976 {
1977 /*
1978 * WARNING! There are many subtleties to consider when changing *any*
1979 * code related to RSB-related mitigations. Before doing so, carefully
1980 * read the following document, and update if necessary:
1981 *
1982 * Documentation/admin-guide/hw-vuln/rsb.rst
1983 *
1984 * In an overly simplified nutshell:
1985 *
1986 * - User->user RSB attacks are conditionally mitigated during
1987 * context switches by cond_mitigation -> write_ibpb().
1988 *
1989 * - User->kernel and guest->host attacks are mitigated by eIBRS or
1990 * RSB filling.
1991 *
1992 * Though, depending on config, note that other alternative
1993 * mitigations may end up getting used instead, e.g., IBPB on
1994 * entry/vmexit, call depth tracking, or return thunks.
1995 */
1996
1997 switch (mode) {
1998 case SPECTRE_V2_NONE:
1999 break;
2000
2001 case SPECTRE_V2_EIBRS:
2002 case SPECTRE_V2_EIBRS_LFENCE:
2003 case SPECTRE_V2_EIBRS_RETPOLINE:
2004 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2005 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
2006 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
2007 }
2008 break;
2009
2010 case SPECTRE_V2_RETPOLINE:
2011 case SPECTRE_V2_LFENCE:
2012 case SPECTRE_V2_IBRS:
2013 pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
2014 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
2015 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
2016 break;
2017
2018 default:
2019 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
2020 dump_stack();
2021 break;
2022 }
2023 }
2024
2025 /*
2026 * Set BHI_DIS_S to prevent indirect branches in the kernel from being
2027 * influenced by branch history in userspace. Not needed if BHI_NO is set.
2028 */
2029 static bool __init spec_ctrl_bhi_dis(void)
2030 {
2031 if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
2032 return false;
2033
2034 x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
2035 update_spec_ctrl(x86_spec_ctrl_base);
2036 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
2037
2038 return true;
2039 }
2040
2041 enum bhi_mitigations {
2042 BHI_MITIGATION_OFF,
2043 BHI_MITIGATION_AUTO,
2044 BHI_MITIGATION_ON,
2045 BHI_MITIGATION_VMEXIT_ONLY,
2046 };
2047
2048 static enum bhi_mitigations bhi_mitigation __ro_after_init =
2049 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF;
2050
2051 static int __init spectre_bhi_parse_cmdline(char *str)
2052 {
2053 if (!str)
2054 return -EINVAL;
2055
2056 if (!strcmp(str, "off"))
2057 bhi_mitigation = BHI_MITIGATION_OFF;
2058 else if (!strcmp(str, "on"))
2059 bhi_mitigation = BHI_MITIGATION_ON;
2060 else if (!strcmp(str, "vmexit"))
2061 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2062 else
2063 pr_err("Ignoring unknown spectre_bhi option (%s)", str);
2064
2065 return 0;
2066 }
2067 early_param("spectre_bhi", spectre_bhi_parse_cmdline);
2068
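/*
 * In AUTO mode, derive the BHI mitigation scope from the attack vector
 * controls: full mitigation when both guest->host and user->kernel are in
 * scope, VM-exit-only clearing when only guest->host is, off otherwise.
 */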
2069 static void __init bhi_select_mitigation(void)
2070 {
2071 if (!boot_cpu_has(X86_BUG_BHI))
2072 bhi_mitigation = BHI_MITIGATION_OFF;
2073
2074 if (bhi_mitigation != BHI_MITIGATION_AUTO)
2075 return;
2076
2077 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST)) {
2078 if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
2079 bhi_mitigation = BHI_MITIGATION_ON;
2080 else
2081 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2082 } else {
2083 bhi_mitigation = BHI_MITIGATION_OFF;
2084 }
2085 }
2086
2087 static void __init bhi_update_mitigation(void)
2088 {
2089 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE)
2090 bhi_mitigation = BHI_MITIGATION_OFF;
2091 }
2092
2093 static void __init bhi_apply_mitigation(void)
2094 {
2095 if (bhi_mitigation == BHI_MITIGATION_OFF)
2096 return;
2097
2098 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
2099 if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
2100 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
2101 spec_ctrl_disable_kernel_rrsba();
2102 if (rrsba_disabled)
2103 return;
2104 }
2105
2106 if (!IS_ENABLED(CONFIG_X86_64))
2107 return;
2108
2109 /* Mitigate in hardware if supported */
2110 if (spec_ctrl_bhi_dis())
2111 return;
2112
2113 if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
2114 pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
2115 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2116 return;
2117 }
2118
2119 pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
2120 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
2121 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2122 }
2123
2124 static void __init spectre_v2_select_mitigation(void)
2125 {
2126 if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE ||
2127 spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2128 spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
2129 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2130 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2131 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
2132 pr_err("RETPOLINE selected but not compiled in. Switching to AUTO select\n");
2133 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2134 }
2135
2136 if ((spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS ||
2137 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
2138 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
2139 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2140 pr_err("EIBRS selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n");
2141 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2142 }
2143
2144 if ((spectre_v2_cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
2145 spectre_v2_cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
2146 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
2147 pr_err("LFENCE selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n");
2148 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2149 }
2150
2151 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
2152 pr_err("IBRS selected but not compiled in. Switching to AUTO select\n");
2153 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2154 }
2155
2156 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
2157 pr_err("IBRS selected but not Intel CPU. Switching to AUTO select\n");
2158 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2159 }
2160
2161 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
2162 pr_err("IBRS selected but CPU doesn't have IBRS. Switching to AUTO select\n");
2163 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2164 }
2165
2166 if (spectre_v2_cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
2167 pr_err("IBRS selected but running as XenPV guest. Switching to AUTO select\n");
2168 spectre_v2_cmd = SPECTRE_V2_CMD_AUTO;
2169 }
2170
2171 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) {
2172 spectre_v2_cmd = SPECTRE_V2_CMD_NONE;
2173 return;
2174 }
2175
2176 switch (spectre_v2_cmd) {
2177 case SPECTRE_V2_CMD_NONE:
2178 return;
2179
2180 case SPECTRE_V2_CMD_AUTO:
2181 if (!should_mitigate_vuln(X86_BUG_SPECTRE_V2))
2182 break;
2183 fallthrough;
2184 case SPECTRE_V2_CMD_FORCE:
2185 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2186 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2187 break;
2188 }
2189
2190 spectre_v2_enabled = spectre_v2_select_retpoline();
2191 break;
2192
2193 case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
2194 pr_err(SPECTRE_V2_LFENCE_MSG);
2195 spectre_v2_enabled = SPECTRE_V2_LFENCE;
2196 break;
2197
2198 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
2199 spectre_v2_enabled = SPECTRE_V2_RETPOLINE;
2200 break;
2201
2202 case SPECTRE_V2_CMD_RETPOLINE:
2203 spectre_v2_enabled = spectre_v2_select_retpoline();
2204 break;
2205
2206 case SPECTRE_V2_CMD_IBRS:
2207 spectre_v2_enabled = SPECTRE_V2_IBRS;
2208 break;
2209
2210 case SPECTRE_V2_CMD_EIBRS:
2211 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2212 break;
2213
2214 case SPECTRE_V2_CMD_EIBRS_LFENCE:
2215 spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE;
2216 break;
2217
2218 case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
2219 spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE;
2220 break;
2221 }
2222 }
2223
2224 static void __init spectre_v2_update_mitigation(void)
2225 {
2226 if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO &&
2227 !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) {
2228 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
2229 boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2230 retbleed_mitigation != RETBLEED_MITIGATION_NONE &&
2231 retbleed_mitigation != RETBLEED_MITIGATION_STUFF &&
2232 boot_cpu_has(X86_FEATURE_IBRS) &&
2233 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
2234 spectre_v2_enabled = SPECTRE_V2_IBRS;
2235 }
2236 }
2237
2238 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2239 pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]);
2240 }
2241
2242 static void __init spectre_v2_apply_mitigation(void)
2243 {
2244 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2245 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
2246
2247 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2248 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
2249 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
2250 } else {
2251 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
2252 update_spec_ctrl(x86_spec_ctrl_base);
2253 }
2254 }
2255
2256 switch (spectre_v2_enabled) {
2257 case SPECTRE_V2_NONE:
2258 return;
2259
2260 case SPECTRE_V2_EIBRS:
2261 break;
2262
2263 case SPECTRE_V2_IBRS:
2264 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
2265 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
2266 pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
2267 break;
2268
2269 case SPECTRE_V2_LFENCE:
2270 case SPECTRE_V2_EIBRS_LFENCE:
2271 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
2272 fallthrough;
2273
2274 case SPECTRE_V2_RETPOLINE:
2275 case SPECTRE_V2_EIBRS_RETPOLINE:
2276 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
2277 break;
2278 }
2279
2280 /*
2281 * Disable alternate RSB predictions in the kernel when indirect CALLs and
2282 * JMPs get protection against BHI and Intra-mode BTI, but RET prediction
2283 * from a non-RSB predictor is still a risk.
2284 */
2285 if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE ||
2286 spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE ||
2287 spectre_v2_enabled == SPECTRE_V2_RETPOLINE)
2288 spec_ctrl_disable_kernel_rrsba();
2289
2290 spectre_v2_select_rsb_mitigation(spectre_v2_enabled);
2291
2292 /*
2293 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
2294 * and Enhanced IBRS protect firmware too, so enable IBRS around
2295 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
2296 * otherwise enabled.
2297 *
2298 * Use "spectre_v2_enabled" to check Enhanced IBRS instead of
2299 * boot_cpu_has(), because the user might select retpoline on the kernel
2300 * command line and, if the CPU supports Enhanced IBRS, the kernel might
2301 * unintentionally not enable IBRS around firmware calls.
2302 */
2303 if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2304 boot_cpu_has(X86_FEATURE_IBPB) &&
2305 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2306 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
2307
2308 if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) {
2309 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
2310 pr_info("Enabling Speculation Barrier for firmware calls\n");
2311 }
2312
2313 } else if (boot_cpu_has(X86_FEATURE_IBRS) &&
2314 !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2315 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
2316 pr_info("Enabling Restricted Speculation for firmware calls\n");
2317 }
2318 }
2319
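/*
 * on_each_cpu() callback used by update_stibp_strict() to write the
 * updated STIBP state into SPEC_CTRL on every CPU.
 */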
2320 static void update_stibp_msr(void * __unused)
2321 {
2322 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
2323 update_spec_ctrl(val);
2324 }
2325
2326 /* Update x86_spec_ctrl_base in case SMT state changed. */
2327 static void update_stibp_strict(void)
2328 {
2329 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
2330
2331 if (sched_smt_active())
2332 mask |= SPEC_CTRL_STIBP;
2333
2334 if (mask == x86_spec_ctrl_base)
2335 return;
2336
2337 pr_info("Update user space SMT mitigation: STIBP %s\n",
2338 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
2339 x86_spec_ctrl_base = mask;
2340 on_each_cpu(update_stibp_msr, NULL, 1);
2341 }
2342
2343 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
2344 static void update_indir_branch_cond(void)
2345 {
2346 if (sched_smt_active())
2347 static_branch_enable(&switch_to_cond_stibp);
2348 else
2349 static_branch_disable(&switch_to_cond_stibp);
2350 }
2351
2352 #undef pr_fmt
2353 #define pr_fmt(fmt) fmt
2354
2355 /* Update the static key controlling the MDS CPU buffer clear in idle */
2356 static void update_mds_branch_idle(void)
2357 {
2358 /*
2359 * Enable the idle clearing if SMT is active on CPUs which are
2360 * affected only by MSBDS and not any other MDS variant.
2361 *
2362 * The other variants cannot be mitigated when SMT is enabled, so
2363 * clearing the buffers on idle just to prevent the Store Buffer
2364 * repartitioning leak would be a window dressing exercise.
2365 */
2366 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
2367 return;
2368
2369 if (sched_smt_active()) {
2370 static_branch_enable(&cpu_buf_idle_clear);
2371 } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
2372 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
2373 static_branch_disable(&cpu_buf_idle_clear);
2374 }
2375 }
2376
2377 #undef pr_fmt
2378 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
2379
2380 static enum ssb_mitigation ssb_mode __ro_after_init =
2381 IS_ENABLED(CONFIG_MITIGATION_SSB) ? SPEC_STORE_BYPASS_AUTO : SPEC_STORE_BYPASS_NONE;
2382
2383 static const char * const ssb_strings[] = {
2384 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
2385 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
2386 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
2387 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
2388 };
2389
2390 static bool nossb __ro_after_init;
2391
2392 static int __init nossb_parse_cmdline(char *str)
2393 {
2394 nossb = true;
2395 ssb_mode = SPEC_STORE_BYPASS_NONE;
2396 return 0;
2397 }
2398 early_param("nospec_store_bypass_disable", nossb_parse_cmdline);
2399
2400 static int __init ssb_parse_cmdline(char *str)
2401 {
2402 if (!str)
2403 return -EINVAL;
2404
2405 if (nossb)
2406 return 0;
2407
2408 if (!strcmp(str, "auto"))
2409 ssb_mode = SPEC_STORE_BYPASS_AUTO;
2410 else if (!strcmp(str, "on"))
2411 ssb_mode = SPEC_STORE_BYPASS_DISABLE;
2412 else if (!strcmp(str, "off"))
2413 ssb_mode = SPEC_STORE_BYPASS_NONE;
2414 else if (!strcmp(str, "prctl"))
2415 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2416 else if (!strcmp(str, "seccomp"))
2417 ssb_mode = IS_ENABLED(CONFIG_SECCOMP) ?
2418 SPEC_STORE_BYPASS_SECCOMP : SPEC_STORE_BYPASS_PRCTL;
2419 else
2420 pr_err("Ignoring unknown spec_store_bypass_disable option (%s).\n",
2421 str);
2422
2423 return 0;
2424 }
2425 early_param("spec_store_bypass_disable", ssb_parse_cmdline);
2426
2427 static void __init ssb_select_mitigation(void)
2428 {
2429 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) {
2430 ssb_mode = SPEC_STORE_BYPASS_NONE;
2431 return;
2432 }
2433
2434 if (ssb_mode == SPEC_STORE_BYPASS_AUTO) {
2435 if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS))
2436 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2437 else
2438 ssb_mode = SPEC_STORE_BYPASS_NONE;
2439 }
2440
2441 if (!boot_cpu_has(X86_FEATURE_SSBD))
2442 ssb_mode = SPEC_STORE_BYPASS_NONE;
2443
2444 pr_info("%s\n", ssb_strings[ssb_mode]);
2445 }
2446
2447 static void __init ssb_apply_mitigation(void)
2448 {
2449 /*
2450 * We have three CPU feature flags that are in play here:
2451 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
2452 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
2453 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
2454 */
2455 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
2456 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
2457 /*
2458 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
2459 * use a completely different MSR and bit dependent on family.
2460 */
2461 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
2462 !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
2463 x86_amd_ssb_disable();
2464 } else {
2465 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
2466 update_spec_ctrl(x86_spec_ctrl_base);
2467 }
2468 }
2469 }
2470
2471 #undef pr_fmt
2472 #define pr_fmt(fmt) "Speculation prctl: " fmt
2473
2474 static void task_update_spec_tif(struct task_struct *tsk)
2475 {
2476 /* Force the update of the real TIF bits */
2477 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
2478
2479 /*
2480 * Immediately update the speculation control MSRs for the current
2481 * task, but for a non-current task delay setting the CPU
2482 * mitigation until it is scheduled next.
2483 *
2484 * This can only happen for SECCOMP mitigation. For PRCTL it's
2485 * always the current task.
2486 */
2487 if (tsk == current)
2488 speculation_ctrl_update_current();
2489 }
2490
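/*
 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, ...) handler. Only
 * valid when the conditional L1D flush on context switch was enabled at
 * boot.
 */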
2491 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
2492 {
2493
2494 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2495 return -EPERM;
2496
2497 switch (ctrl) {
2498 case PR_SPEC_ENABLE:
2499 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2500 return 0;
2501 case PR_SPEC_DISABLE:
2502 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2503 return 0;
2504 default:
2505 return -ERANGE;
2506 }
2507 }
2508
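/*
 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, ...) handler. Only
 * available when SSB is in prctl or seccomp mode. PR_SPEC_FORCE_DISABLE is
 * sticky and prevents later re-enabling.
 */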
2509 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
2510 {
2511 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
2512 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
2513 return -ENXIO;
2514
2515 switch (ctrl) {
2516 case PR_SPEC_ENABLE:
2517 /* If speculation is force disabled, enable is not allowed */
2518 if (task_spec_ssb_force_disable(task))
2519 return -EPERM;
2520 task_clear_spec_ssb_disable(task);
2521 task_clear_spec_ssb_noexec(task);
2522 task_update_spec_tif(task);
2523 break;
2524 case PR_SPEC_DISABLE:
2525 task_set_spec_ssb_disable(task);
2526 task_clear_spec_ssb_noexec(task);
2527 task_update_spec_tif(task);
2528 break;
2529 case PR_SPEC_FORCE_DISABLE:
2530 task_set_spec_ssb_disable(task);
2531 task_set_spec_ssb_force_disable(task);
2532 task_clear_spec_ssb_noexec(task);
2533 task_update_spec_tif(task);
2534 break;
2535 case PR_SPEC_DISABLE_NOEXEC:
2536 if (task_spec_ssb_force_disable(task))
2537 return -EPERM;
2538 task_set_spec_ssb_disable(task);
2539 task_set_spec_ssb_noexec(task);
2540 task_update_spec_tif(task);
2541 break;
2542 default:
2543 return -ERANGE;
2544 }
2545 return 0;
2546 }
2547
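/* True if either IBPB or STIBP is in a per-task (prctl/seccomp) mode. */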
2548 static bool is_spec_ib_user_controlled(void)
2549 {
2550 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2551 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2552 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2553 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2554 }
2555
2556 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2557 {
2558 switch (ctrl) {
2559 case PR_SPEC_ENABLE:
2560 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2561 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2562 return 0;
2563
2564 /*
2565 * With strict mode for both IBPB and STIBP, the instruction
2566 * code paths avoid checking this task flag and instead,
2567 * unconditionally run the instruction. However, STIBP and IBPB
2568 * are independent and either can be set to conditionally
2569 * enabled regardless of the mode of the other.
2570 *
2571 * If either is set to conditional, allow the task flag to be
2572 * updated, unless it was force-disabled by a previous prctl
2573 * call. Currently, this is possible on an AMD CPU which has the
2574 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2575 * kernel is booted with 'spectre_v2_user=seccomp', then
2576 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2577 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2578 */
2579 if (!is_spec_ib_user_controlled() ||
2580 task_spec_ib_force_disable(task))
2581 return -EPERM;
2582
2583 task_clear_spec_ib_disable(task);
2584 task_update_spec_tif(task);
2585 break;
2586 case PR_SPEC_DISABLE:
2587 case PR_SPEC_FORCE_DISABLE:
2588 /*
2589 * Indirect branch speculation is always allowed when
2590 * mitigation is force disabled.
2591 */
2592 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2593 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2594 return -EPERM;
2595
2596 if (!is_spec_ib_user_controlled())
2597 return 0;
2598
2599 task_set_spec_ib_disable(task);
2600 if (ctrl == PR_SPEC_FORCE_DISABLE)
2601 task_set_spec_ib_force_disable(task);
2602 task_update_spec_tif(task);
2603 if (task == current)
2604 indirect_branch_prediction_barrier();
2605 break;
2606 default:
2607 return -ERANGE;
2608 }
2609 return 0;
2610 }
2611
2612 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2613 unsigned long ctrl)
2614 {
2615 switch (which) {
2616 case PR_SPEC_STORE_BYPASS:
2617 return ssb_prctl_set(task, ctrl);
2618 case PR_SPEC_INDIRECT_BRANCH:
2619 return ib_prctl_set(task, ctrl);
2620 case PR_SPEC_L1D_FLUSH:
2621 return l1d_flush_prctl_set(task, ctrl);
2622 default:
2623 return -ENODEV;
2624 }
2625 }
2626
2627 #ifdef CONFIG_SECCOMP
2628 void arch_seccomp_spec_mitigate(struct task_struct *task)
2629 {
2630 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2631 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2632 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2633 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2634 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2635 }
2636 #endif
2637
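/*
 * The *_prctl_get() helpers below report the current state as PR_SPEC_*
 * flags for prctl(PR_GET_SPECULATION_CTRL).
 */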
2638 static int l1d_flush_prctl_get(struct task_struct *task)
2639 {
2640 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2641 return PR_SPEC_FORCE_DISABLE;
2642
2643 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2644 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2645 else
2646 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2647 }
2648
2649 static int ssb_prctl_get(struct task_struct *task)
2650 {
2651 switch (ssb_mode) {
2652 case SPEC_STORE_BYPASS_NONE:
2653 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2654 return PR_SPEC_ENABLE;
2655 return PR_SPEC_NOT_AFFECTED;
2656 case SPEC_STORE_BYPASS_DISABLE:
2657 return PR_SPEC_DISABLE;
2658 case SPEC_STORE_BYPASS_SECCOMP:
2659 case SPEC_STORE_BYPASS_PRCTL:
2660 case SPEC_STORE_BYPASS_AUTO:
2661 if (task_spec_ssb_force_disable(task))
2662 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2663 if (task_spec_ssb_noexec(task))
2664 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2665 if (task_spec_ssb_disable(task))
2666 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2667 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2668 }
2669 BUG();
2670 }
2671
2672 static int ib_prctl_get(struct task_struct *task)
2673 {
2674 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2675 return PR_SPEC_NOT_AFFECTED;
2676
2677 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2678 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2679 return PR_SPEC_ENABLE;
2680 else if (is_spec_ib_user_controlled()) {
2681 if (task_spec_ib_force_disable(task))
2682 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2683 if (task_spec_ib_disable(task))
2684 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2685 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2686 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2687 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2688 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2689 return PR_SPEC_DISABLE;
2690 else
2691 return PR_SPEC_NOT_AFFECTED;
2692 }
2693
2694 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2695 {
2696 switch (which) {
2697 case PR_SPEC_STORE_BYPASS:
2698 return ssb_prctl_get(task);
2699 case PR_SPEC_INDIRECT_BRANCH:
2700 return ib_prctl_get(task);
2701 case PR_SPEC_L1D_FLUSH:
2702 return l1d_flush_prctl_get(task);
2703 default:
2704 return -ENODEV;
2705 }
2706 }
2707
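/*
 * Propagate the boot CPU's mitigation state to a newly onlined AP: the
 * SPEC_CTRL base value and, if selected, the AMD SSBD setting.
 */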
2708 void x86_spec_ctrl_setup_ap(void)
2709 {
2710 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2711 update_spec_ctrl(x86_spec_ctrl_base);
2712
2713 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2714 x86_amd_ssb_disable();
2715 }
2716
2717 bool itlb_multihit_kvm_mitigation;
2718 EXPORT_SYMBOL_FOR_KVM(itlb_multihit_kvm_mitigation);
2719
2720 #undef pr_fmt
2721 #define pr_fmt(fmt) "L1TF: " fmt
2722
2723 /* Default mitigation for L1TF-affected CPUs */
2724 enum l1tf_mitigations l1tf_mitigation __ro_after_init =
2725 IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF;
2726 EXPORT_SYMBOL_FOR_KVM(l1tf_mitigation);
2727 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2728 EXPORT_SYMBOL_FOR_KVM(l1tf_vmx_mitigation);
2729
2730 /*
2731 * These CPUs all support a 44-bit physical address space internally in the
2732 * cache, but CPUID can report a smaller number of physical address bits.
2733 *
2734 * The L1TF mitigation uses the topmost address bit for the inversion of
2735 * non-present PTEs. When the installed memory reaches into the topmost
2736 * address bit due to memory holes, which has been observed on machines
2737 * that report 36 physical address bits and have 32G RAM installed,
2738 * then the mitigation range check in l1tf_select_mitigation() triggers.
2739 * This is a false positive because the mitigation is still possible due to
2740 * the fact that the cache uses 44 bits internally. Use the cache bits
2741 * instead of the reported physical bits and adjust them on the affected
2742 * machines to 44 bits if the reported bits are less than 44.
2743 */
2744 static void override_cache_bits(struct cpuinfo_x86 *c)
2745 {
2746 if (c->x86 != 6)
2747 return;
2748
2749 switch (c->x86_vfm) {
2750 case INTEL_NEHALEM:
2751 case INTEL_WESTMERE:
2752 case INTEL_SANDYBRIDGE:
2753 case INTEL_IVYBRIDGE:
2754 case INTEL_HASWELL:
2755 case INTEL_HASWELL_L:
2756 case INTEL_HASWELL_G:
2757 case INTEL_BROADWELL:
2758 case INTEL_BROADWELL_G:
2759 case INTEL_SKYLAKE_L:
2760 case INTEL_SKYLAKE:
2761 case INTEL_KABYLAKE_L:
2762 case INTEL_KABYLAKE:
2763 if (c->x86_cache_bits < 44)
2764 c->x86_cache_bits = 44;
2765 break;
2766 }
2767 }
2768
2769 static void __init l1tf_select_mitigation(void)
2770 {
2771 if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
2772 l1tf_mitigation = L1TF_MITIGATION_OFF;
2773 return;
2774 }
2775
2776 if (l1tf_mitigation != L1TF_MITIGATION_AUTO)
2777 return;
2778
2779 if (!should_mitigate_vuln(X86_BUG_L1TF)) {
2780 l1tf_mitigation = L1TF_MITIGATION_OFF;
2781 return;
2782 }
2783
2784 if (smt_mitigations == SMT_MITIGATIONS_ON)
2785 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2786 else
2787 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2788 }
2789
2790 static void __init l1tf_apply_mitigation(void)
2791 {
2792 u64 half_pa;
2793
2794 if (!boot_cpu_has_bug(X86_BUG_L1TF))
2795 return;
2796
2797 override_cache_bits(&boot_cpu_data);
2798
2799 switch (l1tf_mitigation) {
2800 case L1TF_MITIGATION_OFF:
2801 case L1TF_MITIGATION_FLUSH_NOWARN:
2802 case L1TF_MITIGATION_FLUSH:
2803 case L1TF_MITIGATION_AUTO:
2804 break;
2805 case L1TF_MITIGATION_FLUSH_NOSMT:
2806 case L1TF_MITIGATION_FULL:
2807 cpu_smt_disable(false);
2808 break;
2809 case L1TF_MITIGATION_FULL_FORCE:
2810 cpu_smt_disable(true);
2811 break;
2812 }
2813
2814 #if CONFIG_PGTABLE_LEVELS == 2
2815 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
2816 return;
2817 #endif
2818
2819 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
2820 if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
2821 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
2822 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
2823 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
2824 half_pa);
2825 pr_info("However, doing so will make a part of your RAM unusable.\n");
2826 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
2827 return;
2828 }
2829
2830 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
2831 }
2832
2833 static int __init l1tf_cmdline(char *str)
2834 {
2835 if (!boot_cpu_has_bug(X86_BUG_L1TF))
2836 return 0;
2837
2838 if (!str)
2839 return -EINVAL;
2840
2841 if (!strcmp(str, "off"))
2842 l1tf_mitigation = L1TF_MITIGATION_OFF;
2843 else if (!strcmp(str, "flush,nowarn"))
2844 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
2845 else if (!strcmp(str, "flush"))
2846 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2847 else if (!strcmp(str, "flush,nosmt"))
2848 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2849 else if (!strcmp(str, "full"))
2850 l1tf_mitigation = L1TF_MITIGATION_FULL;
2851 else if (!strcmp(str, "full,force"))
2852 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
2853
2854 return 0;
2855 }
2856 early_param("l1tf", l1tf_cmdline);
2857
2858 #undef pr_fmt
2859 #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt
2860
2861 static const char * const srso_strings[] = {
2862 [SRSO_MITIGATION_NONE] = "Vulnerable",
2863 [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
2864 [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
2865 [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
2866 [SRSO_MITIGATION_NOSMT] = "Mitigation: SMT disabled",
2867 [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
2868 [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
2869 [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
2870 [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
2871 };
2872
2873 static int __init srso_parse_cmdline(char *str)
2874 {
2875 if (!str)
2876 return -EINVAL;
2877
2878 if (!strcmp(str, "off"))
2879 srso_mitigation = SRSO_MITIGATION_NONE;
2880 else if (!strcmp(str, "microcode"))
2881 srso_mitigation = SRSO_MITIGATION_MICROCODE;
2882 else if (!strcmp(str, "safe-ret"))
2883 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
2884 else if (!strcmp(str, "ibpb"))
2885 srso_mitigation = SRSO_MITIGATION_IBPB;
2886 else if (!strcmp(str, "ibpb-vmexit"))
2887 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
2888 else
2889 pr_err("Ignoring unknown SRSO option (%s).", str);
2890
2891 return 0;
2892 }
2893 early_param("spec_rstack_overflow", srso_parse_cmdline);
2894
2895 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
2896
2897 static void __init srso_select_mitigation(void)
2898 {
2899 if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
2900 srso_mitigation = SRSO_MITIGATION_NONE;
2901 return;
2902 }
2903
2904 if (srso_mitigation == SRSO_MITIGATION_AUTO) {
2905 /*
2906 * Use safe-RET if user->kernel or guest->host protection is
2907 * required. Otherwise the 'microcode' mitigation is sufficient
2908 * to protect the user->user and guest->guest vectors.
2909 */
2910 if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
2911 (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) &&
2912 !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) {
2913 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
2914 } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
2915 cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
2916 srso_mitigation = SRSO_MITIGATION_MICROCODE;
2917 } else {
2918 srso_mitigation = SRSO_MITIGATION_NONE;
2919 return;
2920 }
2921 }
2922
2923 /* Zen1/2 with SMT off aren't vulnerable to SRSO. */
2924 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
2925 srso_mitigation = SRSO_MITIGATION_NOSMT;
2926 return;
2927 }
2928
2929 if (!boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) {
2930 pr_warn("IBPB-extending microcode not applied!\n");
2931 pr_warn(SRSO_NOTICE);
2932
2933 /*
2934 * Safe-RET provides partial mitigation without microcode, but the
2935 * other mitigation options require microcode to provide any
2936 * protection.
2937 */
2938 if (srso_mitigation == SRSO_MITIGATION_SAFE_RET)
2939 srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
2940 else
2941 srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
2942 }
2943
2944 switch (srso_mitigation) {
2945 case SRSO_MITIGATION_SAFE_RET:
2946 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
2947 if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
2948 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
2949 goto ibpb_on_vmexit;
2950 }
2951
2952 if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
2953 pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
2954 srso_mitigation = SRSO_MITIGATION_NONE;
2955 }
2956 break;
2957 ibpb_on_vmexit:
2958 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
2959 if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
2960 pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
2961 srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
2962 break;
2963 }
2964 fallthrough;
2965 case SRSO_MITIGATION_IBPB:
2966 if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
2967 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
2968 srso_mitigation = SRSO_MITIGATION_NONE;
2969 }
2970 break;
2971 default:
2972 break;
2973 }
2974 }
2975
2976 static void __init srso_update_mitigation(void)
2977 {
2978 if (!boot_cpu_has_bug(X86_BUG_SRSO))
2979 return;
2980
2981 /* If retbleed is using IBPB, that works for SRSO as well */
2982 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB &&
2983 boot_cpu_has(X86_FEATURE_IBPB_BRTYPE))
2984 srso_mitigation = SRSO_MITIGATION_IBPB;
2985
2986 pr_info("%s\n", srso_strings[srso_mitigation]);
2987 }
2988
2989 static void __init srso_apply_mitigation(void)
2990 {
2991 /*
2992 * Clear the feature flag if this mitigation is not selected as that
2993 * feature flag controls the BpSpecReduce MSR bit toggling in KVM.
2994 */
2995 if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
2996 setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
2997
2998 if (srso_mitigation == SRSO_MITIGATION_NONE) {
2999 if (boot_cpu_has(X86_FEATURE_SBPB))
3000 x86_pred_cmd = PRED_CMD_SBPB;
3001 return;
3002 }
3003
3004 switch (srso_mitigation) {
3005 case SRSO_MITIGATION_SAFE_RET:
3006 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3007 /*
3008 * Enable the return thunk for generated code
3009 * like ftrace, static_call, etc.
3010 */
3011 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
3012 setup_force_cpu_cap(X86_FEATURE_UNRET);
3013
3014 if (boot_cpu_data.x86 == 0x19) {
3015 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
3016 set_return_thunk(srso_alias_return_thunk);
3017 } else {
3018 setup_force_cpu_cap(X86_FEATURE_SRSO);
3019 set_return_thunk(srso_return_thunk);
3020 }
3021 break;
3022 case SRSO_MITIGATION_IBPB:
3023 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
3024 /*
3025 * IBPB on entry already obviates the need for
3026 * software-based untraining so clear those in case some
3027 * other mitigation like Retbleed has selected them.
3028 */
3029 setup_clear_cpu_cap(X86_FEATURE_UNRET);
3030 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
3031 fallthrough;
3032 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3033 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
3034 /*
3035 * There is no need for RSB filling: entry_ibpb() ensures
3036 * all predictions, including the RSB, are invalidated,
3037 * regardless of IBPB implementation.
3038 */
3039 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
3040 break;
3041 default:
3042 break;
3043 }
3044 }
3045
3046 #undef pr_fmt
3047 #define pr_fmt(fmt) "VMSCAPE: " fmt
3048
3049 enum vmscape_mitigations {
3050 VMSCAPE_MITIGATION_NONE,
3051 VMSCAPE_MITIGATION_AUTO,
3052 VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
3053 VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
3054 };
3055
3056 static const char * const vmscape_strings[] = {
3057 [VMSCAPE_MITIGATION_NONE] = "Vulnerable",
3058 /* [VMSCAPE_MITIGATION_AUTO] */
3059 [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER] = "Mitigation: IBPB before exit to userspace",
3060 [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT",
3061 };
3062
3063 static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
3064 IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;
3065
3066 static int __init vmscape_parse_cmdline(char *str)
3067 {
3068 if (!str)
3069 return -EINVAL;
3070
3071 if (!strcmp(str, "off")) {
3072 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3073 } else if (!strcmp(str, "ibpb")) {
3074 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
3075 } else if (!strcmp(str, "force")) {
3076 setup_force_cpu_bug(X86_BUG_VMSCAPE);
3077 vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
3078 } else {
3079 pr_err("Ignoring unknown vmscape=%s option.\n", str);
3080 }
3081
3082 return 0;
3083 }
3084 early_param("vmscape", vmscape_parse_cmdline);
3085
3086 static void __init vmscape_select_mitigation(void)
3087 {
3088 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
3089 !boot_cpu_has(X86_FEATURE_IBPB)) {
3090 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3091 return;
3092 }
3093
3094 if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO) {
3095 if (should_mitigate_vuln(X86_BUG_VMSCAPE))
3096 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
3097 else
3098 vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
3099 }
3100 }
3101
3102 static void __init vmscape_update_mitigation(void)
3103 {
3104 if (!boot_cpu_has_bug(X86_BUG_VMSCAPE))
3105 return;
3106
3107 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
3108 srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
3109 vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;
3110
3111 pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
3112 }
3113
3114 static void __init vmscape_apply_mitigation(void)
3115 {
3116 if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
3117 setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
3118 }
3119
3120 #undef pr_fmt
3121 #define pr_fmt(fmt) fmt
3122
3123 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
3124 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
3125 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
3126 #define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n"
3127
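/* Re-evaluate SMT-dependent mitigation state after the SMT topology changes. */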
3128 void cpu_bugs_smt_update(void)
3129 {
3130 mutex_lock(&spec_ctrl_mutex);
3131
3132 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
3133 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
3134 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
3135
3136 switch (spectre_v2_user_stibp) {
3137 case SPECTRE_V2_USER_NONE:
3138 break;
3139 case SPECTRE_V2_USER_STRICT:
3140 case SPECTRE_V2_USER_STRICT_PREFERRED:
3141 update_stibp_strict();
3142 break;
3143 case SPECTRE_V2_USER_PRCTL:
3144 case SPECTRE_V2_USER_SECCOMP:
3145 update_indir_branch_cond();
3146 break;
3147 }
3148
3149 switch (mds_mitigation) {
3150 case MDS_MITIGATION_FULL:
3151 case MDS_MITIGATION_AUTO:
3152 case MDS_MITIGATION_VMWERV:
3153 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
3154 pr_warn_once(MDS_MSG_SMT);
3155 update_mds_branch_idle();
3156 break;
3157 case MDS_MITIGATION_OFF:
3158 break;
3159 }
3160
3161 switch (taa_mitigation) {
3162 case TAA_MITIGATION_VERW:
3163 case TAA_MITIGATION_AUTO:
3164 case TAA_MITIGATION_UCODE_NEEDED:
3165 if (sched_smt_active())
3166 pr_warn_once(TAA_MSG_SMT);
3167 break;
3168 case TAA_MITIGATION_TSX_DISABLED:
3169 case TAA_MITIGATION_OFF:
3170 break;
3171 }
3172
3173 switch (mmio_mitigation) {
3174 case MMIO_MITIGATION_VERW:
3175 case MMIO_MITIGATION_AUTO:
3176 case MMIO_MITIGATION_UCODE_NEEDED:
3177 if (sched_smt_active())
3178 pr_warn_once(MMIO_MSG_SMT);
3179 break;
3180 case MMIO_MITIGATION_OFF:
3181 break;
3182 }
3183
3184 switch (tsa_mitigation) {
3185 case TSA_MITIGATION_USER_KERNEL:
3186 case TSA_MITIGATION_VM:
3187 case TSA_MITIGATION_AUTO:
3188 case TSA_MITIGATION_FULL:
3189 /*
3190 * TSA-SQ can potentially lead to info leakage between
3191 * SMT threads.
3192 */
3193 if (sched_smt_active())
3194 static_branch_enable(&cpu_buf_idle_clear);
3195 else
3196 static_branch_disable(&cpu_buf_idle_clear);
3197 break;
3198 case TSA_MITIGATION_NONE:
3199 case TSA_MITIGATION_UCODE_NEEDED:
3200 break;
3201 }
3202
3203 switch (vmscape_mitigation) {
3204 case VMSCAPE_MITIGATION_NONE:
3205 case VMSCAPE_MITIGATION_AUTO:
3206 break;
3207 case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT:
3208 case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER:
3209 /*
3210 * Hypervisors can be attacked across SMT threads; warn for SMT when
3211 * STIBP is not already enabled system-wide.
3212 *
3213 * Intel eIBRS (!AUTOIBRS) implies STIBP on.
3214 */
3215 if (!sched_smt_active() ||
3216 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
3217 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
3218 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
3219 !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
3220 break;
3221 pr_warn_once(VMSCAPE_MSG_SMT);
3222 break;
3223 }
3224
3225 mutex_unlock(&spec_ctrl_mutex);
3226 }
3227
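/*
 * Boot-time entry point: run the select, update and apply phases for each
 * speculation vulnerability, in dependency order.
 */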
3228 void __init cpu_select_mitigations(void)
3229 {
3230 /*
3231 * Read the SPEC_CTRL MSR to account for reserved bits which may
3232 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
3233 * init code as it is not enumerated and depends on the family.
3234 */
3235 if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
3236 rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
3237
3238 /*
3239 * Previously running kernel (kexec), may have some controls
3240 * turned ON. Clear them and let the mitigations setup below
3241 * rediscover them based on configuration.
3242 */
3243 x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
3244 }
3245
3246 x86_arch_cap_msr = x86_read_arch_cap_msr();
3247
3248 cpu_print_attack_vectors();
3249
3250 /* Select the proper CPU mitigations before patching alternatives: */
3251 spectre_v1_select_mitigation();
3252 spectre_v2_select_mitigation();
3253 retbleed_select_mitigation();
3254 spectre_v2_user_select_mitigation();
3255 ssb_select_mitigation();
3256 l1tf_select_mitigation();
3257 mds_select_mitigation();
3258 taa_select_mitigation();
3259 mmio_select_mitigation();
3260 rfds_select_mitigation();
3261 srbds_select_mitigation();
3262 l1d_flush_select_mitigation();
3263 srso_select_mitigation();
3264 gds_select_mitigation();
3265 its_select_mitigation();
3266 bhi_select_mitigation();
3267 tsa_select_mitigation();
3268 vmscape_select_mitigation();
3269
3270 /*
3271 * After mitigations are selected, some may need to update their
3272 * choices.
3273 */
3274 spectre_v2_update_mitigation();
3275 /*
3276 * retbleed_update_mitigation() relies on the state set by
3277 * spectre_v2_update_mitigation(); specifically it wants to know about
3278 * spectre_v2=ibrs.
3279 */
3280 retbleed_update_mitigation();
3281 /*
3282 * its_update_mitigation() depends on spectre_v2_update_mitigation()
3283 * and retbleed_update_mitigation().
3284 */
3285 its_update_mitigation();
3286
3287 /*
3288 * spectre_v2_user_update_mitigation() depends on
3289 * retbleed_update_mitigation(), specifically the STIBP
3290 * selection is forced for UNRET or IBPB.
3291 */
3292 spectre_v2_user_update_mitigation();
3293 mds_update_mitigation();
3294 taa_update_mitigation();
3295 mmio_update_mitigation();
3296 rfds_update_mitigation();
3297 bhi_update_mitigation();
3298 /* srso_update_mitigation() depends on retbleed_update_mitigation(). */
3299 srso_update_mitigation();
3300 vmscape_update_mitigation();
3301
3302 spectre_v1_apply_mitigation();
3303 spectre_v2_apply_mitigation();
3304 retbleed_apply_mitigation();
3305 spectre_v2_user_apply_mitigation();
3306 ssb_apply_mitigation();
3307 l1tf_apply_mitigation();
3308 mds_apply_mitigation();
3309 taa_apply_mitigation();
3310 mmio_apply_mitigation();
3311 rfds_apply_mitigation();
3312 srbds_apply_mitigation();
3313 srso_apply_mitigation();
3314 gds_apply_mitigation();
3315 its_apply_mitigation();
3316 bhi_apply_mitigation();
3317 tsa_apply_mitigation();
3318 vmscape_apply_mitigation();
3319 }
3320
3321 #ifdef CONFIG_SYSFS
3322
3323 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
3324
3325 #if IS_ENABLED(CONFIG_KVM_INTEL)
3326 static const char * const l1tf_vmx_states[] = {
3327 [VMENTER_L1D_FLUSH_AUTO] = "auto",
3328 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
3329 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
3330 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
3331 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
3332 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
3333 };
3334
3335 static ssize_t l1tf_show_state(char *buf)
3336 {
3337 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
3338 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3339
3340 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
3341 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
3342 sched_smt_active())) {
3343 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
3344 l1tf_vmx_states[l1tf_vmx_mitigation]);
3345 }
3346
3347 return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
3348 l1tf_vmx_states[l1tf_vmx_mitigation],
3349 sched_smt_active() ? "vulnerable" : "disabled");
3350 }
3351
3352 static ssize_t itlb_multihit_show_state(char *buf)
3353 {
3354 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
3355 !boot_cpu_has(X86_FEATURE_VMX))
3356 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
3357 else if (!(cr4_read_shadow() & X86_CR4_VMXE))
3358 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
3359 else if (itlb_multihit_kvm_mitigation)
3360 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
3361 else
3362 return sysfs_emit(buf, "KVM: Vulnerable\n");
3363 }
3364 #else
3365 static ssize_t l1tf_show_state(char *buf)
3366 {
3367 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3368 }
3369
3370 static ssize_t itlb_multihit_show_state(char *buf)
3371 {
3372 return sysfs_emit(buf, "Processor vulnerable\n");
3373 }
3374 #endif
3375
3376 static ssize_t mds_show_state(char *buf)
3377 {
3378 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3379 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3380 mds_strings[mds_mitigation]);
3381 }
3382
3383 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
3384 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3385 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
3386 sched_smt_active() ? "mitigated" : "disabled"));
3387 }
3388
3389 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3390 sched_smt_active() ? "vulnerable" : "disabled");
3391 }
3392
3393 static ssize_t tsx_async_abort_show_state(char *buf)
3394 {
3395 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
3396 (taa_mitigation == TAA_MITIGATION_OFF))
3397 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
3398
3399 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3400 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3401 taa_strings[taa_mitigation]);
3402 }
3403
3404 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
3405 sched_smt_active() ? "vulnerable" : "disabled");
3406 }
3407
3408 static ssize_t mmio_stale_data_show_state(char *buf)
3409 {
3410 if (mmio_mitigation == MMIO_MITIGATION_OFF)
3411 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
3412
3413 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3414 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3415 mmio_strings[mmio_mitigation]);
3416 }
3417
3418 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
3419 sched_smt_active() ? "vulnerable" : "disabled");
3420 }
3421
rfds_show_state(char * buf)3422 static ssize_t rfds_show_state(char *buf)
3423 {
3424 return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
3425 }
3426
static ssize_t old_microcode_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return sysfs_emit(buf, "Unknown: running under hypervisor\n");

	return sysfs_emit(buf, "Vulnerable\n");
}

static ssize_t its_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
}

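/*
 * Helpers composing the individual fragments of the spectre_v2 sysfs
 * string. Each returns either an empty string or a "; <item>: <state>"
 * piece that spectre_v2_show_state() concatenates.
 */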
static char *stibp_state(void)
{
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
		return "";

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		return "; STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return "; STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return "; STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return "; STIBP: conditional";
	}
	return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return "; IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return "; IBPB: conditional";
		return "; IBPB: disabled";
	}
	return "";
}

static char *pbrsb_eibrs_state(void)
{
	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
			return "; PBRSB-eIBRS: SW sequence";
		else
			return "; PBRSB-eIBRS: Vulnerable";
	} else {
		return "; PBRSB-eIBRS: Not affected";
	}
}

static const char *spectre_bhi_state(void)
{
	if (!boot_cpu_has_bug(X86_BUG_BHI))
		return "; BHI: Not affected";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
		return "; BHI: BHI_DIS_S";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
		return "; BHI: SW loop, KVM: SW loop";
	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
		 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
		 rrsba_disabled)
		return "; BHI: Retpoline";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT))
		return "; BHI: Vulnerable, KVM: SW loop";

	return "; BHI: Vulnerable";
}

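/*
 * eIBRS (and eIBRS+LFENCE with SMT) can be defeated by unprivileged eBPF,
 * so report those combinations as vulnerable before emitting the composed
 * mitigation string.
 */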
static ssize_t spectre_v2_show_state(char *buf)
{
	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
			  spectre_v2_strings[spectre_v2_enabled],
			  ibpb_state(),
			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
			  stibp_state(),
			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
			  pbrsb_eibrs_state(),
			  spectre_bhi_state(),
			  /* this should always be at the end */
			  spectre_v2_module_string());
}

static ssize_t srbds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
}

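/*
 * The untrained-return-thunk and IBPB retbleed mitigations only make sense
 * on AMD/Hygon parts; elsewhere they are reported as vulnerable. On AMD and
 * Hygon the reported SMT state additionally depends on strict STIBP.
 */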
static ssize_t retbleed_show_state(char *buf)
{
	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");

		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
				  !sched_smt_active() ? "disabled" :
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
				  "enabled with STIBP protection" : "vulnerable");
	}

	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}

static ssize_t srso_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
}

static ssize_t gds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
}

static ssize_t tsa_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
}

static ssize_t vmscape_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
}

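/*
 * Common sysfs handler: bugs the CPU is not affected by report
 * "Not affected", known bugs dispatch to their per-vulnerability show
 * function, and anything unhandled falls through to "Vulnerable".
 */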
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sysfs_emit(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sysfs_emit(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
		return mmio_stale_data_show_state(buf);

	case X86_BUG_RETBLEED:
		return retbleed_show_state(buf);

	case X86_BUG_SRSO:
		return srso_show_state(buf);

	case X86_BUG_GDS:
		return gds_show_state(buf);

	case X86_BUG_RFDS:
		return rfds_show_state(buf);

	case X86_BUG_OLD_MICROCODE:
		return old_microcode_show_state(buf);

	case X86_BUG_ITS:
		return its_show_state(buf);

	case X86_BUG_TSA:
		return tsa_show_state(buf);

	case X86_BUG_VMSCAPE:
		return vmscape_show_state(buf);

	default:
		break;
	}

	return sysfs_emit(buf, "Vulnerable\n");
}

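/* sysfs entry points; they all funnel through cpu_show_common(). */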
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}

ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
}

ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
}

ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
}

ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
}

ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
}

ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
}

ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
}
#endif

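/* Warn if an unpatched return thunk is ever executed; that should not happen. */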
void __warn_thunk(void)
{
	WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
}