1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1994 Linus Torvalds
4 *
5 * Cyrix stuff, June 1998 by:
6 * - Rafael R. Reilova (moved everything from head.S),
7 * <rreilova@ececs.uc.edu>
8 * - Channing Corn (tests & fixes),
9 * - Andrew D. Balsa (code cleanup).
10 */
11 #include <linux/init.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/nospec.h>
15 #include <linux/prctl.h>
16 #include <linux/sched/smt.h>
17 #include <linux/pgtable.h>
18 #include <linux/bpf.h>
19
20 #include <asm/spec-ctrl.h>
21 #include <asm/cmdline.h>
22 #include <asm/bugs.h>
23 #include <asm/processor.h>
24 #include <asm/processor-flags.h>
25 #include <asm/fpu/api.h>
26 #include <asm/msr.h>
27 #include <asm/vmx.h>
28 #include <asm/paravirt.h>
29 #include <asm/cpu_device_id.h>
30 #include <asm/e820/api.h>
31 #include <asm/hypervisor.h>
32 #include <asm/tlbflush.h>
33 #include <asm/cpu.h>
34
35 #include "cpu.h"
36
37 /*
38 * Speculation Vulnerability Handling
39 *
40 * Each vulnerability is handled with the following functions:
41 * <vuln>_select_mitigation() -- Selects a mitigation to use. This should
42 * take into account all relevant command line
43 * options.
44 * <vuln>_update_mitigation() -- This is called after all vulnerabilities have
45 * selected a mitigation, in case the selection
46 * may want to change based on other choices
47 * made. This function is optional.
48 * <vuln>_apply_mitigation() -- Enable the selected mitigation.
49 *
50 * The compile-time mitigation in all cases should be AUTO. An explicit
51 * command-line option can override AUTO. If no such option is
52 * provided, <vuln>_select_mitigation() will override AUTO to the best
53 * mitigation option.
54 */
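/*
 * As a rough sketch (the names below are purely illustrative, not a real
 * vulnerability), a typical <vuln> implementation follows this shape:
 *
 *	static void __init foo_select_mitigation(void)
 *	{
 *		if (!boot_cpu_has_bug(X86_BUG_FOO) || cpu_mitigations_off())
 *			foo_mitigation = FOO_MITIGATION_OFF;
 *		else if (foo_mitigation == FOO_MITIGATION_AUTO)
 *			foo_mitigation = FOO_MITIGATION_FULL;
 *	}
 *
 *	static void __init foo_apply_mitigation(void)
 *	{
 *		if (foo_mitigation == FOO_MITIGATION_FULL)
 *			setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 *	}
 */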
55
56 static void __init spectre_v1_select_mitigation(void);
57 static void __init spectre_v1_apply_mitigation(void);
58 static void __init spectre_v2_select_mitigation(void);
59 static void __init spectre_v2_update_mitigation(void);
60 static void __init spectre_v2_apply_mitigation(void);
61 static void __init retbleed_select_mitigation(void);
62 static void __init retbleed_update_mitigation(void);
63 static void __init retbleed_apply_mitigation(void);
64 static void __init spectre_v2_user_select_mitigation(void);
65 static void __init spectre_v2_user_update_mitigation(void);
66 static void __init spectre_v2_user_apply_mitigation(void);
67 static void __init ssb_select_mitigation(void);
68 static void __init ssb_apply_mitigation(void);
69 static void __init l1tf_select_mitigation(void);
70 static void __init l1tf_apply_mitigation(void);
71 static void __init mds_select_mitigation(void);
72 static void __init mds_update_mitigation(void);
73 static void __init mds_apply_mitigation(void);
74 static void __init taa_select_mitigation(void);
75 static void __init taa_update_mitigation(void);
76 static void __init taa_apply_mitigation(void);
77 static void __init mmio_select_mitigation(void);
78 static void __init mmio_update_mitigation(void);
79 static void __init mmio_apply_mitigation(void);
80 static void __init rfds_select_mitigation(void);
81 static void __init rfds_update_mitigation(void);
82 static void __init rfds_apply_mitigation(void);
83 static void __init srbds_select_mitigation(void);
84 static void __init srbds_apply_mitigation(void);
85 static void __init l1d_flush_select_mitigation(void);
86 static void __init srso_select_mitigation(void);
87 static void __init srso_update_mitigation(void);
88 static void __init srso_apply_mitigation(void);
89 static void __init gds_select_mitigation(void);
90 static void __init gds_apply_mitigation(void);
91 static void __init bhi_select_mitigation(void);
92 static void __init bhi_update_mitigation(void);
93 static void __init bhi_apply_mitigation(void);
94 static void __init its_select_mitigation(void);
95 static void __init its_update_mitigation(void);
96 static void __init its_apply_mitigation(void);
97 static void __init tsa_select_mitigation(void);
98 static void __init tsa_apply_mitigation(void);
99
100 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
101 u64 x86_spec_ctrl_base;
102 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
103
104 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
105 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
106 EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
107
108 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
109
110 static u64 __ro_after_init x86_arch_cap_msr;
111
112 static DEFINE_MUTEX(spec_ctrl_mutex);
113
114 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
115
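/*
 * Set the system-wide return thunk, warning if a previously selected thunk
 * is being overridden by another mitigation.
 */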
116 static void __init set_return_thunk(void *thunk)
117 {
118 if (x86_return_thunk != __x86_return_thunk)
119 pr_warn("x86/bugs: return thunk changed\n");
120
121 x86_return_thunk = thunk;
122 }
123
124 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
125 static void update_spec_ctrl(u64 val)
126 {
127 this_cpu_write(x86_spec_ctrl_current, val);
128 wrmsrq(MSR_IA32_SPEC_CTRL, val);
129 }
130
131 /*
132 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
133 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
134 */
135 void update_spec_ctrl_cond(u64 val)
136 {
137 if (this_cpu_read(x86_spec_ctrl_current) == val)
138 return;
139
140 this_cpu_write(x86_spec_ctrl_current, val);
141
142 /*
143 * When KERNEL_IBRS is enabled this MSR is written on return-to-user;
144 * unless forced, the update can be delayed until that time.
145 */
146 if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
147 wrmsrq(MSR_IA32_SPEC_CTRL, val);
148 }
149
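/* Read the per-CPU cached SPEC_CTRL value without accessing the MSR. */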
150 noinstr u64 spec_ctrl_current(void)
151 {
152 return this_cpu_read(x86_spec_ctrl_current);
153 }
154 EXPORT_SYMBOL_GPL(spec_ctrl_current);
155
156 /*
157 * AMD specific MSR info for Speculative Store Bypass control.
158 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
159 */
160 u64 __ro_after_init x86_amd_ls_cfg_base;
161 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
162
163 /* Control conditional STIBP in switch_to() */
164 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
165 /* Control conditional IBPB in switch_mm() */
166 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
167 /* Control unconditional IBPB in switch_mm() */
168 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
169
170 /* Control IBPB on vCPU load */
171 DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
172 EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
173
174 /* Control CPU buffer clear before idling (halt, mwait) */
175 DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
176 EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
177
178 /*
179 * Controls whether L1D flush based mitigations are enabled, based on
180 * hardware features and the admin setting via the boot parameter;
181 * defaults to false.
182 */
183 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
184
185 /*
186 * Controls CPU Fill buffer clear before VMenter. This is a subset of
187 * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
188 * mitigation is required.
189 */
190 DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
191 EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);
192
193 void __init cpu_select_mitigations(void)
194 {
195 /*
196 * Read the SPEC_CTRL MSR to account for reserved bits which may
197 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
198 * init code as it is not enumerated and depends on the family.
199 */
200 if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
201 rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
202
203 /*
204 * A previously running kernel (kexec) may have some controls
205 * turned ON. Clear them and let the mitigations setup below
206 * rediscover them based on configuration.
207 */
208 x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
209 }
210
211 x86_arch_cap_msr = x86_read_arch_cap_msr();
212
213 /* Select the proper CPU mitigations before patching alternatives: */
214 spectre_v1_select_mitigation();
215 spectre_v2_select_mitigation();
216 retbleed_select_mitigation();
217 spectre_v2_user_select_mitigation();
218 ssb_select_mitigation();
219 l1tf_select_mitigation();
220 mds_select_mitigation();
221 taa_select_mitigation();
222 mmio_select_mitigation();
223 rfds_select_mitigation();
224 srbds_select_mitigation();
225 l1d_flush_select_mitigation();
226 srso_select_mitigation();
227 gds_select_mitigation();
228 its_select_mitigation();
229 bhi_select_mitigation();
230 tsa_select_mitigation();
231
232 /*
233 * After mitigations are selected, some may need to update their
234 * choices.
235 */
236 spectre_v2_update_mitigation();
237 /*
238 * retbleed_update_mitigation() relies on the state set by
239 * spectre_v2_update_mitigation(); specifically it wants to know about
240 * spectre_v2=ibrs.
241 */
242 retbleed_update_mitigation();
243 /*
244 * its_update_mitigation() depends on spectre_v2_update_mitigation()
245 * and retbleed_update_mitigation().
246 */
247 its_update_mitigation();
248
249 /*
250 * spectre_v2_user_update_mitigation() depends on
251 * retbleed_update_mitigation(), specifically the STIBP
252 * selection is forced for UNRET or IBPB.
253 */
254 spectre_v2_user_update_mitigation();
255 mds_update_mitigation();
256 taa_update_mitigation();
257 mmio_update_mitigation();
258 rfds_update_mitigation();
259 bhi_update_mitigation();
260 /* srso_update_mitigation() depends on retbleed_update_mitigation(). */
261 srso_update_mitigation();
262
263 spectre_v1_apply_mitigation();
264 spectre_v2_apply_mitigation();
265 retbleed_apply_mitigation();
266 spectre_v2_user_apply_mitigation();
267 ssb_apply_mitigation();
268 l1tf_apply_mitigation();
269 mds_apply_mitigation();
270 taa_apply_mitigation();
271 mmio_apply_mitigation();
272 rfds_apply_mitigation();
273 srbds_apply_mitigation();
274 srso_apply_mitigation();
275 gds_apply_mitigation();
276 its_apply_mitigation();
277 bhi_apply_mitigation();
278 tsa_apply_mitigation();
279 }
280
281 /*
282 * NOTE: This function is *only* called for SVM, since Intel uses
283 * MSR_IA32_SPEC_CTRL for SSBD.
284 */
285 void
286 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
287 {
288 u64 guestval, hostval;
289 struct thread_info *ti = current_thread_info();
290
291 /*
292 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
293 * MSR_AMD64_LS_CFG or MSR_AMD64_VIRT_SPEC_CTRL if supported.
294 */
295 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
296 !static_cpu_has(X86_FEATURE_VIRT_SSBD))
297 return;
298
299 /*
300 * If the host has SSBD mitigation enabled, force it in the host's
301 * virtual MSR value. If it's not permanently enabled, evaluate
302 * current's TIF_SSBD thread flag.
303 */
304 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
305 hostval = SPEC_CTRL_SSBD;
306 else
307 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
308
309 /* Sanitize the guest value */
310 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
311
312 if (hostval != guestval) {
313 unsigned long tif;
314
315 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
316 ssbd_spec_ctrl_to_tif(hostval);
317
318 speculation_ctrl_update(tif);
319 }
320 }
321 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
322
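/*
 * Enable Speculative Store Bypass Disable on AMD CPUs that don't handle SSBD
 * in MSR_SPEC_CTRL, via either MSR_AMD64_VIRT_SPEC_CTRL or MSR_AMD64_LS_CFG.
 */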
323 static void x86_amd_ssb_disable(void)
324 {
325 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
326
327 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
328 wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
329 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
330 wrmsrq(MSR_AMD64_LS_CFG, msrval);
331 }
332
333 #undef pr_fmt
334 #define pr_fmt(fmt) "MDS: " fmt
335
336 /* Default mitigation for MDS-affected CPUs */
337 static enum mds_mitigations mds_mitigation __ro_after_init =
338 IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
339 static bool mds_nosmt __ro_after_init = false;
340
341 static const char * const mds_strings[] = {
342 [MDS_MITIGATION_OFF] = "Vulnerable",
343 [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
344 [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
345 };
346
347 enum taa_mitigations {
348 TAA_MITIGATION_OFF,
349 TAA_MITIGATION_AUTO,
350 TAA_MITIGATION_UCODE_NEEDED,
351 TAA_MITIGATION_VERW,
352 TAA_MITIGATION_TSX_DISABLED,
353 };
354
355 /* Default mitigation for TAA-affected CPUs */
356 static enum taa_mitigations taa_mitigation __ro_after_init =
357 IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;
358
359 enum mmio_mitigations {
360 MMIO_MITIGATION_OFF,
361 MMIO_MITIGATION_AUTO,
362 MMIO_MITIGATION_UCODE_NEEDED,
363 MMIO_MITIGATION_VERW,
364 };
365
366 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
367 static enum mmio_mitigations mmio_mitigation __ro_after_init =
368 IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;
369
370 enum rfds_mitigations {
371 RFDS_MITIGATION_OFF,
372 RFDS_MITIGATION_AUTO,
373 RFDS_MITIGATION_VERW,
374 RFDS_MITIGATION_UCODE_NEEDED,
375 };
376
377 /* Default mitigation for Register File Data Sampling */
378 static enum rfds_mitigations rfds_mitigation __ro_after_init =
379 IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
380
381 /*
382 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
383 * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
384 */
385 static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
386
387 static void __init mds_select_mitigation(void)
388 {
389 if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
390 mds_mitigation = MDS_MITIGATION_OFF;
391 return;
392 }
393
394 if (mds_mitigation == MDS_MITIGATION_AUTO)
395 mds_mitigation = MDS_MITIGATION_FULL;
396
397 if (mds_mitigation == MDS_MITIGATION_OFF)
398 return;
399
400 verw_clear_cpu_buf_mitigation_selected = true;
401 }
402
403 static void __init mds_update_mitigation(void)
404 {
405 if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
406 return;
407
408 /* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
409 if (verw_clear_cpu_buf_mitigation_selected)
410 mds_mitigation = MDS_MITIGATION_FULL;
411
412 if (mds_mitigation == MDS_MITIGATION_FULL) {
413 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
414 mds_mitigation = MDS_MITIGATION_VMWERV;
415 }
416
417 pr_info("%s\n", mds_strings[mds_mitigation]);
418 }
419
420 static void __init mds_apply_mitigation(void)
421 {
422 if (mds_mitigation == MDS_MITIGATION_FULL ||
423 mds_mitigation == MDS_MITIGATION_VMWERV) {
424 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
425 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
426 (mds_nosmt || cpu_mitigations_auto_nosmt()))
427 cpu_smt_disable(false);
428 }
429 }
430
431 static int __init mds_cmdline(char *str)
432 {
433 if (!boot_cpu_has_bug(X86_BUG_MDS))
434 return 0;
435
436 if (!str)
437 return -EINVAL;
438
439 if (!strcmp(str, "off"))
440 mds_mitigation = MDS_MITIGATION_OFF;
441 else if (!strcmp(str, "full"))
442 mds_mitigation = MDS_MITIGATION_FULL;
443 else if (!strcmp(str, "full,nosmt")) {
444 mds_mitigation = MDS_MITIGATION_FULL;
445 mds_nosmt = true;
446 }
447
448 return 0;
449 }
450 early_param("mds", mds_cmdline);
451
452 #undef pr_fmt
453 #define pr_fmt(fmt) "TAA: " fmt
454
455 static bool taa_nosmt __ro_after_init;
456
457 static const char * const taa_strings[] = {
458 [TAA_MITIGATION_OFF] = "Vulnerable",
459 [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
460 [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
461 [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
462 };
463
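/* TAA is only a concern if the CPU is affected and TSX (RTM) is enabled. */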
464 static bool __init taa_vulnerable(void)
465 {
466 return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
467 }
468
469 static void __init taa_select_mitigation(void)
470 {
471 if (!boot_cpu_has_bug(X86_BUG_TAA)) {
472 taa_mitigation = TAA_MITIGATION_OFF;
473 return;
474 }
475
476 /* TSX previously disabled by tsx=off */
477 if (!boot_cpu_has(X86_FEATURE_RTM)) {
478 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
479 return;
480 }
481
482 if (cpu_mitigations_off())
483 taa_mitigation = TAA_MITIGATION_OFF;
484
485 /* Microcode will be checked in taa_update_mitigation(). */
486 if (taa_mitigation == TAA_MITIGATION_AUTO)
487 taa_mitigation = TAA_MITIGATION_VERW;
488
489 if (taa_mitigation != TAA_MITIGATION_OFF)
490 verw_clear_cpu_buf_mitigation_selected = true;
491 }
492
493 static void __init taa_update_mitigation(void)
494 {
495 if (!taa_vulnerable() || cpu_mitigations_off())
496 return;
497
498 if (verw_clear_cpu_buf_mitigation_selected)
499 taa_mitigation = TAA_MITIGATION_VERW;
500
501 if (taa_mitigation == TAA_MITIGATION_VERW) {
502 /* Check if the requisite ucode is available. */
503 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
504 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
505
506 /*
507 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
508 * A microcode update fixes this behavior to clear CPU buffers. It also
509 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
510 * ARCH_CAP_TSX_CTRL_MSR bit.
511 *
512 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
513 * update is required.
514 */
515 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
516 !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
517 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
518 }
519
520 pr_info("%s\n", taa_strings[taa_mitigation]);
521 }
522
523 static void __init taa_apply_mitigation(void)
524 {
525 if (taa_mitigation == TAA_MITIGATION_VERW ||
526 taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
527 /*
528 * TSX is enabled, select alternate mitigation for TAA which is
529 * the same as MDS. Enable MDS static branch to clear CPU buffers.
530 *
531 * For guests that can't determine whether the correct microcode is
532 * present on host, enable the mitigation for UCODE_NEEDED as well.
533 */
534 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
535
536 if (taa_nosmt || cpu_mitigations_auto_nosmt())
537 cpu_smt_disable(false);
538 }
539 }
540
541 static int __init tsx_async_abort_parse_cmdline(char *str)
542 {
543 if (!boot_cpu_has_bug(X86_BUG_TAA))
544 return 0;
545
546 if (!str)
547 return -EINVAL;
548
549 if (!strcmp(str, "off")) {
550 taa_mitigation = TAA_MITIGATION_OFF;
551 } else if (!strcmp(str, "full")) {
552 taa_mitigation = TAA_MITIGATION_VERW;
553 } else if (!strcmp(str, "full,nosmt")) {
554 taa_mitigation = TAA_MITIGATION_VERW;
555 taa_nosmt = true;
556 }
557
558 return 0;
559 }
560 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
561
562 #undef pr_fmt
563 #define pr_fmt(fmt) "MMIO Stale Data: " fmt
564
565 static bool mmio_nosmt __ro_after_init = false;
566
567 static const char * const mmio_strings[] = {
568 [MMIO_MITIGATION_OFF] = "Vulnerable",
569 [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
570 [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
571 };
572
573 static void __init mmio_select_mitigation(void)
574 {
575 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
576 cpu_mitigations_off()) {
577 mmio_mitigation = MMIO_MITIGATION_OFF;
578 return;
579 }
580
581 /* Microcode will be checked in mmio_update_mitigation(). */
582 if (mmio_mitigation == MMIO_MITIGATION_AUTO)
583 mmio_mitigation = MMIO_MITIGATION_VERW;
584
585 if (mmio_mitigation == MMIO_MITIGATION_OFF)
586 return;
587
588 /*
589 * Enable CPU buffer clear mitigation for host and VMM, if also affected
590 * by MDS or TAA.
591 */
592 if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
593 verw_clear_cpu_buf_mitigation_selected = true;
594 }
595
596 static void __init mmio_update_mitigation(void)
597 {
598 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || cpu_mitigations_off())
599 return;
600
601 if (verw_clear_cpu_buf_mitigation_selected)
602 mmio_mitigation = MMIO_MITIGATION_VERW;
603
604 if (mmio_mitigation == MMIO_MITIGATION_VERW) {
605 /*
606 * Check if the system has the right microcode.
607 *
608 * CPU Fill buffer clear mitigation is enumerated by either an explicit
609 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
610 * affected systems.
611 */
612 if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
613 (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
614 boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
615 !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
616 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
617 }
618
619 pr_info("%s\n", mmio_strings[mmio_mitigation]);
620 }
621
622 static void __init mmio_apply_mitigation(void)
623 {
624 if (mmio_mitigation == MMIO_MITIGATION_OFF)
625 return;
626
627 /*
628 * Only enable the VMM mitigation if the CPU buffer clear mitigation is
629 * not being used.
630 */
631 if (verw_clear_cpu_buf_mitigation_selected) {
632 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
633 static_branch_disable(&cpu_buf_vm_clear);
634 } else {
635 static_branch_enable(&cpu_buf_vm_clear);
636 }
637
638 /*
639 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
640 * be propagated to uncore buffers, clearing the Fill buffers on idle
641 * is required irrespective of SMT state.
642 */
643 if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
644 static_branch_enable(&cpu_buf_idle_clear);
645
646 if (mmio_nosmt || cpu_mitigations_auto_nosmt())
647 cpu_smt_disable(false);
648 }
649
650 static int __init mmio_stale_data_parse_cmdline(char *str)
651 {
652 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
653 return 0;
654
655 if (!str)
656 return -EINVAL;
657
658 if (!strcmp(str, "off")) {
659 mmio_mitigation = MMIO_MITIGATION_OFF;
660 } else if (!strcmp(str, "full")) {
661 mmio_mitigation = MMIO_MITIGATION_VERW;
662 } else if (!strcmp(str, "full,nosmt")) {
663 mmio_mitigation = MMIO_MITIGATION_VERW;
664 mmio_nosmt = true;
665 }
666
667 return 0;
668 }
669 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
670
671 #undef pr_fmt
672 #define pr_fmt(fmt) "Register File Data Sampling: " fmt
673
674 static const char * const rfds_strings[] = {
675 [RFDS_MITIGATION_OFF] = "Vulnerable",
676 [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
677 [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
678 };
679
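/* ARCH_CAP_RFDS_CLEAR enumerates that VERW also clears the register file. */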
680 static inline bool __init verw_clears_cpu_reg_file(void)
681 {
682 return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
683 }
684
685 static void __init rfds_select_mitigation(void)
686 {
687 if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
688 rfds_mitigation = RFDS_MITIGATION_OFF;
689 return;
690 }
691
692 if (rfds_mitigation == RFDS_MITIGATION_AUTO)
693 rfds_mitigation = RFDS_MITIGATION_VERW;
694
695 if (rfds_mitigation == RFDS_MITIGATION_OFF)
696 return;
697
698 if (verw_clears_cpu_reg_file())
699 verw_clear_cpu_buf_mitigation_selected = true;
700 }
701
702 static void __init rfds_update_mitigation(void)
703 {
704 if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off())
705 return;
706
707 if (verw_clear_cpu_buf_mitigation_selected)
708 rfds_mitigation = RFDS_MITIGATION_VERW;
709
710 if (rfds_mitigation == RFDS_MITIGATION_VERW) {
711 if (!verw_clears_cpu_reg_file())
712 rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
713 }
714
715 pr_info("%s\n", rfds_strings[rfds_mitigation]);
716 }
717
718 static void __init rfds_apply_mitigation(void)
719 {
720 if (rfds_mitigation == RFDS_MITIGATION_VERW)
721 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
722 }
723
724 static __init int rfds_parse_cmdline(char *str)
725 {
726 if (!str)
727 return -EINVAL;
728
729 if (!boot_cpu_has_bug(X86_BUG_RFDS))
730 return 0;
731
732 if (!strcmp(str, "off"))
733 rfds_mitigation = RFDS_MITIGATION_OFF;
734 else if (!strcmp(str, "on"))
735 rfds_mitigation = RFDS_MITIGATION_VERW;
736
737 return 0;
738 }
739 early_param("reg_file_data_sampling", rfds_parse_cmdline);
740
741 #undef pr_fmt
742 #define pr_fmt(fmt) "SRBDS: " fmt
743
744 enum srbds_mitigations {
745 SRBDS_MITIGATION_OFF,
746 SRBDS_MITIGATION_AUTO,
747 SRBDS_MITIGATION_UCODE_NEEDED,
748 SRBDS_MITIGATION_FULL,
749 SRBDS_MITIGATION_TSX_OFF,
750 SRBDS_MITIGATION_HYPERVISOR,
751 };
752
753 static enum srbds_mitigations srbds_mitigation __ro_after_init =
754 IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;
755
756 static const char * const srbds_strings[] = {
757 [SRBDS_MITIGATION_OFF] = "Vulnerable",
758 [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
759 [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
760 [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
761 [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
762 };
763
764 static bool srbds_off;
765
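/*
 * Program MSR_IA32_MCU_OPT_CTRL on this CPU according to the selected SRBDS
 * mitigation.
 */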
766 void update_srbds_msr(void)
767 {
768 u64 mcu_ctrl;
769
770 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
771 return;
772
773 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
774 return;
775
776 if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
777 return;
778
779 /*
780 * An MDS_NO CPU for which the SRBDS mitigation is not needed due to
781 * TSX being disabled, and which hasn't received the SRBDS MSR microcode.
782 */
783 if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
784 return;
785
786 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
787
788 switch (srbds_mitigation) {
789 case SRBDS_MITIGATION_OFF:
790 case SRBDS_MITIGATION_TSX_OFF:
791 mcu_ctrl |= RNGDS_MITG_DIS;
792 break;
793 case SRBDS_MITIGATION_FULL:
794 mcu_ctrl &= ~RNGDS_MITG_DIS;
795 break;
796 default:
797 break;
798 }
799
800 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
801 }
802
803 static void __init srbds_select_mitigation(void)
804 {
805 if (!boot_cpu_has_bug(X86_BUG_SRBDS) || cpu_mitigations_off()) {
806 srbds_mitigation = SRBDS_MITIGATION_OFF;
807 return;
808 }
809
810 if (srbds_mitigation == SRBDS_MITIGATION_AUTO)
811 srbds_mitigation = SRBDS_MITIGATION_FULL;
812
813 /*
814 * Check to see if this is one of the MDS_NO systems supporting TSX that
815 * are only exposed to SRBDS when TSX is enabled or when the CPU is
816 * affected by the Processor MMIO Stale Data vulnerability.
817 */
818 if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
819 !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
820 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
821 else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
822 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
823 else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
824 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
825 else if (srbds_off)
826 srbds_mitigation = SRBDS_MITIGATION_OFF;
827
828 pr_info("%s\n", srbds_strings[srbds_mitigation]);
829 }
830
831 static void __init srbds_apply_mitigation(void)
832 {
833 update_srbds_msr();
834 }
835
836 static int __init srbds_parse_cmdline(char *str)
837 {
838 if (!str)
839 return -EINVAL;
840
841 if (!boot_cpu_has_bug(X86_BUG_SRBDS))
842 return 0;
843
844 srbds_off = !strcmp(str, "off");
845 return 0;
846 }
847 early_param("srbds", srbds_parse_cmdline);
848
849 #undef pr_fmt
850 #define pr_fmt(fmt) "L1D Flush : " fmt
851
852 enum l1d_flush_mitigations {
853 L1D_FLUSH_OFF = 0,
854 L1D_FLUSH_ON,
855 };
856
857 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
858
859 static void __init l1d_flush_select_mitigation(void)
860 {
861 if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
862 return;
863
864 static_branch_enable(&switch_mm_cond_l1d_flush);
865 pr_info("Conditional flush on switch_mm() enabled\n");
866 }
867
868 static int __init l1d_flush_parse_cmdline(char *str)
869 {
870 if (!strcmp(str, "on"))
871 l1d_flush_mitigation = L1D_FLUSH_ON;
872
873 return 0;
874 }
875 early_param("l1d_flush", l1d_flush_parse_cmdline);
876
877 #undef pr_fmt
878 #define pr_fmt(fmt) "GDS: " fmt
879
880 enum gds_mitigations {
881 GDS_MITIGATION_OFF,
882 GDS_MITIGATION_AUTO,
883 GDS_MITIGATION_UCODE_NEEDED,
884 GDS_MITIGATION_FORCE,
885 GDS_MITIGATION_FULL,
886 GDS_MITIGATION_FULL_LOCKED,
887 GDS_MITIGATION_HYPERVISOR,
888 };
889
890 static enum gds_mitigations gds_mitigation __ro_after_init =
891 IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;
892
893 static const char * const gds_strings[] = {
894 [GDS_MITIGATION_OFF] = "Vulnerable",
895 [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
896 [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode",
897 [GDS_MITIGATION_FULL] = "Mitigation: Microcode",
898 [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)",
899 [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
900 };
901
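/* Report whether the microcode based GDS mitigation is active. */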
902 bool gds_ucode_mitigated(void)
903 {
904 return (gds_mitigation == GDS_MITIGATION_FULL ||
905 gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
906 }
907 EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
908
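/*
 * Program the GDS mitigation control in MSR_IA32_MCU_OPT_CTRL on this CPU and
 * verify that the write took effect; it is ignored if this CPU is locked but
 * the boot CPU was not.
 */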
909 void update_gds_msr(void)
910 {
911 u64 mcu_ctrl_after;
912 u64 mcu_ctrl;
913
914 switch (gds_mitigation) {
915 case GDS_MITIGATION_OFF:
916 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
917 mcu_ctrl |= GDS_MITG_DIS;
918 break;
919 case GDS_MITIGATION_FULL_LOCKED:
920 /*
921 * The LOCKED state comes from the boot CPU. APs might not have
922 * the same state. Make sure the mitigation is enabled on all
923 * CPUs.
924 */
925 case GDS_MITIGATION_FULL:
926 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
927 mcu_ctrl &= ~GDS_MITG_DIS;
928 break;
929 case GDS_MITIGATION_FORCE:
930 case GDS_MITIGATION_UCODE_NEEDED:
931 case GDS_MITIGATION_HYPERVISOR:
932 case GDS_MITIGATION_AUTO:
933 return;
934 }
935
936 wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
937
938 /*
939 * Check to make sure that the WRMSR value was not ignored. Writes to
940 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
941 * processor was not.
942 */
943 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
944 WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
945 }
946
947 static void __init gds_select_mitigation(void)
948 {
949 u64 mcu_ctrl;
950
951 if (!boot_cpu_has_bug(X86_BUG_GDS))
952 return;
953
954 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
955 gds_mitigation = GDS_MITIGATION_HYPERVISOR;
956 return;
957 }
958
959 if (cpu_mitigations_off())
960 gds_mitigation = GDS_MITIGATION_OFF;
961 /* Will verify below that mitigation _can_ be disabled */
962
963 if (gds_mitigation == GDS_MITIGATION_AUTO)
964 gds_mitigation = GDS_MITIGATION_FULL;
965
966 /* No microcode */
967 if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
968 if (gds_mitigation != GDS_MITIGATION_FORCE)
969 gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
970 return;
971 }
972
973 /* Microcode has mitigation, use it */
974 if (gds_mitigation == GDS_MITIGATION_FORCE)
975 gds_mitigation = GDS_MITIGATION_FULL;
976
977 rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
978 if (mcu_ctrl & GDS_MITG_LOCKED) {
979 if (gds_mitigation == GDS_MITIGATION_OFF)
980 pr_warn("Mitigation locked. Disable failed.\n");
981
982 /*
983 * The mitigation is selected from the boot CPU. All other CPUs
984 * _should_ have the same state. If the boot CPU isn't locked
985 * but others are then update_gds_msr() will WARN() of the state
986 * mismatch. If the boot CPU is locked update_gds_msr() will
987 * ensure the other CPUs have the mitigation enabled.
988 */
989 gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
990 }
991 }
992
993 static void __init gds_apply_mitigation(void)
994 {
995 if (!boot_cpu_has_bug(X86_BUG_GDS))
996 return;
997
998 /* Microcode is present */
999 if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
1000 update_gds_msr();
1001 else if (gds_mitigation == GDS_MITIGATION_FORCE) {
1002 /*
1003 * This only needs to be done on the boot CPU so do it
1004 * here rather than in update_gds_msr()
1005 */
1006 setup_clear_cpu_cap(X86_FEATURE_AVX);
1007 pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
1008 }
1009
1010 pr_info("%s\n", gds_strings[gds_mitigation]);
1011 }
1012
1013 static int __init gds_parse_cmdline(char *str)
1014 {
1015 if (!str)
1016 return -EINVAL;
1017
1018 if (!boot_cpu_has_bug(X86_BUG_GDS))
1019 return 0;
1020
1021 if (!strcmp(str, "off"))
1022 gds_mitigation = GDS_MITIGATION_OFF;
1023 else if (!strcmp(str, "force"))
1024 gds_mitigation = GDS_MITIGATION_FORCE;
1025
1026 return 0;
1027 }
1028 early_param("gather_data_sampling", gds_parse_cmdline);
1029
1030 #undef pr_fmt
1031 #define pr_fmt(fmt) "Spectre V1 : " fmt
1032
1033 enum spectre_v1_mitigation {
1034 SPECTRE_V1_MITIGATION_NONE,
1035 SPECTRE_V1_MITIGATION_AUTO,
1036 };
1037
1038 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
1039 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
1040 SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;
1041
1042 static const char * const spectre_v1_strings[] = {
1043 [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
1044 [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
1045 };
1046
1047 /*
1048 * Does SMAP provide full mitigation against speculative kernel access to
1049 * userspace?
1050 */
1051 static bool smap_works_speculatively(void)
1052 {
1053 if (!boot_cpu_has(X86_FEATURE_SMAP))
1054 return false;
1055
1056 /*
1057 * On CPUs which are vulnerable to Meltdown, SMAP does not
1058 * prevent speculative access to user data in the L1 cache.
1059 * Consider SMAP to be non-functional as a mitigation on these
1060 * CPUs.
1061 */
1062 if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
1063 return false;
1064
1065 return true;
1066 }
1067
1068 static void __init spectre_v1_select_mitigation(void)
1069 {
1070 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
1071 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1072 }
1073
1074 static void __init spectre_v1_apply_mitigation(void)
1075 {
1076 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
1077 return;
1078
1079 if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
1080 /*
1081 * With Spectre v1, a user can speculatively control either
1082 * path of a conditional swapgs with a user-controlled GS
1083 * value. The mitigation is to add lfences to both code paths.
1084 *
1085 * If FSGSBASE is enabled, the user can put a kernel address in
1086 * GS, in which case SMAP provides no protection.
1087 *
1088 * If FSGSBASE is disabled, the user can only put a user space
1089 * address in GS. That makes an attack harder, but still
1090 * possible if there's no SMAP protection.
1091 */
1092 if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
1093 !smap_works_speculatively()) {
1094 /*
1095 * Mitigation can be provided from SWAPGS itself or
1096 * PTI as the CR3 write in the Meltdown mitigation
1097 * is serializing.
1098 *
1099 * If neither is there, mitigate with an LFENCE to
1100 * stop speculation through swapgs.
1101 */
1102 if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
1103 !boot_cpu_has(X86_FEATURE_PTI))
1104 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
1105
1106 /*
1107 * Enable lfences in the kernel entry (non-swapgs)
1108 * paths, to prevent user entry from speculatively
1109 * skipping swapgs.
1110 */
1111 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
1112 }
1113 }
1114
1115 pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1116 }
1117
1118 static int __init nospectre_v1_cmdline(char *str)
1119 {
1120 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
1121 return 0;
1122 }
1123 early_param("nospectre_v1", nospectre_v1_cmdline);
1124
1125 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
1126
1127 #undef pr_fmt
1128 #define pr_fmt(fmt) "RETBleed: " fmt
1129
1130 enum its_mitigation {
1131 ITS_MITIGATION_OFF,
1132 ITS_MITIGATION_AUTO,
1133 ITS_MITIGATION_VMEXIT_ONLY,
1134 ITS_MITIGATION_ALIGNED_THUNKS,
1135 ITS_MITIGATION_RETPOLINE_STUFF,
1136 };
1137
1138 static enum its_mitigation its_mitigation __ro_after_init =
1139 IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF;
1140
1141 enum retbleed_mitigation {
1142 RETBLEED_MITIGATION_NONE,
1143 RETBLEED_MITIGATION_AUTO,
1144 RETBLEED_MITIGATION_UNRET,
1145 RETBLEED_MITIGATION_IBPB,
1146 RETBLEED_MITIGATION_IBRS,
1147 RETBLEED_MITIGATION_EIBRS,
1148 RETBLEED_MITIGATION_STUFF,
1149 };
1150
1151 static const char * const retbleed_strings[] = {
1152 [RETBLEED_MITIGATION_NONE] = "Vulnerable",
1153 [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
1154 [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
1155 [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
1156 [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
1157 [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
1158 };
1159
1160 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
1161 IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;
1162
1163 static int __ro_after_init retbleed_nosmt = false;
1164
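/* Parse the comma-separated list of retbleed= command line options. */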
1165 static int __init retbleed_parse_cmdline(char *str)
1166 {
1167 if (!str)
1168 return -EINVAL;
1169
1170 while (str) {
1171 char *next = strchr(str, ',');
1172 if (next) {
1173 *next = 0;
1174 next++;
1175 }
1176
1177 if (!strcmp(str, "off")) {
1178 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1179 } else if (!strcmp(str, "auto")) {
1180 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1181 } else if (!strcmp(str, "unret")) {
1182 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1183 } else if (!strcmp(str, "ibpb")) {
1184 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1185 } else if (!strcmp(str, "stuff")) {
1186 retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1187 } else if (!strcmp(str, "nosmt")) {
1188 retbleed_nosmt = true;
1189 } else if (!strcmp(str, "force")) {
1190 setup_force_cpu_bug(X86_BUG_RETBLEED);
1191 } else {
1192 pr_err("Ignoring unknown retbleed option (%s).", str);
1193 }
1194
1195 str = next;
1196 }
1197
1198 return 0;
1199 }
1200 early_param("retbleed", retbleed_parse_cmdline);
1201
1202 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
1203 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
1204
1205 static void __init retbleed_select_mitigation(void)
1206 {
1207 if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) {
1208 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1209 return;
1210 }
1211
1212 switch (retbleed_mitigation) {
1213 case RETBLEED_MITIGATION_UNRET:
1214 if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
1215 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1216 pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
1217 }
1218 break;
1219 case RETBLEED_MITIGATION_IBPB:
1220 if (!boot_cpu_has(X86_FEATURE_IBPB)) {
1221 pr_err("WARNING: CPU does not support IBPB.\n");
1222 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1223 } else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
1224 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
1225 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1226 }
1227 break;
1228 case RETBLEED_MITIGATION_STUFF:
1229 if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1230 pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
1231 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1232 } else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1233 pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
1234 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1235 }
1236 break;
1237 default:
1238 break;
1239 }
1240
1241 if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
1242 return;
1243
1244 /* Intel mitigation selected in retbleed_update_mitigation() */
1245 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1246 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
1247 if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
1248 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1249 else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
1250 boot_cpu_has(X86_FEATURE_IBPB))
1251 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1252 else
1253 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1254 }
1255 }
1256
1257 static void __init retbleed_update_mitigation(void)
1258 {
1259 if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
1260 return;
1261
1262 if (retbleed_mitigation == RETBLEED_MITIGATION_NONE)
1263 goto out;
1264
1265 /*
1266 * retbleed=stuff is only allowed on Intel. If stuffing can't be used
1267 * then a different mitigation will be selected below.
1268 *
1269 * its=stuff will also attempt to enable stuffing.
1270 */
1271 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF ||
1272 its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF) {
1273 if (spectre_v2_enabled != SPECTRE_V2_RETPOLINE) {
1274 pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
1275 retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
1276 } else {
1277 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
1278 pr_info("Retbleed mitigation updated to stuffing\n");
1279
1280 retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1281 }
1282 }
1283 /*
1284 * Let IBRS trump all on Intel without affecting the effects of the
1285 * retbleed= cmdline option, except for call depth based stuffing.
1286 */
1287 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1288 switch (spectre_v2_enabled) {
1289 case SPECTRE_V2_IBRS:
1290 retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1291 break;
1292 case SPECTRE_V2_EIBRS:
1293 case SPECTRE_V2_EIBRS_RETPOLINE:
1294 case SPECTRE_V2_EIBRS_LFENCE:
1295 retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1296 break;
1297 default:
1298 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
1299 pr_err(RETBLEED_INTEL_MSG);
1300 }
1301 /* If nothing has set the mitigation yet, default to NONE. */
1302 if (retbleed_mitigation == RETBLEED_MITIGATION_AUTO)
1303 retbleed_mitigation = RETBLEED_MITIGATION_NONE;
1304 }
1305 out:
1306 pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
1307 }
1308
1309
1310 static void __init retbleed_apply_mitigation(void)
1311 {
1312 bool mitigate_smt = false;
1313
1314 switch (retbleed_mitigation) {
1315 case RETBLEED_MITIGATION_NONE:
1316 return;
1317
1318 case RETBLEED_MITIGATION_UNRET:
1319 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1320 setup_force_cpu_cap(X86_FEATURE_UNRET);
1321
1322 set_return_thunk(retbleed_return_thunk);
1323
1324 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
1325 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
1326 pr_err(RETBLEED_UNTRAIN_MSG);
1327
1328 mitigate_smt = true;
1329 break;
1330
1331 case RETBLEED_MITIGATION_IBPB:
1332 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1333 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
1334 mitigate_smt = true;
1335
1336 /*
1337 * IBPB on entry already obviates the need for
1338 * software-based untraining so clear those in case some
1339 * other mitigation like SRSO has selected them.
1340 */
1341 setup_clear_cpu_cap(X86_FEATURE_UNRET);
1342 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
1343
1344 /*
1345 * There is no need for RSB filling: write_ibpb() ensures
1346 * all predictions, including the RSB, are invalidated,
1347 * regardless of IBPB implementation.
1348 */
1349 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1350
1351 break;
1352
1353 case RETBLEED_MITIGATION_STUFF:
1354 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1355 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1356
1357 set_return_thunk(call_depth_return_thunk);
1358 break;
1359
1360 default:
1361 break;
1362 }
1363
1364 if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
1365 (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
1366 cpu_smt_disable(false);
1367 }
1368
1369 #undef pr_fmt
1370 #define pr_fmt(fmt) "ITS: " fmt
1371
1372 static const char * const its_strings[] = {
1373 [ITS_MITIGATION_OFF] = "Vulnerable",
1374 [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
1375 [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
1376 [ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
1377 };
1378
1379 static int __init its_parse_cmdline(char *str)
1380 {
1381 if (!str)
1382 return -EINVAL;
1383
1384 if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
1385 pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
1386 return 0;
1387 }
1388
1389 if (!strcmp(str, "off")) {
1390 its_mitigation = ITS_MITIGATION_OFF;
1391 } else if (!strcmp(str, "on")) {
1392 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1393 } else if (!strcmp(str, "force")) {
1394 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1395 setup_force_cpu_bug(X86_BUG_ITS);
1396 } else if (!strcmp(str, "vmexit")) {
1397 its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
1398 } else if (!strcmp(str, "stuff")) {
1399 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1400 } else {
1401 pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
1402 }
1403
1404 return 0;
1405 }
1406 early_param("indirect_target_selection", its_parse_cmdline);
1407
1408 static void __init its_select_mitigation(void)
1409 {
1410 if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
1411 its_mitigation = ITS_MITIGATION_OFF;
1412 return;
1413 }
1414
1415 if (its_mitigation == ITS_MITIGATION_AUTO)
1416 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1417
1418 if (its_mitigation == ITS_MITIGATION_OFF)
1419 return;
1420
1421 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
1422 !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
1423 pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
1424 its_mitigation = ITS_MITIGATION_OFF;
1425 return;
1426 }
1427
1428 if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
1429 pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
1430 its_mitigation = ITS_MITIGATION_OFF;
1431 return;
1432 }
1433
1434 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1435 !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
1436 pr_err("RSB stuff mitigation not supported, using default\n");
1437 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1438 }
1439
1440 if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY &&
1441 !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
1442 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1443 }
1444
1445 static void __init its_update_mitigation(void)
1446 {
1447 if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off())
1448 return;
1449
1450 switch (spectre_v2_enabled) {
1451 case SPECTRE_V2_NONE:
1452 pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
1453 its_mitigation = ITS_MITIGATION_OFF;
1454 break;
1455 case SPECTRE_V2_RETPOLINE:
1456 /* Retpoline+CDT mitigates ITS */
1457 if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF)
1458 its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
1459 break;
1460 case SPECTRE_V2_LFENCE:
1461 case SPECTRE_V2_EIBRS_LFENCE:
1462 pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
1463 its_mitigation = ITS_MITIGATION_OFF;
1464 break;
1465 default:
1466 break;
1467 }
1468
1469 /*
1470 * retbleed_update_mitigation() will try to do stuffing if its=stuff.
1471 * If it can't, such as if spectre_v2!=retpoline, then fall back to
1472 * aligned thunks.
1473 */
1474 if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF &&
1475 retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
1476 its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
1477
1478 pr_info("%s\n", its_strings[its_mitigation]);
1479 }
1480
1481 static void __init its_apply_mitigation(void)
1482 {
1483 /* its=stuff forces retbleed stuffing and is enabled there. */
1484 if (its_mitigation != ITS_MITIGATION_ALIGNED_THUNKS)
1485 return;
1486
1487 if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
1488 setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
1489
1490 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1491 set_return_thunk(its_return_thunk);
1492 }
1493
1494 #undef pr_fmt
1495 #define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt
1496
1497 enum tsa_mitigations {
1498 TSA_MITIGATION_NONE,
1499 TSA_MITIGATION_AUTO,
1500 TSA_MITIGATION_UCODE_NEEDED,
1501 TSA_MITIGATION_USER_KERNEL,
1502 TSA_MITIGATION_VM,
1503 TSA_MITIGATION_FULL,
1504 };
1505
1506 static const char * const tsa_strings[] = {
1507 [TSA_MITIGATION_NONE] = "Vulnerable",
1508 [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
1509 [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
1510 [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
1511 [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
1512 };
1513
1514 static enum tsa_mitigations tsa_mitigation __ro_after_init =
1515 IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;
1516
1517 static int __init tsa_parse_cmdline(char *str)
1518 {
1519 if (!str)
1520 return -EINVAL;
1521
1522 if (!strcmp(str, "off"))
1523 tsa_mitigation = TSA_MITIGATION_NONE;
1524 else if (!strcmp(str, "on"))
1525 tsa_mitigation = TSA_MITIGATION_FULL;
1526 else if (!strcmp(str, "user"))
1527 tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
1528 else if (!strcmp(str, "vm"))
1529 tsa_mitigation = TSA_MITIGATION_VM;
1530 else
1531 pr_err("Ignoring unknown tsa=%s option.\n", str);
1532
1533 return 0;
1534 }
1535 early_param("tsa", tsa_parse_cmdline);
1536
1537 static void __init tsa_select_mitigation(void)
1538 {
1539 if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
1540 tsa_mitigation = TSA_MITIGATION_NONE;
1541 return;
1542 }
1543
1544 if (tsa_mitigation == TSA_MITIGATION_NONE)
1545 return;
1546
1547 if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) {
1548 tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
1549 goto out;
1550 }
1551
1552 if (tsa_mitigation == TSA_MITIGATION_AUTO)
1553 tsa_mitigation = TSA_MITIGATION_FULL;
1554
1555 /*
1556 * No need to set verw_clear_cpu_buf_mitigation_selected - it
1557 * doesn't fit all cases here and it is not needed because this
1558 * is the only VERW-based mitigation on AMD.
1559 */
1560 out:
1561 pr_info("%s\n", tsa_strings[tsa_mitigation]);
1562 }
1563
1564 static void __init tsa_apply_mitigation(void)
1565 {
1566 switch (tsa_mitigation) {
1567 case TSA_MITIGATION_USER_KERNEL:
1568 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1569 break;
1570 case TSA_MITIGATION_VM:
1571 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1572 break;
1573 case TSA_MITIGATION_FULL:
1574 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
1575 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
1576 break;
1577 default:
1578 break;
1579 }
1580 }
1581
1582 #undef pr_fmt
1583 #define pr_fmt(fmt) "Spectre V2 : " fmt
1584
1585 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
1586 SPECTRE_V2_USER_NONE;
1587 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
1588 SPECTRE_V2_USER_NONE;
1589
1590 #ifdef CONFIG_MITIGATION_RETPOLINE
1591 static bool spectre_v2_bad_module;
1592
1593 bool retpoline_module_ok(bool has_retpoline)
1594 {
1595 if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1596 return true;
1597
1598 pr_err("System may be vulnerable to spectre v2\n");
1599 spectre_v2_bad_module = true;
1600 return false;
1601 }
1602
1603 static inline const char *spectre_v2_module_string(void)
1604 {
1605 return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1606 }
1607 #else
1608 static inline const char *spectre_v2_module_string(void) { return ""; }
1609 #endif
1610
1611 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1612 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1613 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1614 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1615
1616 #ifdef CONFIG_BPF_SYSCALL
1617 void unpriv_ebpf_notify(int new_state)
1618 {
1619 if (new_state)
1620 return;
1621
1622 /* Unprivileged eBPF is enabled */
1623
1624 switch (spectre_v2_enabled) {
1625 case SPECTRE_V2_EIBRS:
1626 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1627 break;
1628 case SPECTRE_V2_EIBRS_LFENCE:
1629 if (sched_smt_active())
1630 pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1631 break;
1632 default:
1633 break;
1634 }
1635 }
1636 #endif
1637
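/* Match a boot command line option argument of length @arglen against @opt. */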
1638 static inline bool match_option(const char *arg, int arglen, const char *opt)
1639 {
1640 int len = strlen(opt);
1641
1642 return len == arglen && !strncmp(arg, opt, len);
1643 }
1644
1645 /* The kernel command line selection for spectre v2 */
1646 enum spectre_v2_mitigation_cmd {
1647 SPECTRE_V2_CMD_NONE,
1648 SPECTRE_V2_CMD_AUTO,
1649 SPECTRE_V2_CMD_FORCE,
1650 SPECTRE_V2_CMD_RETPOLINE,
1651 SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1652 SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1653 SPECTRE_V2_CMD_EIBRS,
1654 SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1655 SPECTRE_V2_CMD_EIBRS_LFENCE,
1656 SPECTRE_V2_CMD_IBRS,
1657 };
1658
1659 static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO;
1660
1661 enum spectre_v2_user_cmd {
1662 SPECTRE_V2_USER_CMD_NONE,
1663 SPECTRE_V2_USER_CMD_AUTO,
1664 SPECTRE_V2_USER_CMD_FORCE,
1665 SPECTRE_V2_USER_CMD_PRCTL,
1666 SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1667 SPECTRE_V2_USER_CMD_SECCOMP,
1668 SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1669 };
1670
1671 static const char * const spectre_v2_user_strings[] = {
1672 [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
1673 [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
1674 [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
1675 [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
1676 [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
1677 };
1678
1679 static const struct {
1680 const char *option;
1681 enum spectre_v2_user_cmd cmd;
1682 bool secure;
1683 } v2_user_options[] __initconst = {
1684 { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
1685 { "off", SPECTRE_V2_USER_CMD_NONE, false },
1686 { "on", SPECTRE_V2_USER_CMD_FORCE, true },
1687 { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
1688 { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
1689 { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
1690 { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
1691 };
1692
1693 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
1694 {
1695 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1696 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
1697 }
1698
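/* Parse the spectre_v2_user= command line option. */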
1699 static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void)
1700 {
1701 char arg[20];
1702 int ret, i;
1703
1704 if (cpu_mitigations_off() || !IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2))
1705 return SPECTRE_V2_USER_CMD_NONE;
1706
1707 ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
1708 arg, sizeof(arg));
1709 if (ret < 0)
1710 return SPECTRE_V2_USER_CMD_AUTO;
1711
1712 for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
1713 if (match_option(arg, ret, v2_user_options[i].option)) {
1714 spec_v2_user_print_cond(v2_user_options[i].option,
1715 v2_user_options[i].secure);
1716 return v2_user_options[i].cmd;
1717 }
1718 }
1719
1720 pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
1721 return SPECTRE_V2_USER_CMD_AUTO;
1722 }
1723
1724 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1725 {
1726 return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1727 }
1728
1729 static void __init spectre_v2_user_select_mitigation(void)
1730 {
1731 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1732 return;
1733
1734 switch (spectre_v2_parse_user_cmdline()) {
1735 case SPECTRE_V2_USER_CMD_NONE:
1736 return;
1737 case SPECTRE_V2_USER_CMD_FORCE:
1738 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1739 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1740 break;
1741 case SPECTRE_V2_USER_CMD_AUTO:
1742 case SPECTRE_V2_USER_CMD_PRCTL:
1743 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1744 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1745 break;
1746 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1747 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1748 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1749 break;
1750 case SPECTRE_V2_USER_CMD_SECCOMP:
1751 if (IS_ENABLED(CONFIG_SECCOMP))
1752 spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
1753 else
1754 spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
1755 spectre_v2_user_stibp = spectre_v2_user_ibpb;
1756 break;
1757 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1758 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1759 if (IS_ENABLED(CONFIG_SECCOMP))
1760 spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
1761 else
1762 spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
1763 break;
1764 }
1765
1766 /*
1767 * At this point, an STIBP mode other than "off" has been set.
1768 * If STIBP support is not being forced, check if STIBP always-on
1769 * is preferred.
1770 */
1771 if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1772 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
1773 boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1774 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1775
1776 if (!boot_cpu_has(X86_FEATURE_IBPB))
1777 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1778
1779 if (!boot_cpu_has(X86_FEATURE_STIBP))
1780 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1781 }
1782
1783 static void __init spectre_v2_user_update_mitigation(void)
1784 {
1785 if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1786 return;
1787
1788 /* The spectre_v2 cmd line can override spectre_v2_user options */
1789 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
1790 spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
1791 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1792 } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
1793 spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1794 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
1795 }
1796
1797 /*
1798 * STIBP is not required if STIBP is unsupported, if SMT is impossible, or
1799 * if Intel enhanced IBRS is enabled.
1800 *
1801 * Intel's Enhanced IBRS also protects against cross-thread branch target
1802 * injection in user-mode as the IBRS bit remains always set which
1803 * implicitly enables cross-thread protections. However, in legacy IBRS
1804 * mode, the IBRS bit is set only on kernel entry and cleared on return
1805 * to userspace. AMD Automatic IBRS also does not protect userspace.
1806 * These modes therefore disable the implicit cross-thread protection,
1807 * so allow for STIBP to be selected in those cases.
1808 */
1809 if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1810 !cpu_smt_possible() ||
1811 (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1812 !boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
1813 spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
1814 return;
1815 }
1816
1817 if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE &&
1818 (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
1819 retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) {
1820 if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT &&
1821 spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED)
1822 pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
1823 spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
1824 }
1825 pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]);
1826 }
1827
1828 static void __init spectre_v2_user_apply_mitigation(void)
1829 {
1830 /* Initialize Indirect Branch Prediction Barrier */
1831 if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) {
1832 static_branch_enable(&switch_vcpu_ibpb);
1833
1834 switch (spectre_v2_user_ibpb) {
1835 case SPECTRE_V2_USER_STRICT:
1836 static_branch_enable(&switch_mm_always_ibpb);
1837 break;
1838 case SPECTRE_V2_USER_PRCTL:
1839 case SPECTRE_V2_USER_SECCOMP:
1840 static_branch_enable(&switch_mm_cond_ibpb);
1841 break;
1842 default:
1843 break;
1844 }
1845
1846 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
1847 static_key_enabled(&switch_mm_always_ibpb) ?
1848 "always-on" : "conditional");
1849 }
1850 }
1851
1852 static const char * const spectre_v2_strings[] = {
1853 [SPECTRE_V2_NONE] = "Vulnerable",
1854 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
1855 [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
1856 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
1857 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
1858 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
1859 [SPECTRE_V2_IBRS] = "Mitigation: IBRS",
1860 };
1861
1862 static const struct {
1863 const char *option;
1864 enum spectre_v2_mitigation_cmd cmd;
1865 bool secure;
1866 } mitigation_options[] __initconst = {
1867 { "off", SPECTRE_V2_CMD_NONE, false },
1868 { "on", SPECTRE_V2_CMD_FORCE, true },
1869 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
1870 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
1871 { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
1872 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
1873 { "eibrs", SPECTRE_V2_CMD_EIBRS, false },
1874 { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false },
1875 { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false },
1876 { "auto", SPECTRE_V2_CMD_AUTO, false },
1877 { "ibrs", SPECTRE_V2_CMD_IBRS, false },
1878 };
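/*
 * Example: "spectre_v2=eibrs,retpoline" selects SPECTRE_V2_CMD_EIBRS_RETPOLINE,
 * while "retpoline,amd" is kept as an alias for "retpoline,lfence".
 */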
1879
1880 static void __init spec_v2_print_cond(const char *reason, bool secure)
1881 {
1882 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1883 pr_info("%s selected on command line.\n", reason);
1884 }
1885
1886 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1887 {
1888 enum spectre_v2_mitigation_cmd cmd;
1889 char arg[20];
1890 int ret, i;
1891
1892 cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE;
1893 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
1894 cpu_mitigations_off())
1895 return SPECTRE_V2_CMD_NONE;
1896
1897 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
1898 if (ret < 0)
1899 return cmd;
1900
1901 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
1902 if (!match_option(arg, ret, mitigation_options[i].option))
1903 continue;
1904 cmd = mitigation_options[i].cmd;
1905 break;
1906 }
1907
1908 if (i >= ARRAY_SIZE(mitigation_options)) {
1909 pr_err("unknown option (%s). Switching to default mode\n", arg);
1910 return cmd;
1911 }
1912
1913 if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
1914 cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1915 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
1916 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1917 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1918 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
1919 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1920 mitigation_options[i].option);
1921 return SPECTRE_V2_CMD_AUTO;
1922 }
1923
1924 if ((cmd == SPECTRE_V2_CMD_EIBRS ||
1925 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1926 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1927 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1928 pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
1929 mitigation_options[i].option);
1930 return SPECTRE_V2_CMD_AUTO;
1931 }
1932
1933 if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1934 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
1935 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
1936 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
1937 mitigation_options[i].option);
1938 return SPECTRE_V2_CMD_AUTO;
1939 }
1940
1941 if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
1942 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1943 mitigation_options[i].option);
1944 return SPECTRE_V2_CMD_AUTO;
1945 }
1946
1947 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1948 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
1949 mitigation_options[i].option);
1950 return SPECTRE_V2_CMD_AUTO;
1951 }
1952
1953 if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
1954 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
1955 mitigation_options[i].option);
1956 return SPECTRE_V2_CMD_AUTO;
1957 }
1958
1959 if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
1960 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
1961 mitigation_options[i].option);
1962 return SPECTRE_V2_CMD_AUTO;
1963 }
1964
1965 spec_v2_print_cond(mitigation_options[i].option,
1966 mitigation_options[i].secure);
1967 return cmd;
1968 }
1969
1970 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
1971 {
1972 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
1973 pr_err("Kernel not compiled with retpoline; no mitigation available!");
1974 return SPECTRE_V2_NONE;
1975 }
1976
1977 return SPECTRE_V2_RETPOLINE;
1978 }
1979
1980 static bool __ro_after_init rrsba_disabled;
1981
1982 /* Disable in-kernel use of non-RSB RET predictors */
1983 static void __init spec_ctrl_disable_kernel_rrsba(void)
1984 {
1985 if (rrsba_disabled)
1986 return;
1987
1988 if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
1989 rrsba_disabled = true;
1990 return;
1991 }
1992
1993 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
1994 return;
1995
1996 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
1997 update_spec_ctrl(x86_spec_ctrl_base);
1998 rrsba_disabled = true;
1999 }
2000
2001 static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
2002 {
2003 /*
2004 * WARNING! There are many subtleties to consider when changing *any*
2005 * code related to RSB-related mitigations. Before doing so, carefully
2006 * read the following document, and update if necessary:
2007 *
2008 * Documentation/admin-guide/hw-vuln/rsb.rst
2009 *
2010 * In an overly simplified nutshell:
2011 *
2012 * - User->user RSB attacks are conditionally mitigated during
2013 * context switches by cond_mitigation -> write_ibpb().
2014 *
2015 * - User->kernel and guest->host attacks are mitigated by eIBRS or
2016 * RSB filling.
2017 *
2018 * Note that, depending on the config, other alternative mitigations
2019 * may end up being used instead, e.g., IBPB on entry/vmexit, call
2020 * depth tracking, or return thunks.
2021 */
2022
2023 switch (mode) {
2024 case SPECTRE_V2_NONE:
2025 break;
2026
2027 case SPECTRE_V2_EIBRS:
2028 case SPECTRE_V2_EIBRS_LFENCE:
2029 case SPECTRE_V2_EIBRS_RETPOLINE:
2030 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2031 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
2032 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
2033 }
2034 break;
2035
2036 case SPECTRE_V2_RETPOLINE:
2037 case SPECTRE_V2_LFENCE:
2038 case SPECTRE_V2_IBRS:
2039 pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
2040 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
2041 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
2042 break;
2043
2044 default:
2045 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
2046 dump_stack();
2047 break;
2048 }
2049 }
2050
2051 /*
2052 * Set BHI_DIS_S to prevent indirect branches in the kernel from being
2053 * influenced by branch history in userspace. Not needed if BHI_NO is set.
2054 */
2055 static bool __init spec_ctrl_bhi_dis(void)
2056 {
2057 if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
2058 return false;
2059
2060 x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
2061 update_spec_ctrl(x86_spec_ctrl_base);
2062 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);
2063
2064 return true;
2065 }
2066
2067 enum bhi_mitigations {
2068 BHI_MITIGATION_OFF,
2069 BHI_MITIGATION_AUTO,
2070 BHI_MITIGATION_ON,
2071 BHI_MITIGATION_VMEXIT_ONLY,
2072 };
2073
2074 static enum bhi_mitigations bhi_mitigation __ro_after_init =
2075 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF;
2076
2077 static int __init spectre_bhi_parse_cmdline(char *str)
2078 {
2079 if (!str)
2080 return -EINVAL;
2081
2082 if (!strcmp(str, "off"))
2083 bhi_mitigation = BHI_MITIGATION_OFF;
2084 else if (!strcmp(str, "on"))
2085 bhi_mitigation = BHI_MITIGATION_ON;
2086 else if (!strcmp(str, "vmexit"))
2087 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
2088 else
2089 pr_err("Ignoring unknown spectre_bhi option (%s)", str);
2090
2091 return 0;
2092 }
2093 early_param("spectre_bhi", spectre_bhi_parse_cmdline);
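/*
 * Example: "spectre_bhi=vmexit" limits the software BHB clearing sequence to
 * VM exits (see bhi_apply_mitigation() below), while "spectre_bhi=off"
 * disables the BHI mitigation entirely.
 */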
2094
2095 static void __init bhi_select_mitigation(void)
2096 {
2097 if (!boot_cpu_has(X86_BUG_BHI) || cpu_mitigations_off())
2098 bhi_mitigation = BHI_MITIGATION_OFF;
2099
2100 if (bhi_mitigation == BHI_MITIGATION_AUTO)
2101 bhi_mitigation = BHI_MITIGATION_ON;
2102 }
2103
2104 static void __init bhi_update_mitigation(void)
2105 {
2106 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE)
2107 bhi_mitigation = BHI_MITIGATION_OFF;
2108
2109 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
2110 spectre_v2_cmd == SPECTRE_V2_CMD_AUTO)
2111 bhi_mitigation = BHI_MITIGATION_OFF;
2112 }
2113
2114 static void __init bhi_apply_mitigation(void)
2115 {
2116 if (bhi_mitigation == BHI_MITIGATION_OFF)
2117 return;
2118
2119 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
2120 if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
2121 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
2122 spec_ctrl_disable_kernel_rrsba();
2123 if (rrsba_disabled)
2124 return;
2125 }
2126
2127 if (!IS_ENABLED(CONFIG_X86_64))
2128 return;
2129
2130 /* Mitigate in hardware if supported */
2131 if (spec_ctrl_bhi_dis())
2132 return;
2133
2134 if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
2135 pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
2136 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2137 return;
2138 }
2139
2140 pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
2141 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
2142 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT);
2143 }
2144
2145 static void __init spectre_v2_select_mitigation(void)
2146 {
2147 spectre_v2_cmd = spectre_v2_parse_cmdline();
2148
2149 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
2150 (spectre_v2_cmd == SPECTRE_V2_CMD_NONE || spectre_v2_cmd == SPECTRE_V2_CMD_AUTO))
2151 return;
2152
2153 switch (spectre_v2_cmd) {
2154 case SPECTRE_V2_CMD_NONE:
2155 return;
2156
2157 case SPECTRE_V2_CMD_FORCE:
2158 case SPECTRE_V2_CMD_AUTO:
2159 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
2160 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2161 break;
2162 }
2163
2164 spectre_v2_enabled = spectre_v2_select_retpoline();
2165 break;
2166
2167 case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
2168 pr_err(SPECTRE_V2_LFENCE_MSG);
2169 spectre_v2_enabled = SPECTRE_V2_LFENCE;
2170 break;
2171
2172 case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
2173 spectre_v2_enabled = SPECTRE_V2_RETPOLINE;
2174 break;
2175
2176 case SPECTRE_V2_CMD_RETPOLINE:
2177 spectre_v2_enabled = spectre_v2_select_retpoline();
2178 break;
2179
2180 case SPECTRE_V2_CMD_IBRS:
2181 spectre_v2_enabled = SPECTRE_V2_IBRS;
2182 break;
2183
2184 case SPECTRE_V2_CMD_EIBRS:
2185 spectre_v2_enabled = SPECTRE_V2_EIBRS;
2186 break;
2187
2188 case SPECTRE_V2_CMD_EIBRS_LFENCE:
2189 spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE;
2190 break;
2191
2192 case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
2193 spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE;
2194 break;
2195 }
2196 }
2197
2198 static void __init spectre_v2_update_mitigation(void)
2199 {
2200 if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO &&
2201 !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) {
2202 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
2203 boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2204 retbleed_mitigation != RETBLEED_MITIGATION_NONE &&
2205 retbleed_mitigation != RETBLEED_MITIGATION_STUFF &&
2206 boot_cpu_has(X86_FEATURE_IBRS) &&
2207 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
2208 spectre_v2_enabled = SPECTRE_V2_IBRS;
2209 }
2210 }
2211
2212 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && !cpu_mitigations_off())
2213 pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]);
2214 }
2215
2216 static void __init spectre_v2_apply_mitigation(void)
2217 {
2218 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2219 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
2220
2221 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2222 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
2223 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
2224 } else {
2225 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
2226 update_spec_ctrl(x86_spec_ctrl_base);
2227 }
2228 }
2229
2230 switch (spectre_v2_enabled) {
2231 case SPECTRE_V2_NONE:
2232 return;
2233
2234 case SPECTRE_V2_EIBRS:
2235 break;
2236
2237 case SPECTRE_V2_IBRS:
2238 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
2239 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
2240 pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
2241 break;
2242
2243 case SPECTRE_V2_LFENCE:
2244 case SPECTRE_V2_EIBRS_LFENCE:
2245 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
2246 fallthrough;
2247
2248 case SPECTRE_V2_RETPOLINE:
2249 case SPECTRE_V2_EIBRS_RETPOLINE:
2250 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
2251 break;
2252 }
2253
2254 /*
2255 * Disable alternate RSB predictions in kernel when indirect CALLs and
2256 * JMPs get protection against BHI and Intramode-BTI, but RET
2257 * prediction from a non-RSB predictor is still a risk.
2258 */
2259 if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE ||
2260 spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE ||
2261 spectre_v2_enabled == SPECTRE_V2_RETPOLINE)
2262 spec_ctrl_disable_kernel_rrsba();
2263
2264 spectre_v2_select_rsb_mitigation(spectre_v2_enabled);
2265
2266 /*
2267 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
2268 * and Enhanced IBRS protect firmware too, so enable IBRS around
2269 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
2270 * otherwise enabled.
2271 *
2272 * Use "spectre_v2_enabled" to check Enhanced IBRS instead of
2273 * boot_cpu_has(), because the user might select retpoline on the kernel
2274 * command line and, if the CPU supports Enhanced IBRS, the kernel might
2275 * unintentionally not enable IBRS around firmware calls.
2276 */
2277 if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
2278 boot_cpu_has(X86_FEATURE_IBPB) &&
2279 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2280 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
2281
2282 if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) {
2283 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
2284 pr_info("Enabling Speculation Barrier for firmware calls\n");
2285 }
2286
2287 } else if (boot_cpu_has(X86_FEATURE_IBRS) &&
2288 !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) {
2289 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
2290 pr_info("Enabling Restricted Speculation for firmware calls\n");
2291 }
2292 }
2293
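/*
 * Rewrite this CPU's SPEC_CTRL MSR with the current task's value, ORed with
 * the STIBP bit taken from x86_spec_ctrl_base. Invoked on every CPU from
 * update_stibp_strict() when the SMT state changes.
 */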
2294 static void update_stibp_msr(void * __unused)
2295 {
2296 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
2297 update_spec_ctrl(val);
2298 }
2299
2300 /* Update x86_spec_ctrl_base in case SMT state changed. */
2301 static void update_stibp_strict(void)
2302 {
2303 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
2304
2305 if (sched_smt_active())
2306 mask |= SPEC_CTRL_STIBP;
2307
2308 if (mask == x86_spec_ctrl_base)
2309 return;
2310
2311 pr_info("Update user space SMT mitigation: STIBP %s\n",
2312 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
2313 x86_spec_ctrl_base = mask;
2314 on_each_cpu(update_stibp_msr, NULL, 1);
2315 }
2316
2317 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
2318 static void update_indir_branch_cond(void)
2319 {
2320 if (sched_smt_active())
2321 static_branch_enable(&switch_to_cond_stibp);
2322 else
2323 static_branch_disable(&switch_to_cond_stibp);
2324 }
2325
2326 #undef pr_fmt
2327 #define pr_fmt(fmt) fmt
2328
2329 /* Update the static key controlling the MDS CPU buffer clear in idle */
2330 static void update_mds_branch_idle(void)
2331 {
2332 /*
2333 * Enable the idle clearing if SMT is active on CPUs which are
2334 * affected only by MSBDS and not any other MDS variant.
2335 *
2336 * The other variants cannot be mitigated when SMT is enabled, so
2337 * clearing the buffers on idle just to prevent the Store Buffer
2338 * repartitioning leak would be a window dressing exercise.
2339 */
2340 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
2341 return;
2342
2343 if (sched_smt_active()) {
2344 static_branch_enable(&cpu_buf_idle_clear);
2345 } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
2346 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
2347 static_branch_disable(&cpu_buf_idle_clear);
2348 }
2349 }
2350
2351 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
2352 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
2353 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
2354
2355 void cpu_bugs_smt_update(void)
2356 {
2357 mutex_lock(&spec_ctrl_mutex);
2358
2359 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
2360 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
2361 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
2362
2363 switch (spectre_v2_user_stibp) {
2364 case SPECTRE_V2_USER_NONE:
2365 break;
2366 case SPECTRE_V2_USER_STRICT:
2367 case SPECTRE_V2_USER_STRICT_PREFERRED:
2368 update_stibp_strict();
2369 break;
2370 case SPECTRE_V2_USER_PRCTL:
2371 case SPECTRE_V2_USER_SECCOMP:
2372 update_indir_branch_cond();
2373 break;
2374 }
2375
2376 switch (mds_mitigation) {
2377 case MDS_MITIGATION_FULL:
2378 case MDS_MITIGATION_AUTO:
2379 case MDS_MITIGATION_VMWERV:
2380 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
2381 pr_warn_once(MDS_MSG_SMT);
2382 update_mds_branch_idle();
2383 break;
2384 case MDS_MITIGATION_OFF:
2385 break;
2386 }
2387
2388 switch (taa_mitigation) {
2389 case TAA_MITIGATION_VERW:
2390 case TAA_MITIGATION_AUTO:
2391 case TAA_MITIGATION_UCODE_NEEDED:
2392 if (sched_smt_active())
2393 pr_warn_once(TAA_MSG_SMT);
2394 break;
2395 case TAA_MITIGATION_TSX_DISABLED:
2396 case TAA_MITIGATION_OFF:
2397 break;
2398 }
2399
2400 switch (mmio_mitigation) {
2401 case MMIO_MITIGATION_VERW:
2402 case MMIO_MITIGATION_AUTO:
2403 case MMIO_MITIGATION_UCODE_NEEDED:
2404 if (sched_smt_active())
2405 pr_warn_once(MMIO_MSG_SMT);
2406 break;
2407 case MMIO_MITIGATION_OFF:
2408 break;
2409 }
2410
2411 switch (tsa_mitigation) {
2412 case TSA_MITIGATION_USER_KERNEL:
2413 case TSA_MITIGATION_VM:
2414 case TSA_MITIGATION_AUTO:
2415 case TSA_MITIGATION_FULL:
2416 /*
2417 * TSA-SQ can potentially lead to info leakage between
2418 * SMT threads.
2419 */
2420 if (sched_smt_active())
2421 static_branch_enable(&cpu_buf_idle_clear);
2422 else
2423 static_branch_disable(&cpu_buf_idle_clear);
2424 break;
2425 case TSA_MITIGATION_NONE:
2426 case TSA_MITIGATION_UCODE_NEEDED:
2427 break;
2428 }
2429
2430 mutex_unlock(&spec_ctrl_mutex);
2431 }
2432
2433 #undef pr_fmt
2434 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
2435
2436 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
2437
2438 /* The kernel command line selection */
2439 enum ssb_mitigation_cmd {
2440 SPEC_STORE_BYPASS_CMD_NONE,
2441 SPEC_STORE_BYPASS_CMD_AUTO,
2442 SPEC_STORE_BYPASS_CMD_ON,
2443 SPEC_STORE_BYPASS_CMD_PRCTL,
2444 SPEC_STORE_BYPASS_CMD_SECCOMP,
2445 };
2446
2447 static const char * const ssb_strings[] = {
2448 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
2449 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
2450 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
2451 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
2452 };
2453
2454 static const struct {
2455 const char *option;
2456 enum ssb_mitigation_cmd cmd;
2457 } ssb_mitigation_options[] __initconst = {
2458 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
2459 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
2460 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
2461 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
2462 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
2463 };
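/*
 * Example: "spec_store_bypass_disable=seccomp" selects
 * SPEC_STORE_BYPASS_CMD_SECCOMP, which ssb_select_mitigation() degrades to
 * the prctl mode when the kernel is built without CONFIG_SECCOMP.
 */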
2464
2465 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
2466 {
2467 enum ssb_mitigation_cmd cmd;
2468 char arg[20];
2469 int ret, i;
2470
2471 cmd = IS_ENABLED(CONFIG_MITIGATION_SSB) ?
2472 SPEC_STORE_BYPASS_CMD_AUTO : SPEC_STORE_BYPASS_CMD_NONE;
2473 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
2474 cpu_mitigations_off()) {
2475 return SPEC_STORE_BYPASS_CMD_NONE;
2476 } else {
2477 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
2478 arg, sizeof(arg));
2479 if (ret < 0)
2480 return cmd;
2481
2482 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
2483 if (!match_option(arg, ret, ssb_mitigation_options[i].option))
2484 continue;
2485
2486 cmd = ssb_mitigation_options[i].cmd;
2487 break;
2488 }
2489
2490 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
2491 pr_err("unknown option (%s). Switching to default mode\n", arg);
2492 return cmd;
2493 }
2494 }
2495
2496 return cmd;
2497 }
2498
2499 static void __init ssb_select_mitigation(void)
2500 {
2501 enum ssb_mitigation_cmd cmd;
2502
2503 if (!boot_cpu_has(X86_FEATURE_SSBD))
2504 goto out;
2505
2506 cmd = ssb_parse_cmdline();
2507 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
2508 (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
2509 cmd == SPEC_STORE_BYPASS_CMD_AUTO))
2510 return;
2511
2512 switch (cmd) {
2513 case SPEC_STORE_BYPASS_CMD_SECCOMP:
2514 /*
2515 * Choose prctl+seccomp as the default mode if seccomp is
2516 * enabled.
2517 */
2518 if (IS_ENABLED(CONFIG_SECCOMP))
2519 ssb_mode = SPEC_STORE_BYPASS_SECCOMP;
2520 else
2521 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2522 break;
2523 case SPEC_STORE_BYPASS_CMD_ON:
2524 ssb_mode = SPEC_STORE_BYPASS_DISABLE;
2525 break;
2526 case SPEC_STORE_BYPASS_CMD_AUTO:
2527 case SPEC_STORE_BYPASS_CMD_PRCTL:
2528 ssb_mode = SPEC_STORE_BYPASS_PRCTL;
2529 break;
2530 case SPEC_STORE_BYPASS_CMD_NONE:
2531 break;
2532 }
2533
2534 out:
2535 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2536 pr_info("%s\n", ssb_strings[ssb_mode]);
2537 }
2538
2539 static void __init ssb_apply_mitigation(void)
2540 {
2541 /*
2542 * We have three CPU feature flags that are in play here:
2543 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
2544 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
2545 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
2546 */
2547 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) {
2548 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
2549 /*
2550 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
2551 * use a completely different MSR and bit dependent on family.
2552 */
2553 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
2554 !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
2555 x86_amd_ssb_disable();
2556 } else {
2557 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
2558 update_spec_ctrl(x86_spec_ctrl_base);
2559 }
2560 }
2561 }
2562
2563 #undef pr_fmt
2564 #define pr_fmt(fmt) "Speculation prctl: " fmt
2565
2566 static void task_update_spec_tif(struct task_struct *tsk)
2567 {
2568 /* Force the update of the real TIF bits */
2569 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
2570
2571 /*
2572 * Immediately update the speculation control MSRs for the current
2573 * task, but for a non-current task delay setting the CPU
2574 * mitigation until it is scheduled next.
2575 *
2576 * This can only happen for SECCOMP mitigation. For PRCTL it's
2577 * always the current task.
2578 */
2579 if (tsk == current)
2580 speculation_ctrl_update_current();
2581 }
2582
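/*
 * Userspace opts in to L1D flushing on context switch away from the task via
 * the speculation prctl, e.g. (illustrative call, not part of this file):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_ENABLE, 0, 0);
 *
 * which lands here through arch_prctl_spec_ctrl_set(). It only succeeds when
 * the switch_mm_cond_l1d_flush static key was enabled at boot.
 */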
2583 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
2584 {
2585
2586 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2587 return -EPERM;
2588
2589 switch (ctrl) {
2590 case PR_SPEC_ENABLE:
2591 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2592 return 0;
2593 case PR_SPEC_DISABLE:
2594 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2595 return 0;
2596 default:
2597 return -ERANGE;
2598 }
2599 }
2600
2601 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
2602 {
2603 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
2604 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
2605 return -ENXIO;
2606
2607 switch (ctrl) {
2608 case PR_SPEC_ENABLE:
2609 /* If speculation is force disabled, enable is not allowed */
2610 if (task_spec_ssb_force_disable(task))
2611 return -EPERM;
2612 task_clear_spec_ssb_disable(task);
2613 task_clear_spec_ssb_noexec(task);
2614 task_update_spec_tif(task);
2615 break;
2616 case PR_SPEC_DISABLE:
2617 task_set_spec_ssb_disable(task);
2618 task_clear_spec_ssb_noexec(task);
2619 task_update_spec_tif(task);
2620 break;
2621 case PR_SPEC_FORCE_DISABLE:
2622 task_set_spec_ssb_disable(task);
2623 task_set_spec_ssb_force_disable(task);
2624 task_clear_spec_ssb_noexec(task);
2625 task_update_spec_tif(task);
2626 break;
2627 case PR_SPEC_DISABLE_NOEXEC:
2628 if (task_spec_ssb_force_disable(task))
2629 return -EPERM;
2630 task_set_spec_ssb_disable(task);
2631 task_set_spec_ssb_noexec(task);
2632 task_update_spec_tif(task);
2633 break;
2634 default:
2635 return -ERANGE;
2636 }
2637 return 0;
2638 }
2639
2640 static bool is_spec_ib_user_controlled(void)
2641 {
2642 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2643 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2644 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2645 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2646 }
2647
2648 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2649 {
2650 switch (ctrl) {
2651 case PR_SPEC_ENABLE:
2652 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2653 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2654 return 0;
2655
2656 /*
2657 * With strict mode for both IBPB and STIBP, the instruction
2658 * code paths avoid checking this task flag and instead
2659 * unconditionally run the instruction. However, STIBP and IBPB
2660 * are independent and either can be set to conditionally
2661 * enabled regardless of the mode of the other.
2662 *
2663 * If either is set to conditional, allow the task flag to be
2664 * updated, unless it was force-disabled by a previous prctl
2665 * call. Currently, this is possible on an AMD CPU which has the
2666 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2667 * kernel is booted with 'spectre_v2_user=seccomp', then
2668 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2669 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2670 */
2671 if (!is_spec_ib_user_controlled() ||
2672 task_spec_ib_force_disable(task))
2673 return -EPERM;
2674
2675 task_clear_spec_ib_disable(task);
2676 task_update_spec_tif(task);
2677 break;
2678 case PR_SPEC_DISABLE:
2679 case PR_SPEC_FORCE_DISABLE:
2680 /*
2681 * Indirect branch speculation is always allowed when
2682 * mitigation is force disabled.
2683 */
2684 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2685 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2686 return -EPERM;
2687
2688 if (!is_spec_ib_user_controlled())
2689 return 0;
2690
2691 task_set_spec_ib_disable(task);
2692 if (ctrl == PR_SPEC_FORCE_DISABLE)
2693 task_set_spec_ib_force_disable(task);
2694 task_update_spec_tif(task);
2695 if (task == current)
2696 indirect_branch_prediction_barrier();
2697 break;
2698 default:
2699 return -ERANGE;
2700 }
2701 return 0;
2702 }
2703
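/*
 * Illustrative userspace usage of the per-task speculation controls handled
 * below (calls shown for reference, not part of this file):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
 *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
 *
 * The GET variant reports the PR_SPEC_* state assembled by the *_prctl_get()
 * helpers further down.
 */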
2704 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2705 unsigned long ctrl)
2706 {
2707 switch (which) {
2708 case PR_SPEC_STORE_BYPASS:
2709 return ssb_prctl_set(task, ctrl);
2710 case PR_SPEC_INDIRECT_BRANCH:
2711 return ib_prctl_set(task, ctrl);
2712 case PR_SPEC_L1D_FLUSH:
2713 return l1d_flush_prctl_set(task, ctrl);
2714 default:
2715 return -ENODEV;
2716 }
2717 }
2718
2719 #ifdef CONFIG_SECCOMP
2720 void arch_seccomp_spec_mitigate(struct task_struct *task)
2721 {
2722 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2723 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2724 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2725 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2726 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2727 }
2728 #endif
2729
2730 static int l1d_flush_prctl_get(struct task_struct *task)
2731 {
2732 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2733 return PR_SPEC_FORCE_DISABLE;
2734
2735 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2736 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2737 else
2738 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2739 }
2740
2741 static int ssb_prctl_get(struct task_struct *task)
2742 {
2743 switch (ssb_mode) {
2744 case SPEC_STORE_BYPASS_NONE:
2745 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2746 return PR_SPEC_ENABLE;
2747 return PR_SPEC_NOT_AFFECTED;
2748 case SPEC_STORE_BYPASS_DISABLE:
2749 return PR_SPEC_DISABLE;
2750 case SPEC_STORE_BYPASS_SECCOMP:
2751 case SPEC_STORE_BYPASS_PRCTL:
2752 if (task_spec_ssb_force_disable(task))
2753 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2754 if (task_spec_ssb_noexec(task))
2755 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2756 if (task_spec_ssb_disable(task))
2757 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2758 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2759 }
2760 BUG();
2761 }
2762
2763 static int ib_prctl_get(struct task_struct *task)
2764 {
2765 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2766 return PR_SPEC_NOT_AFFECTED;
2767
2768 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2769 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2770 return PR_SPEC_ENABLE;
2771 else if (is_spec_ib_user_controlled()) {
2772 if (task_spec_ib_force_disable(task))
2773 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2774 if (task_spec_ib_disable(task))
2775 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2776 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2777 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2778 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2779 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2780 return PR_SPEC_DISABLE;
2781 else
2782 return PR_SPEC_NOT_AFFECTED;
2783 }
2784
2785 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2786 {
2787 switch (which) {
2788 case PR_SPEC_STORE_BYPASS:
2789 return ssb_prctl_get(task);
2790 case PR_SPEC_INDIRECT_BRANCH:
2791 return ib_prctl_get(task);
2792 case PR_SPEC_L1D_FLUSH:
2793 return l1d_flush_prctl_get(task);
2794 default:
2795 return -ENODEV;
2796 }
2797 }
2798
2799 void x86_spec_ctrl_setup_ap(void)
2800 {
2801 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2802 update_spec_ctrl(x86_spec_ctrl_base);
2803
2804 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2805 x86_amd_ssb_disable();
2806 }
2807
2808 bool itlb_multihit_kvm_mitigation;
2809 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
2810
2811 #undef pr_fmt
2812 #define pr_fmt(fmt) "L1TF: " fmt
2813
2814 /* Default mitigation for L1TF-affected CPUs */
2815 enum l1tf_mitigations l1tf_mitigation __ro_after_init =
2816 IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF;
2817 #if IS_ENABLED(CONFIG_KVM_INTEL)
2818 EXPORT_SYMBOL_GPL(l1tf_mitigation);
2819 #endif
2820 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2821 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
2822
2823 /*
2824 * These CPUs all support a 44-bit physical address space internally in the
2825 * cache, but CPUID can report a smaller number of physical address bits.
2826 *
2827 * The L1TF mitigation uses the topmost address bit for the inversion of
2828 * non-present PTEs. When the installed memory reaches into the topmost
2829 * address bit due to memory holes, which has been observed on machines
2830 * that report 36 physical address bits and have 32G of RAM installed,
2831 * the mitigation range check in l1tf_select_mitigation() triggers.
2832 * This is a false positive because the mitigation is still possible, as
2833 * the cache uses 44 bits internally. Use the cache bits instead of the
2834 * reported physical bits and adjust them on the affected machines to
2835 * 44 if the reported bits are less than 44.
2836 */
2837 static void override_cache_bits(struct cpuinfo_x86 *c)
2838 {
2839 if (c->x86 != 6)
2840 return;
2841
2842 switch (c->x86_vfm) {
2843 case INTEL_NEHALEM:
2844 case INTEL_WESTMERE:
2845 case INTEL_SANDYBRIDGE:
2846 case INTEL_IVYBRIDGE:
2847 case INTEL_HASWELL:
2848 case INTEL_HASWELL_L:
2849 case INTEL_HASWELL_G:
2850 case INTEL_BROADWELL:
2851 case INTEL_BROADWELL_G:
2852 case INTEL_SKYLAKE_L:
2853 case INTEL_SKYLAKE:
2854 case INTEL_KABYLAKE_L:
2855 case INTEL_KABYLAKE:
2856 if (c->x86_cache_bits < 44)
2857 c->x86_cache_bits = 44;
2858 break;
2859 }
2860 }
2861
2862 static void __init l1tf_select_mitigation(void)
2863 {
2864 if (!boot_cpu_has_bug(X86_BUG_L1TF) || cpu_mitigations_off()) {
2865 l1tf_mitigation = L1TF_MITIGATION_OFF;
2866 return;
2867 }
2868
2869 if (l1tf_mitigation == L1TF_MITIGATION_AUTO) {
2870 if (cpu_mitigations_auto_nosmt())
2871 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2872 else
2873 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2874 }
2875 }
2876
2877 static void __init l1tf_apply_mitigation(void)
2878 {
2879 u64 half_pa;
2880
2881 if (!boot_cpu_has_bug(X86_BUG_L1TF))
2882 return;
2883
2884 override_cache_bits(&boot_cpu_data);
2885
2886 switch (l1tf_mitigation) {
2887 case L1TF_MITIGATION_OFF:
2888 case L1TF_MITIGATION_FLUSH_NOWARN:
2889 case L1TF_MITIGATION_FLUSH:
2890 case L1TF_MITIGATION_AUTO:
2891 break;
2892 case L1TF_MITIGATION_FLUSH_NOSMT:
2893 case L1TF_MITIGATION_FULL:
2894 cpu_smt_disable(false);
2895 break;
2896 case L1TF_MITIGATION_FULL_FORCE:
2897 cpu_smt_disable(true);
2898 break;
2899 }
2900
2901 #if CONFIG_PGTABLE_LEVELS == 2
2902 pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
2903 return;
2904 #endif
2905
2906 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
2907 if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
2908 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
2909 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
2910 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
2911 half_pa);
2912 pr_info("However, doing so will make a part of your RAM unusable.\n");
2913 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
2914 return;
2915 }
2916
2917 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
2918 }
2919
2920 static int __init l1tf_cmdline(char *str)
2921 {
2922 if (!boot_cpu_has_bug(X86_BUG_L1TF))
2923 return 0;
2924
2925 if (!str)
2926 return -EINVAL;
2927
2928 if (!strcmp(str, "off"))
2929 l1tf_mitigation = L1TF_MITIGATION_OFF;
2930 else if (!strcmp(str, "flush,nowarn"))
2931 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
2932 else if (!strcmp(str, "flush"))
2933 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2934 else if (!strcmp(str, "flush,nosmt"))
2935 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2936 else if (!strcmp(str, "full"))
2937 l1tf_mitigation = L1TF_MITIGATION_FULL;
2938 else if (!strcmp(str, "full,force"))
2939 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
2940
2941 return 0;
2942 }
2943 early_param("l1tf", l1tf_cmdline);
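/*
 * Example: "l1tf=full,force" disables SMT without allowing it to be
 * re-enabled at runtime (cpu_smt_disable(true) above), in addition to the
 * PTE inversion mitigation.
 */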
2944
2945 #undef pr_fmt
2946 #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt
2947
2948 enum srso_mitigation {
2949 SRSO_MITIGATION_NONE,
2950 SRSO_MITIGATION_AUTO,
2951 SRSO_MITIGATION_UCODE_NEEDED,
2952 SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
2953 SRSO_MITIGATION_MICROCODE,
2954 SRSO_MITIGATION_SAFE_RET,
2955 SRSO_MITIGATION_IBPB,
2956 SRSO_MITIGATION_IBPB_ON_VMEXIT,
2957 SRSO_MITIGATION_BP_SPEC_REDUCE,
2958 };
2959
2960 static const char * const srso_strings[] = {
2961 [SRSO_MITIGATION_NONE] = "Vulnerable",
2962 [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
2963 [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
2964 [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
2965 [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
2966 [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
2967 [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
2968 [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
2969 };
2970
2971 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO;
2972
2973 static int __init srso_parse_cmdline(char *str)
2974 {
2975 if (!str)
2976 return -EINVAL;
2977
2978 if (!strcmp(str, "off"))
2979 srso_mitigation = SRSO_MITIGATION_NONE;
2980 else if (!strcmp(str, "microcode"))
2981 srso_mitigation = SRSO_MITIGATION_MICROCODE;
2982 else if (!strcmp(str, "safe-ret"))
2983 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
2984 else if (!strcmp(str, "ibpb"))
2985 srso_mitigation = SRSO_MITIGATION_IBPB;
2986 else if (!strcmp(str, "ibpb-vmexit"))
2987 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
2988 else
2989 pr_err("Ignoring unknown SRSO option (%s).", str);
2990
2991 return 0;
2992 }
2993 early_param("spec_rstack_overflow", srso_parse_cmdline);
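/*
 * Example: "spec_rstack_overflow=ibpb-vmexit" requests the
 * IBPB-on-VMEXIT-only variant, which srso_select_mitigation() below may
 * still adjust based on CPU features, kernel config and available microcode.
 */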
2994
2995 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
2996
2997 static void __init srso_select_mitigation(void)
2998 {
2999 bool has_microcode;
3000
3001 if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
3002 srso_mitigation = SRSO_MITIGATION_NONE;
3003
3004 if (srso_mitigation == SRSO_MITIGATION_NONE)
3005 return;
3006
3007 if (srso_mitigation == SRSO_MITIGATION_AUTO)
3008 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
3009
3010 has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
3011 if (has_microcode) {
3012 /*
3013 * Zen1/2 with SMT off aren't vulnerable after the right
3014 * IBPB microcode has been applied.
3015 */
3016 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
3017 setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
3018 srso_mitigation = SRSO_MITIGATION_NONE;
3019 return;
3020 }
3021 } else {
3022 pr_warn("IBPB-extending microcode not applied!\n");
3023 pr_warn(SRSO_NOTICE);
3024 }
3025
3026 switch (srso_mitigation) {
3027 case SRSO_MITIGATION_SAFE_RET:
3028 if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) {
3029 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
3030 goto ibpb_on_vmexit;
3031 }
3032
3033 if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
3034 pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
3035 srso_mitigation = SRSO_MITIGATION_NONE;
3036 }
3037
3038 if (!has_microcode)
3039 srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
3040 break;
3041 ibpb_on_vmexit:
3042 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3043 if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
3044 pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
3045 srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
3046 break;
3047 }
3048 fallthrough;
3049 case SRSO_MITIGATION_IBPB:
3050 if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
3051 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
3052 srso_mitigation = SRSO_MITIGATION_NONE;
3053 }
3054
3055 if (!has_microcode)
3056 srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
3057 break;
3058 default:
3059 break;
3060 }
3061 }
3062
3063 static void __init srso_update_mitigation(void)
3064 {
3065 /* If retbleed is using IBPB, that works for SRSO as well */
3066 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB &&
3067 boot_cpu_has(X86_FEATURE_IBPB_BRTYPE))
3068 srso_mitigation = SRSO_MITIGATION_IBPB;
3069
3070 if (boot_cpu_has_bug(X86_BUG_SRSO) &&
3071 !cpu_mitigations_off() &&
3072 !boot_cpu_has(X86_FEATURE_SRSO_NO))
3073 pr_info("%s\n", srso_strings[srso_mitigation]);
3074 }
3075
3076 static void __init srso_apply_mitigation(void)
3077 {
3078 /*
3079 * Clear the feature flag if this mitigation is not selected as that
3080 * feature flag controls the BpSpecReduce MSR bit toggling in KVM.
3081 */
3082 if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
3083 setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
3084
3085 if (srso_mitigation == SRSO_MITIGATION_NONE) {
3086 if (boot_cpu_has(X86_FEATURE_SBPB))
3087 x86_pred_cmd = PRED_CMD_SBPB;
3088 return;
3089 }
3090
3091 switch (srso_mitigation) {
3092 case SRSO_MITIGATION_SAFE_RET:
3093 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
3094 /*
3095 * Enable the return thunk for generated code
3096 * like ftrace, static_call, etc.
3097 */
3098 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
3099 setup_force_cpu_cap(X86_FEATURE_UNRET);
3100
3101 if (boot_cpu_data.x86 == 0x19) {
3102 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
3103 set_return_thunk(srso_alias_return_thunk);
3104 } else {
3105 setup_force_cpu_cap(X86_FEATURE_SRSO);
3106 set_return_thunk(srso_return_thunk);
3107 }
3108 break;
3109 case SRSO_MITIGATION_IBPB:
3110 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
3111 /*
3112 * IBPB on entry already obviates the need for
3113 * software-based untraining, so clear those flags in case some
3114 * other mitigation like Retbleed has selected them.
3115 */
3116 setup_clear_cpu_cap(X86_FEATURE_UNRET);
3117 setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
3118 fallthrough;
3119 case SRSO_MITIGATION_IBPB_ON_VMEXIT:
3120 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
3121 /*
3122 * There is no need for RSB filling: entry_ibpb() ensures
3123 * all predictions, including the RSB, are invalidated,
3124 * regardless of IBPB implementation.
3125 */
3126 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
3127 break;
3128 default:
3129 break;
3130 }
3131 }
3132
3133 #undef pr_fmt
3134 #define pr_fmt(fmt) fmt
3135
3136 #ifdef CONFIG_SYSFS
3137
3138 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
3139
3140 #if IS_ENABLED(CONFIG_KVM_INTEL)
3141 static const char * const l1tf_vmx_states[] = {
3142 [VMENTER_L1D_FLUSH_AUTO] = "auto",
3143 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
3144 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
3145 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
3146 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
3147 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
3148 };
3149
3150 static ssize_t l1tf_show_state(char *buf)
3151 {
3152 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
3153 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3154
3155 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
3156 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
3157 sched_smt_active())) {
3158 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
3159 l1tf_vmx_states[l1tf_vmx_mitigation]);
3160 }
3161
3162 return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
3163 l1tf_vmx_states[l1tf_vmx_mitigation],
3164 sched_smt_active() ? "vulnerable" : "disabled");
3165 }
3166
3167 static ssize_t itlb_multihit_show_state(char *buf)
3168 {
3169 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
3170 !boot_cpu_has(X86_FEATURE_VMX))
3171 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
3172 else if (!(cr4_read_shadow() & X86_CR4_VMXE))
3173 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
3174 else if (itlb_multihit_kvm_mitigation)
3175 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
3176 else
3177 return sysfs_emit(buf, "KVM: Vulnerable\n");
3178 }
3179 #else
3180 static ssize_t l1tf_show_state(char *buf)
3181 {
3182 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
3183 }
3184
3185 static ssize_t itlb_multihit_show_state(char *buf)
3186 {
3187 return sysfs_emit(buf, "Processor vulnerable\n");
3188 }
3189 #endif
3190
3191 static ssize_t mds_show_state(char *buf)
3192 {
3193 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3194 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3195 mds_strings[mds_mitigation]);
3196 }
3197
3198 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
3199 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3200 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
3201 sched_smt_active() ? "mitigated" : "disabled"));
3202 }
3203
3204 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
3205 sched_smt_active() ? "vulnerable" : "disabled");
3206 }
3207
3208 static ssize_t tsx_async_abort_show_state(char *buf)
3209 {
3210 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
3211 (taa_mitigation == TAA_MITIGATION_OFF))
3212 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
3213
3214 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3215 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3216 taa_strings[taa_mitigation]);
3217 }
3218
3219 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
3220 sched_smt_active() ? "vulnerable" : "disabled");
3221 }
3222
3223 static ssize_t mmio_stale_data_show_state(char *buf)
3224 {
3225 if (mmio_mitigation == MMIO_MITIGATION_OFF)
3226 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
3227
3228 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
3229 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
3230 mmio_strings[mmio_mitigation]);
3231 }
3232
3233 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
3234 sched_smt_active() ? "vulnerable" : "disabled");
3235 }
3236
3237 static ssize_t rfds_show_state(char *buf)
3238 {
3239 return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
3240 }
3241
3242 static ssize_t old_microcode_show_state(char *buf)
3243 {
3244 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
3245 return sysfs_emit(buf, "Unknown: running under hypervisor");
3246
3247 return sysfs_emit(buf, "Vulnerable\n");
3248 }
3249
3250 static ssize_t its_show_state(char *buf)
3251 {
3252 return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
3253 }
3254
3255 static char *stibp_state(void)
3256 {
3257 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
3258 !boot_cpu_has(X86_FEATURE_AUTOIBRS))
3259 return "";
3260
3261 switch (spectre_v2_user_stibp) {
3262 case SPECTRE_V2_USER_NONE:
3263 return "; STIBP: disabled";
3264 case SPECTRE_V2_USER_STRICT:
3265 return "; STIBP: forced";
3266 case SPECTRE_V2_USER_STRICT_PREFERRED:
3267 return "; STIBP: always-on";
3268 case SPECTRE_V2_USER_PRCTL:
3269 case SPECTRE_V2_USER_SECCOMP:
3270 if (static_key_enabled(&switch_to_cond_stibp))
3271 return "; STIBP: conditional";
3272 }
3273 return "";
3274 }
3275
3276 static char *ibpb_state(void)
3277 {
3278 if (boot_cpu_has(X86_FEATURE_IBPB)) {
3279 if (static_key_enabled(&switch_mm_always_ibpb))
3280 return "; IBPB: always-on";
3281 if (static_key_enabled(&switch_mm_cond_ibpb))
3282 return "; IBPB: conditional";
3283 return "; IBPB: disabled";
3284 }
3285 return "";
3286 }
3287
3288 static char *pbrsb_eibrs_state(void)
3289 {
3290 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
3291 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
3292 boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
3293 return "; PBRSB-eIBRS: SW sequence";
3294 else
3295 return "; PBRSB-eIBRS: Vulnerable";
3296 } else {
3297 return "; PBRSB-eIBRS: Not affected";
3298 }
3299 }
3300
3301 static const char *spectre_bhi_state(void)
3302 {
3303 if (!boot_cpu_has_bug(X86_BUG_BHI))
3304 return "; BHI: Not affected";
3305 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
3306 return "; BHI: BHI_DIS_S";
3307 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
3308 return "; BHI: SW loop, KVM: SW loop";
3309 else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
3310 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
3311 rrsba_disabled)
3312 return "; BHI: Retpoline";
3313 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT))
3314 return "; BHI: Vulnerable, KVM: SW loop";
3315
3316 return "; BHI: Vulnerable";
3317 }
3318
3319 static ssize_t spectre_v2_show_state(char *buf)
3320 {
3321 if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
3322 return sysfs_emit(buf, "Vulnerable: LFENCE\n");
3323
3324 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
3325 return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
3326
3327 if (sched_smt_active() && unprivileged_ebpf_enabled() &&
3328 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
3329 return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
3330
3331 return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
3332 spectre_v2_strings[spectre_v2_enabled],
3333 ibpb_state(),
3334 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
3335 stibp_state(),
3336 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
3337 pbrsb_eibrs_state(),
3338 spectre_bhi_state(),
3339 /* this should always be at the end */
3340 spectre_v2_module_string());
3341 }
3342
3343 static ssize_t srbds_show_state(char *buf)
3344 {
3345 return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
3346 }
3347
3348 static ssize_t retbleed_show_state(char *buf)
3349 {
3350 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
3351 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
3352 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
3353 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
3354 return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
3355
3356 return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
3357 !sched_smt_active() ? "disabled" :
3358 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
3359 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
3360 "enabled with STIBP protection" : "vulnerable");
3361 }
3362
3363 return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
3364 }
3365
3366 static ssize_t srso_show_state(char *buf)
3367 {
3368 if (boot_cpu_has(X86_FEATURE_SRSO_NO))
3369 return sysfs_emit(buf, "Mitigation: SMT disabled\n");
3370
3371 return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
3372 }
3373
3374 static ssize_t gds_show_state(char *buf)
3375 {
3376 return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
3377 }
3378
tsa_show_state(char * buf)3379 static ssize_t tsa_show_state(char *buf)
3380 {
3381 return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
3382 }
3383
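/*
 * Common handler for the per-vulnerability sysfs show functions below:
 * report "Not affected" if the CPU does not have the bug, otherwise
 * dispatch to the bug-specific state string, defaulting to "Vulnerable".
 */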
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sysfs_emit(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sysfs_emit(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
		return mmio_stale_data_show_state(buf);

	case X86_BUG_RETBLEED:
		return retbleed_show_state(buf);

	case X86_BUG_SRSO:
		return srso_show_state(buf);

	case X86_BUG_GDS:
		return gds_show_state(buf);

	case X86_BUG_RFDS:
		return rfds_show_state(buf);

	case X86_BUG_OLD_MICROCODE:
		return old_microcode_show_state(buf);

	case X86_BUG_ITS:
		return its_show_state(buf);

	case X86_BUG_TSA:
		return tsa_show_state(buf);

	default:
		break;
	}

	return sysfs_emit(buf, "Vulnerable\n");
}

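/*
 * Per-vulnerability sysfs entry points, wired up to the files in
 * /sys/devices/system/cpu/vulnerabilities/ by the generic CPU driver.
 */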
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}

ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
}

ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
}

ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
}

ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
}

ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
}

ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
}
#endif

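/* Fire a one-time warning if the unpatched default return thunk is ever executed. */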
void __warn_thunk(void)
{
	WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
}