// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *   - Rafael R. Reilova (moved everything from head.S),
 *     <rreilova@ececs.uc.edu>
 *   - Channing Corn (tests & fixes),
 *   - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/cpu.h>

#include "cpu.h"

/*
 * Speculation Vulnerability Handling
 *
 * Each vulnerability is handled with the following functions:
 *   <vuln>_select_mitigation() -- Selects a mitigation to use. This should
 *				   take into account all relevant command line
 *				   options.
 *   <vuln>_update_mitigation() -- This is called after all vulnerabilities have
 *				   selected a mitigation, in case the selection
 *				   may want to change based on other choices
 *				   made. This function is optional.
 *   <vuln>_apply_mitigation()  -- Enable the selected mitigation.
 *
 * The compile-time mitigation in all cases should be AUTO. An explicit
 * command-line option can override AUTO. If no such option is
 * provided, <vuln>_select_mitigation() will override AUTO to the best
 * mitigation option.
 */

static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v1_apply_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init spectre_v2_update_mitigation(void);
static void __init spectre_v2_apply_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init retbleed_update_mitigation(void);
static void __init retbleed_apply_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init spectre_v2_user_update_mitigation(void);
static void __init spectre_v2_user_apply_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init ssb_apply_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init l1tf_apply_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init mds_update_mitigation(void);
static void __init mds_apply_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init taa_update_mitigation(void);
static void __init taa_apply_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init mmio_update_mitigation(void);
static void __init mmio_apply_mitigation(void);
static void __init rfds_select_mitigation(void);
static void __init rfds_update_mitigation(void);
static void __init rfds_apply_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init srbds_apply_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init srso_update_mitigation(void);
static void __init srso_apply_mitigation(void);
static void __init gds_select_mitigation(void);
static void __init gds_apply_mitigation(void);
static void __init bhi_select_mitigation(void);
static void __init bhi_update_mitigation(void);
static void __init bhi_apply_mitigation(void);
static void __init its_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;

static u64 __ro_after_init x86_arch_cap_msr;

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;

static void __init set_return_thunk(void *thunk)
{
	if (x86_return_thunk != __x86_return_thunk)
		pr_warn("x86/bugs: return thunk changed\n");

	x86_return_thunk = thunk;
}

/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
	this_cpu_write(x86_spec_ctrl_current, val);
	wrmsrq(MSR_IA32_SPEC_CTRL, val);
}

/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void update_spec_ctrl_cond(u64 val)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
	 * forced the update can be delayed until that time.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrq(MSR_IA32_SPEC_CTRL, val);
}

noinstr u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control IBPB on vCPU load */
DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);

/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter
 * defaults to false
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/*
 * Controls CPU Fill buffer clear before VMenter. This is a subset of
 * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
 * mitigation is required.
 */
DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);

void __init cpu_select_mitigations(void)
{
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
		rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

		/*
		 * Previously running kernel (kexec) may have some controls
		 * turned ON. Clear them and let the mitigations setup below
		 * rediscover them based on configuration.
		 */
		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
	}

	x86_arch_cap_msr = x86_read_arch_cap_msr();

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	retbleed_select_mitigation();
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();
	rfds_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();
	srso_select_mitigation();
	gds_select_mitigation();
	its_select_mitigation();
	bhi_select_mitigation();

	/*
	 * After mitigations are selected, some may need to update their
	 * choices.
	 */
	spectre_v2_update_mitigation();
	/*
	 * retbleed_update_mitigation() relies on the state set by
	 * spectre_v2_update_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_update_mitigation();

	/*
	 * spectre_v2_user_update_mitigation() depends on
	 * retbleed_update_mitigation(), specifically the STIBP
	 * selection is forced for UNRET or IBPB.
	 */
	spectre_v2_user_update_mitigation();
	mds_update_mitigation();
	taa_update_mitigation();
	mmio_update_mitigation();
	rfds_update_mitigation();
	bhi_update_mitigation();
	/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
	srso_update_mitigation();

	spectre_v1_apply_mitigation();
	spectre_v2_apply_mitigation();
	retbleed_apply_mitigation();
	spectre_v2_user_apply_mitigation();
	ssb_apply_mitigation();
	l1tf_apply_mitigation();
	mds_apply_mitigation();
	taa_apply_mitigation();
	mmio_apply_mitigation();
	rfds_apply_mitigation();
	srbds_apply_mitigation();
	srso_apply_mitigation();
	gds_apply_mitigation();
	bhi_apply_mitigation();
}

/*
 * NOTE: This function is *only* called for SVM, since Intel uses
 * MSR_IA32_SPEC_CTRL for SSBD.
 */
void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 guestval, hostval;
	struct thread_info *ti = current_thread_info();

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrq(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt) "MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_AUTO,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_AUTO,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;

enum rfds_mitigations {
	RFDS_MITIGATION_OFF,
	RFDS_MITIGATION_AUTO,
	RFDS_MITIGATION_VERW,
	RFDS_MITIGATION_UCODE_NEEDED,
};

/* Default mitigation for Register File Data Sampling */
static enum rfds_mitigations rfds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;

/*
 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
 * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
 */
static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_AUTO)
		mds_mitigation = MDS_MITIGATION_FULL;

	if (mds_mitigation == MDS_MITIGATION_OFF)
		return;

	verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init mds_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
		return;

	/* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
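	/*
	 * Note: even when forced to FULL here, the mitigation is downgraded
	 * to VMWERV below if the MD_CLEAR microcode is not present, i.e. the
	 * VERW buffer clearing is then only attempted on a best-effort basis.
	 */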
	if (verw_clear_cpu_buf_mitigation_selected)
		mds_mitigation = MDS_MITIGATION_FULL;

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;
	}

	pr_info("%s\n", mds_strings[mds_mitigation]);
}

static void __init mds_apply_mitigation(void)
{
	if (mds_mitigation == MDS_MITIGATION_FULL ||
	    mds_mitigation == MDS_MITIGATION_VMWERV) {
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "TAA: " fmt

static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};

static bool __init taa_vulnerable(void)
{
	return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
}

static void __init taa_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	if (cpu_mitigations_off())
		taa_mitigation = TAA_MITIGATION_OFF;

	/* Microcode will be checked in taa_update_mitigation(). */
	if (taa_mitigation == TAA_MITIGATION_AUTO)
		taa_mitigation = TAA_MITIGATION_VERW;

	if (taa_mitigation != TAA_MITIGATION_OFF)
		verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init taa_update_mitigation(void)
{
	if (!taa_vulnerable() || cpu_mitigations_off())
		return;

	if (verw_clear_cpu_buf_mitigation_selected)
		taa_mitigation = TAA_MITIGATION_VERW;

	if (taa_mitigation == TAA_MITIGATION_VERW) {
		/* Check if the requisite ucode is available. */
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

		/*
		 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
		 * A microcode update fixes this behavior to clear CPU buffers. It also
		 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
		 * ARCH_CAP_TSX_CTRL_MSR bit.
		 *
		 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
		 * update is required.
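		 *
		 * (The ARCH_CAP_MDS_NO/ARCH_CAP_TSX_CTRL_MSR check below encodes
		 * exactly that case and downgrades the state to UCODE_NEEDED.)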
		 */
		if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
		    !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
			taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", taa_strings[taa_mitigation]);
}

static void __init taa_apply_mitigation(void)
{
	if (taa_mitigation == TAA_MITIGATION_VERW ||
	    taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
		/*
		 * TSX is enabled, select alternate mitigation for TAA which is
		 * the same as MDS. Enable MDS static branch to clear CPU buffers.
		 *
		 * For guests that can't determine whether the correct microcode is
		 * present on host, enable the mitigation for UCODE_NEEDED as well.
		 */
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

		if (taa_nosmt || cpu_mitigations_auto_nosmt())
			cpu_smt_disable(false);
	}
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "MMIO Stale Data: " fmt

static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF]		= "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	    cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	/* Microcode will be checked in mmio_update_mitigation(). */
	if (mmio_mitigation == MMIO_MITIGATION_AUTO)
		mmio_mitigation = MMIO_MITIGATION_VERW;

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
		verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init mmio_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || cpu_mitigations_off())
		return;

	if (verw_clear_cpu_buf_mitigation_selected)
		mmio_mitigation = MMIO_MITIGATION_VERW;

	if (mmio_mitigation == MMIO_MITIGATION_VERW) {
		/*
		 * Check if the system has the right microcode.
		 *
		 * CPU Fill buffer clear mitigation is enumerated by either an explicit
		 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
		 * affected systems.
		 */
		if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
		      (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
		       boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
		       !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
			mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", mmio_strings[mmio_mitigation]);
}

static void __init mmio_apply_mitigation(void)
{
	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	/*
	 * Only enable the VMM mitigation if the CPU buffer clear mitigation is
	 * not being used.
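	 *
	 * (X86_FEATURE_CLEAR_CPU_BUF already clears the buffers on every kernel
	 * and guest entry, so the VM-entry-only cpu_buf_vm_clear key would be
	 * redundant in that case.)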
	 */
	if (verw_clear_cpu_buf_mitigation_selected) {
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		static_branch_disable(&cpu_buf_vm_clear);
	} else {
		static_branch_enable(&cpu_buf_vm_clear);
	}

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&mds_idle_clear);

	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "Register File Data Sampling: " fmt

static const char * const rfds_strings[] = {
	[RFDS_MITIGATION_OFF]		= "Vulnerable",
	[RFDS_MITIGATION_VERW]		= "Mitigation: Clear Register File",
	[RFDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
};

static inline bool __init verw_clears_cpu_reg_file(void)
{
	return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
}

static void __init rfds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
		rfds_mitigation = RFDS_MITIGATION_OFF;
		return;
	}

	if (rfds_mitigation == RFDS_MITIGATION_AUTO)
		rfds_mitigation = RFDS_MITIGATION_VERW;

	if (rfds_mitigation == RFDS_MITIGATION_OFF)
		return;

	if (verw_clears_cpu_reg_file())
		verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init rfds_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off())
		return;

	if (verw_clear_cpu_buf_mitigation_selected)
		rfds_mitigation = RFDS_MITIGATION_VERW;

	if (rfds_mitigation == RFDS_MITIGATION_VERW) {
		if (!verw_clears_cpu_reg_file())
			rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", rfds_strings[rfds_mitigation]);
}

static void __init rfds_apply_mitigation(void)
{
	if (rfds_mitigation == RFDS_MITIGATION_VERW)
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
}

static __init int rfds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_RFDS))
		return 0;

	if (!strcmp(str, "off"))
		rfds_mitigation = RFDS_MITIGATION_OFF;
	else if (!strcmp(str, "on"))
		rfds_mitigation = RFDS_MITIGATION_VERW;

	return 0;
}
early_param("reg_file_data_sampling", rfds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_AUTO,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;
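
/*
 * The SRBDS mitigation is applied via microcode: update_srbds_msr() below
 * toggles the RNGDS_MITG_DIS bit in MSR_IA32_MCU_OPT_CTRL according to the
 * selected state.
 */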

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;

void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	/*
	 * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
	 * being disabled and it hasn't received the SRBDS MSR microcode.
	 */
	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		return;

	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

static void __init srbds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SRBDS) || cpu_mitigations_off()) {
		srbds_mitigation = SRBDS_MITIGATION_OFF;
		return;
	}

	if (srbds_mitigation == SRBDS_MITIGATION_AUTO)
		srbds_mitigation = SRBDS_MITIGATION_FULL;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
	 * by Processor MMIO Stale Data vulnerability.
	 */
	if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static void __init srbds_apply_mitigation(void)
{
	update_srbds_msr();
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "GDS: " fmt

enum gds_mitigations {
	GDS_MITIGATION_OFF,
	GDS_MITIGATION_AUTO,
	GDS_MITIGATION_UCODE_NEEDED,
	GDS_MITIGATION_FORCE,
	GDS_MITIGATION_FULL,
	GDS_MITIGATION_FULL_LOCKED,
	GDS_MITIGATION_HYPERVISOR,
};

static enum gds_mitigations gds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;

static const char * const gds_strings[] = {
	[GDS_MITIGATION_OFF]		= "Vulnerable",
	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

bool gds_ucode_mitigated(void)
{
	return (gds_mitigation == GDS_MITIGATION_FULL ||
		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
}
EXPORT_SYMBOL_GPL(gds_ucode_mitigated);

void update_gds_msr(void)
{
	u64 mcu_ctrl_after;
	u64 mcu_ctrl;

	switch (gds_mitigation) {
	case GDS_MITIGATION_OFF:
		rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl |= GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FULL_LOCKED:
		/*
		 * The LOCKED state comes from the boot CPU. APs might not have
		 * the same state. Make sure the mitigation is enabled on all
		 * CPUs.
		 */
	case GDS_MITIGATION_FULL:
		rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl &= ~GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FORCE:
	case GDS_MITIGATION_UCODE_NEEDED:
	case GDS_MITIGATION_HYPERVISOR:
	case GDS_MITIGATION_AUTO:
		return;
	}

	wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	/*
	 * Check to make sure that the WRMSR value was not ignored. Writes to
	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
	 * processor was not.
	 */
	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
}

static void __init gds_select_mitigation(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
		return;
	}

	if (cpu_mitigations_off())
		gds_mitigation = GDS_MITIGATION_OFF;
	/* Will verify below that mitigation _can_ be disabled */

	if (gds_mitigation == GDS_MITIGATION_AUTO)
		gds_mitigation = GDS_MITIGATION_FULL;

	/* No microcode */
	if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
		if (gds_mitigation != GDS_MITIGATION_FORCE)
			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
		return;
	}

	/* Microcode has mitigation, use it */
	if (gds_mitigation == GDS_MITIGATION_FORCE)
		gds_mitigation = GDS_MITIGATION_FULL;

	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
	if (mcu_ctrl & GDS_MITG_LOCKED) {
		if (gds_mitigation == GDS_MITIGATION_OFF)
			pr_warn("Mitigation locked. Disable failed.\n");

		/*
		 * The mitigation is selected from the boot CPU. All other CPUs
		 * _should_ have the same state. If the boot CPU isn't locked
		 * but others are then update_gds_msr() will WARN() of the state
		 * mismatch. If the boot CPU is locked update_gds_msr() will
		 * ensure the other CPUs have the mitigation enabled.
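		 *
		 * (update_gds_msr() re-reads the MSR after writing and warns
		 * if the write was silently ignored.)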
		 */
		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
	}
}

static void __init gds_apply_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	/* Microcode is present */
	if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
		update_gds_msr();
	else if (gds_mitigation == GDS_MITIGATION_FORCE) {
		/*
		 * This only needs to be done on the boot CPU so do it
		 * here rather than in update_gds_msr()
		 */
		setup_clear_cpu_cap(X86_FEATURE_AVX);
		pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
	}

	pr_info("%s\n", gds_strings[gds_mitigation]);
}

static int __init gds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return 0;

	if (!strcmp(str, "off"))
		gds_mitigation = GDS_MITIGATION_OFF;
	else if (!strcmp(str, "force"))
		gds_mitigation = GDS_MITIGATION_FORCE;

	return 0;
}
early_param("gather_data_sampling", gds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
		SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
}

static void __init spectre_v1_apply_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
		return;

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
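			 *
			 * (X86_FEATURE_FENCE_SWAPGS_USER adds that LFENCE on
			 * the user entry paths.)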
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;

#undef pr_fmt
#define pr_fmt(fmt) "RETBleed: " fmt

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_AUTO,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBPB,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
	RETBLEED_MITIGATION_STUFF,
};

static const char * const retbleed_strings[] = {
	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
	[RETBLEED_MITIGATION_STUFF]	= "Mitigation: Stuffing",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;

static int __ro_after_init retbleed_nosmt = false;

static int __init retbleed_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');
		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "off")) {
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
		} else if (!strcmp(str, "auto")) {
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else if (!strcmp(str, "unret")) {
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		} else if (!strcmp(str, "ibpb")) {
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		} else if (!strcmp(str, "stuff")) {
			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
		} else if (!strcmp(str, "nosmt")) {
			retbleed_nosmt = true;
		} else if (!strcmp(str, "force")) {
			setup_force_cpu_bug(X86_BUG_RETBLEED);
		} else {
			pr_err("Ignoring unknown retbleed option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("retbleed", retbleed_parse_cmdline);

#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"

static void __init retbleed_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) {
		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
		return;
	}

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_UNRET:
		if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
			pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
		}
		break;
	case RETBLEED_MITIGATION_IBPB:
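		/*
		 * retbleed=ibpb needs both the IBPB CPU feature and
		 * MITIGATION_IBPB_ENTRY support; otherwise fall back to AUTO.
		 */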
		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
			pr_err("WARNING: CPU does not support IBPB.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		}
		break;
	case RETBLEED_MITIGATION_STUFF:
		if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
			pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
			pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		}
		break;
	default:
		break;
	}

	if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
		return;

	/* Intel mitigation selected in retbleed_update_mitigation() */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
			 boot_cpu_has(X86_FEATURE_IBPB))
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		else
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
	}
}

static void __init retbleed_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
		return;

	if (retbleed_mitigation == RETBLEED_MITIGATION_NONE)
		goto out;

	/*
	 * retbleed=stuff is only allowed on Intel. If stuffing can't be used
	 * then a different mitigation will be selected below.
	 */
	if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF) {
		if (spectre_v2_enabled != SPECTRE_V2_RETPOLINE) {
			pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		}
	}
	/*
	 * Let IBRS trump all on Intel without affecting the effects of the
	 * retbleed= cmdline option except for call depth based stuffing
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		switch (spectre_v2_enabled) {
		case SPECTRE_V2_IBRS:
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
			break;
		case SPECTRE_V2_EIBRS:
		case SPECTRE_V2_EIBRS_RETPOLINE:
		case SPECTRE_V2_EIBRS_LFENCE:
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
			break;
		default:
			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
				pr_err(RETBLEED_INTEL_MSG);
		}
		/* If nothing has set the mitigation yet, default to NONE. */
		if (retbleed_mitigation == RETBLEED_MITIGATION_AUTO)
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
	}
out:
	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}


static void __init retbleed_apply_mitigation(void)
{
	bool mitigate_smt = false;

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_NONE:
		return;

	case RETBLEED_MITIGATION_UNRET:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		set_return_thunk(retbleed_return_thunk);

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			pr_err(RETBLEED_UNTRAIN_MSG);

		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_IBPB:
		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
		mitigate_smt = true;

		/*
		 * IBPB on entry already obviates the need for
		 * software-based untraining so clear those in case some
		 * other mitigation like SRSO has selected them.
		 */
		setup_clear_cpu_cap(X86_FEATURE_UNRET);
		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);

		/*
		 * There is no need for RSB filling: write_ibpb() ensures
		 * all predictions, including the RSB, are invalidated,
		 * regardless of IBPB implementation.
		 */
		setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);

		break;

	case RETBLEED_MITIGATION_STUFF:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);

		set_return_thunk(call_depth_return_thunk);
		break;

	default:
		break;
	}

	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
		cpu_smt_disable(false);
}

#undef pr_fmt
#define pr_fmt(fmt) "ITS: " fmt

enum its_mitigation_cmd {
	ITS_CMD_OFF,
	ITS_CMD_ON,
	ITS_CMD_VMEXIT,
	ITS_CMD_RSB_STUFF,
};

enum its_mitigation {
	ITS_MITIGATION_OFF,
	ITS_MITIGATION_VMEXIT_ONLY,
	ITS_MITIGATION_ALIGNED_THUNKS,
	ITS_MITIGATION_RETPOLINE_STUFF,
};

static const char * const its_strings[] = {
	[ITS_MITIGATION_OFF]			= "Vulnerable",
	[ITS_MITIGATION_VMEXIT_ONLY]		= "Mitigation: Vulnerable, KVM: Not affected",
	[ITS_MITIGATION_ALIGNED_THUNKS]		= "Mitigation: Aligned branch/return thunks",
	[ITS_MITIGATION_RETPOLINE_STUFF]	= "Mitigation: Retpolines, Stuffing RSB",
};

static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS;

static enum its_mitigation_cmd its_cmd __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_CMD_ON : ITS_CMD_OFF;
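
/*
 * Note: its_cmd only records the command line request; the effective state
 * is computed in its_select_mitigation() and reported via its_strings[].
 */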

static int __init its_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
		pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
		return 0;
	}

	if (!strcmp(str, "off")) {
		its_cmd = ITS_CMD_OFF;
	} else if (!strcmp(str, "on")) {
		its_cmd = ITS_CMD_ON;
	} else if (!strcmp(str, "force")) {
		its_cmd = ITS_CMD_ON;
		setup_force_cpu_bug(X86_BUG_ITS);
	} else if (!strcmp(str, "vmexit")) {
		its_cmd = ITS_CMD_VMEXIT;
	} else if (!strcmp(str, "stuff")) {
		its_cmd = ITS_CMD_RSB_STUFF;
	} else {
		pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
	}

	return 0;
}
early_param("indirect_target_selection", its_parse_cmdline);

static void __init its_select_mitigation(void)
{
	enum its_mitigation_cmd cmd = its_cmd;

	if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
		its_mitigation = ITS_MITIGATION_OFF;
		return;
	}

	/* Retpoline+CDT mitigates ITS, bail out */
	if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
	    boot_cpu_has(X86_FEATURE_CALL_DEPTH)) {
		its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
		goto out;
	}

	/* Exit early to avoid irrelevant warnings */
	if (cmd == ITS_CMD_OFF) {
		its_mitigation = ITS_MITIGATION_OFF;
		goto out;
	}
	if (spectre_v2_enabled == SPECTRE_V2_NONE) {
		pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
		its_mitigation = ITS_MITIGATION_OFF;
		goto out;
	}
	if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
	    !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
		pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
		its_mitigation = ITS_MITIGATION_OFF;
		goto out;
	}
	if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
		pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
		its_mitigation = ITS_MITIGATION_OFF;
		goto out;
	}
	if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
		pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
		its_mitigation = ITS_MITIGATION_OFF;
		goto out;
	}

	if (cmd == ITS_CMD_RSB_STUFF &&
	    (!boot_cpu_has(X86_FEATURE_RETPOLINE) || !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))) {
		pr_err("RSB stuff mitigation not supported, using default\n");
		cmd = ITS_CMD_ON;
	}

	switch (cmd) {
	case ITS_CMD_OFF:
		its_mitigation = ITS_MITIGATION_OFF;
		break;
	case ITS_CMD_VMEXIT:
		if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) {
			its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
			goto out;
		}
		fallthrough;
	case ITS_CMD_ON:
		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
		if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
			setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		set_return_thunk(its_return_thunk);
		break;
	case ITS_CMD_RSB_STUFF:
		its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
		set_return_thunk(call_depth_return_thunk);
		if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) {
			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
			pr_info("Retbleed mitigation updated to stuffing\n");
		}
		break;
	}
out:
	pr_info("%s\n", its_strings[its_mitigation]);
}

#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_MITIGATION_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"

#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{
	if (new_state)
		return;

	/* Unprivileged eBPF is enabled */

	switch (spectre_v2_enabled) {
	case SPECTRE_V2_EIBRS:
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
		break;
	case SPECTRE_V2_EIBRS_LFENCE:
		if (sched_smt_active())
			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
		break;
	default:
		break;
	}
}
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
	SPECTRE_V2_CMD_IBRS,
};

static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO;

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char *option;
	enum spectre_v2_user_cmd cmd;
	bool secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void)
{
	char arg[20];
	int ret, i;

	if (cpu_mitigations_off() || !IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2))
		return SPECTRE_V2_USER_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
}

static void __init spectre_v2_user_select_mitigation(void)
{
	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	switch (spectre_v2_parse_user_cmdline()) {
	case SPECTRE_V2_USER_CMD_NONE:
		return;
	case SPECTRE_V2_USER_CMD_FORCE:
		spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
		spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_PRCTL:
		spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
		spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
		spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_SECCOMP:
		if (IS_ENABLED(CONFIG_SECCOMP))
			spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP;
		else
			spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL;
		spectre_v2_user_stibp = spectre_v2_user_ibpb;
		break;
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
		if (IS_ENABLED(CONFIG_SECCOMP))
			spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP;
		else
			spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
	     spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;

	if (!boot_cpu_has(X86_FEATURE_IBPB))
		spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;

	if (!boot_cpu_has(X86_FEATURE_STIBP))
		spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
}

static void __init spectre_v2_user_update_mitigation(void)
{
	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	/* The spectre_v2 cmd line can override spectre_v2_user options */
	if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) {
		spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE;
		spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
	} else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) {
		spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
		spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT;
	}

	/*
	 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
	 * is not required.
	 *
	 * Intel's Enhanced IBRS also protects against cross-thread branch target
	 * injection in user-mode as the IBRS bit remains always set which
	 * implicitly enables cross-thread protections. However, in legacy IBRS
	 * mode, the IBRS bit is set only on kernel entry and cleared on return
	 * to userspace. AMD Automatic IBRS also does not protect userspace.
	 * These modes therefore disable the implicit cross-thread protection,
	 * so allow for STIBP to be selected in those cases.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !cpu_smt_possible() ||
	    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	     !boot_cpu_has(X86_FEATURE_AUTOIBRS))) {
		spectre_v2_user_stibp = SPECTRE_V2_USER_NONE;
		return;
	}

	if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE &&
	    (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	     retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) {
		if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT &&
		    spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED)
			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
		spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED;
	}
	pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]);
}

static void __init spectre_v2_user_apply_mitigation(void)
{
	/* Initialize Indirect Branch Prediction Barrier */
	if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) {
		static_branch_enable(&switch_vcpu_ibpb);

		switch (spectre_v2_user_ibpb) {
		case SPECTRE_V2_USER_STRICT:
			static_branch_enable(&switch_mm_always_ibpb);
			break;
		case SPECTRE_V2_USER_PRCTL:
		case SPECTRE_V2_USER_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
1743 "always-on" : "conditional"); 1744 } 1745 } 1746 1747 static const char * const spectre_v2_strings[] = { 1748 [SPECTRE_V2_NONE] = "Vulnerable", 1749 [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", 1750 [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", 1751 [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS", 1752 [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE", 1753 [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines", 1754 [SPECTRE_V2_IBRS] = "Mitigation: IBRS", 1755 }; 1756 1757 static const struct { 1758 const char *option; 1759 enum spectre_v2_mitigation_cmd cmd; 1760 bool secure; 1761 } mitigation_options[] __initconst = { 1762 { "off", SPECTRE_V2_CMD_NONE, false }, 1763 { "on", SPECTRE_V2_CMD_FORCE, true }, 1764 { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, 1765 { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, 1766 { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, 1767 { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, 1768 { "eibrs", SPECTRE_V2_CMD_EIBRS, false }, 1769 { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false }, 1770 { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, 1771 { "auto", SPECTRE_V2_CMD_AUTO, false }, 1772 { "ibrs", SPECTRE_V2_CMD_IBRS, false }, 1773 }; 1774 1775 static void __init spec_v2_print_cond(const char *reason, bool secure) 1776 { 1777 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) 1778 pr_info("%s selected on command line.\n", reason); 1779 } 1780 1781 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) 1782 { 1783 enum spectre_v2_mitigation_cmd cmd; 1784 char arg[20]; 1785 int ret, i; 1786 1787 cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE; 1788 if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") || 1789 cpu_mitigations_off()) 1790 return SPECTRE_V2_CMD_NONE; 1791 1792 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); 1793 if (ret < 0) 1794 return cmd; 1795 1796 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { 1797 if (!match_option(arg, ret, mitigation_options[i].option)) 1798 continue; 1799 cmd = mitigation_options[i].cmd; 1800 break; 1801 } 1802 1803 if (i >= ARRAY_SIZE(mitigation_options)) { 1804 pr_err("unknown option (%s). Switching to default mode\n", arg); 1805 return cmd; 1806 } 1807 1808 if ((cmd == SPECTRE_V2_CMD_RETPOLINE || 1809 cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 1810 cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || 1811 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 1812 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 1813 !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { 1814 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 1815 mitigation_options[i].option); 1816 return SPECTRE_V2_CMD_AUTO; 1817 } 1818 1819 if ((cmd == SPECTRE_V2_CMD_EIBRS || 1820 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || 1821 cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && 1822 !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 1823 pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n", 1824 mitigation_options[i].option); 1825 return SPECTRE_V2_CMD_AUTO; 1826 } 1827 1828 if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || 1829 cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && 1830 !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { 1831 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. 
Switching to AUTO select\n", 1832 mitigation_options[i].option); 1833 return SPECTRE_V2_CMD_AUTO; 1834 } 1835 1836 if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) { 1837 pr_err("%s selected but not compiled in. Switching to AUTO select\n", 1838 mitigation_options[i].option); 1839 return SPECTRE_V2_CMD_AUTO; 1840 } 1841 1842 if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { 1843 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", 1844 mitigation_options[i].option); 1845 return SPECTRE_V2_CMD_AUTO; 1846 } 1847 1848 if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { 1849 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", 1850 mitigation_options[i].option); 1851 return SPECTRE_V2_CMD_AUTO; 1852 } 1853 1854 if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) { 1855 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", 1856 mitigation_options[i].option); 1857 return SPECTRE_V2_CMD_AUTO; 1858 } 1859 1860 spec_v2_print_cond(mitigation_options[i].option, 1861 mitigation_options[i].secure); 1862 return cmd; 1863 } 1864 1865 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) 1866 { 1867 if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { 1868 pr_err("Kernel not compiled with retpoline; no mitigation available!"); 1869 return SPECTRE_V2_NONE; 1870 } 1871 1872 return SPECTRE_V2_RETPOLINE; 1873 } 1874 1875 static bool __ro_after_init rrsba_disabled; 1876 1877 /* Disable in-kernel use of non-RSB RET predictors */ 1878 static void __init spec_ctrl_disable_kernel_rrsba(void) 1879 { 1880 if (rrsba_disabled) 1881 return; 1882 1883 if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) { 1884 rrsba_disabled = true; 1885 return; 1886 } 1887 1888 if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) 1889 return; 1890 1891 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; 1892 update_spec_ctrl(x86_spec_ctrl_base); 1893 rrsba_disabled = true; 1894 } 1895 1896 static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode) 1897 { 1898 /* 1899 * WARNING! There are many subtleties to consider when changing *any* 1900 * code related to RSB-related mitigations. Before doing so, carefully 1901 * read the following document, and update if necessary: 1902 * 1903 * Documentation/admin-guide/hw-vuln/rsb.rst 1904 * 1905 * In an overly simplified nutshell: 1906 * 1907 * - User->user RSB attacks are conditionally mitigated during 1908 * context switches by cond_mitigation -> write_ibpb(). 1909 * 1910 * - User->kernel and guest->host attacks are mitigated by eIBRS or 1911 * RSB filling. 1912 * 1913 * Though, depending on config, note that other alternative 1914 * mitigations may end up getting used instead, e.g., IBPB on 1915 * entry/vmexit, call depth tracking, or return thunks. 
1916 */ 1917 1918 switch (mode) { 1919 case SPECTRE_V2_NONE: 1920 break; 1921 1922 case SPECTRE_V2_EIBRS: 1923 case SPECTRE_V2_EIBRS_LFENCE: 1924 case SPECTRE_V2_EIBRS_RETPOLINE: 1925 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 1926 pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); 1927 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); 1928 } 1929 break; 1930 1931 case SPECTRE_V2_RETPOLINE: 1932 case SPECTRE_V2_LFENCE: 1933 case SPECTRE_V2_IBRS: 1934 pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n"); 1935 setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); 1936 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); 1937 break; 1938 1939 default: 1940 pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n"); 1941 dump_stack(); 1942 break; 1943 } 1944 } 1945 1946 /* 1947 * Set BHI_DIS_S to prevent indirect branches in kernel to be influenced by 1948 * branch history in userspace. Not needed if BHI_NO is set. 1949 */ 1950 static bool __init spec_ctrl_bhi_dis(void) 1951 { 1952 if (!boot_cpu_has(X86_FEATURE_BHI_CTRL)) 1953 return false; 1954 1955 x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S; 1956 update_spec_ctrl(x86_spec_ctrl_base); 1957 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW); 1958 1959 return true; 1960 } 1961 1962 enum bhi_mitigations { 1963 BHI_MITIGATION_OFF, 1964 BHI_MITIGATION_AUTO, 1965 BHI_MITIGATION_ON, 1966 BHI_MITIGATION_VMEXIT_ONLY, 1967 }; 1968 1969 static enum bhi_mitigations bhi_mitigation __ro_after_init = 1970 IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF; 1971 1972 static int __init spectre_bhi_parse_cmdline(char *str) 1973 { 1974 if (!str) 1975 return -EINVAL; 1976 1977 if (!strcmp(str, "off")) 1978 bhi_mitigation = BHI_MITIGATION_OFF; 1979 else if (!strcmp(str, "on")) 1980 bhi_mitigation = BHI_MITIGATION_ON; 1981 else if (!strcmp(str, "vmexit")) 1982 bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY; 1983 else 1984 pr_err("Ignoring unknown spectre_bhi option (%s)", str); 1985 1986 return 0; 1987 } 1988 early_param("spectre_bhi", spectre_bhi_parse_cmdline); 1989 1990 static void __init bhi_select_mitigation(void) 1991 { 1992 if (!boot_cpu_has(X86_BUG_BHI) || cpu_mitigations_off()) 1993 bhi_mitigation = BHI_MITIGATION_OFF; 1994 1995 if (bhi_mitigation == BHI_MITIGATION_AUTO) 1996 bhi_mitigation = BHI_MITIGATION_ON; 1997 } 1998 1999 static void __init bhi_update_mitigation(void) 2000 { 2001 if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) 2002 bhi_mitigation = BHI_MITIGATION_OFF; 2003 2004 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && 2005 spectre_v2_cmd == SPECTRE_V2_CMD_AUTO) 2006 bhi_mitigation = BHI_MITIGATION_OFF; 2007 } 2008 2009 static void __init bhi_apply_mitigation(void) 2010 { 2011 if (bhi_mitigation == BHI_MITIGATION_OFF) 2012 return; 2013 2014 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */ 2015 if (boot_cpu_has(X86_FEATURE_RETPOLINE) && 2016 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) { 2017 spec_ctrl_disable_kernel_rrsba(); 2018 if (rrsba_disabled) 2019 return; 2020 } 2021 2022 if (!IS_ENABLED(CONFIG_X86_64)) 2023 return; 2024 2025 /* Mitigate in hardware if supported */ 2026 if (spec_ctrl_bhi_dis()) 2027 return; 2028 2029 if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) { 2030 pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n"); 2031 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); 2032 return; 2033 } 2034 2035 pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n"); 2036 setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP); 2037 
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); 2038 } 2039 2040 static void __init spectre_v2_select_mitigation(void) 2041 { 2042 spectre_v2_cmd = spectre_v2_parse_cmdline(); 2043 2044 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && 2045 (spectre_v2_cmd == SPECTRE_V2_CMD_NONE || spectre_v2_cmd == SPECTRE_V2_CMD_AUTO)) 2046 return; 2047 2048 switch (spectre_v2_cmd) { 2049 case SPECTRE_V2_CMD_NONE: 2050 return; 2051 2052 case SPECTRE_V2_CMD_FORCE: 2053 case SPECTRE_V2_CMD_AUTO: 2054 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { 2055 spectre_v2_enabled = SPECTRE_V2_EIBRS; 2056 break; 2057 } 2058 2059 spectre_v2_enabled = spectre_v2_select_retpoline(); 2060 break; 2061 2062 case SPECTRE_V2_CMD_RETPOLINE_LFENCE: 2063 pr_err(SPECTRE_V2_LFENCE_MSG); 2064 spectre_v2_enabled = SPECTRE_V2_LFENCE; 2065 break; 2066 2067 case SPECTRE_V2_CMD_RETPOLINE_GENERIC: 2068 spectre_v2_enabled = SPECTRE_V2_RETPOLINE; 2069 break; 2070 2071 case SPECTRE_V2_CMD_RETPOLINE: 2072 spectre_v2_enabled = spectre_v2_select_retpoline(); 2073 break; 2074 2075 case SPECTRE_V2_CMD_IBRS: 2076 spectre_v2_enabled = SPECTRE_V2_IBRS; 2077 break; 2078 2079 case SPECTRE_V2_CMD_EIBRS: 2080 spectre_v2_enabled = SPECTRE_V2_EIBRS; 2081 break; 2082 2083 case SPECTRE_V2_CMD_EIBRS_LFENCE: 2084 spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE; 2085 break; 2086 2087 case SPECTRE_V2_CMD_EIBRS_RETPOLINE: 2088 spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE; 2089 break; 2090 } 2091 } 2092 2093 static void __init spectre_v2_update_mitigation(void) 2094 { 2095 if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO) { 2096 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) && 2097 boot_cpu_has_bug(X86_BUG_RETBLEED) && 2098 retbleed_mitigation != RETBLEED_MITIGATION_NONE && 2099 retbleed_mitigation != RETBLEED_MITIGATION_STUFF && 2100 boot_cpu_has(X86_FEATURE_IBRS) && 2101 boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { 2102 spectre_v2_enabled = SPECTRE_V2_IBRS; 2103 } 2104 } 2105 2106 if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && !cpu_mitigations_off()) 2107 pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]); 2108 } 2109 2110 static void __init spectre_v2_apply_mitigation(void) 2111 { 2112 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 2113 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); 2114 2115 if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { 2116 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { 2117 msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); 2118 } else { 2119 x86_spec_ctrl_base |= SPEC_CTRL_IBRS; 2120 update_spec_ctrl(x86_spec_ctrl_base); 2121 } 2122 } 2123 2124 switch (spectre_v2_enabled) { 2125 case SPECTRE_V2_NONE: 2126 return; 2127 2128 case SPECTRE_V2_EIBRS: 2129 break; 2130 2131 case SPECTRE_V2_IBRS: 2132 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); 2133 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) 2134 pr_warn(SPECTRE_V2_IBRS_PERF_MSG); 2135 break; 2136 2137 case SPECTRE_V2_LFENCE: 2138 case SPECTRE_V2_EIBRS_LFENCE: 2139 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); 2140 fallthrough; 2141 2142 case SPECTRE_V2_RETPOLINE: 2143 case SPECTRE_V2_EIBRS_RETPOLINE: 2144 setup_force_cpu_cap(X86_FEATURE_RETPOLINE); 2145 break; 2146 } 2147 2148 /* 2149 * Disable alternate RSB predictions in kernel when indirect CALLs and 2150 * JMPs gets protection against BHI and Intramode-BTI, but RET 2151 * prediction from a non-RSB predictor is still a risk. 
2152 */ 2153 if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE || 2154 spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE || 2155 spectre_v2_enabled == SPECTRE_V2_RETPOLINE) 2156 spec_ctrl_disable_kernel_rrsba(); 2157 2158 spectre_v2_select_rsb_mitigation(spectre_v2_enabled); 2159 2160 /* 2161 * Retpoline protects the kernel, but doesn't protect firmware. IBRS 2162 * and Enhanced IBRS protect firmware too, so enable IBRS around 2163 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't 2164 * otherwise enabled. 2165 * 2166 * Use "spectre_v2_enabled" to check Enhanced IBRS instead of 2167 * boot_cpu_has(), because the user might select retpoline on the kernel 2168 * command line and if the CPU supports Enhanced IBRS, kernel might 2169 * un-intentionally not enable IBRS around firmware calls. 2170 */ 2171 if (boot_cpu_has_bug(X86_BUG_RETBLEED) && 2172 boot_cpu_has(X86_FEATURE_IBPB) && 2173 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || 2174 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { 2175 2176 if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) { 2177 setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); 2178 pr_info("Enabling Speculation Barrier for firmware calls\n"); 2179 } 2180 2181 } else if (boot_cpu_has(X86_FEATURE_IBRS) && 2182 !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { 2183 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); 2184 pr_info("Enabling Restricted Speculation for firmware calls\n"); 2185 } 2186 } 2187 2188 static void update_stibp_msr(void * __unused) 2189 { 2190 u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); 2191 update_spec_ctrl(val); 2192 } 2193 2194 /* Update x86_spec_ctrl_base in case SMT state changed. */ 2195 static void update_stibp_strict(void) 2196 { 2197 u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; 2198 2199 if (sched_smt_active()) 2200 mask |= SPEC_CTRL_STIBP; 2201 2202 if (mask == x86_spec_ctrl_base) 2203 return; 2204 2205 pr_info("Update user space SMT mitigation: STIBP %s\n", 2206 mask & SPEC_CTRL_STIBP ? "always-on" : "off"); 2207 x86_spec_ctrl_base = mask; 2208 on_each_cpu(update_stibp_msr, NULL, 1); 2209 } 2210 2211 /* Update the static key controlling the evaluation of TIF_SPEC_IB */ 2212 static void update_indir_branch_cond(void) 2213 { 2214 if (sched_smt_active()) 2215 static_branch_enable(&switch_to_cond_stibp); 2216 else 2217 static_branch_disable(&switch_to_cond_stibp); 2218 } 2219 2220 #undef pr_fmt 2221 #define pr_fmt(fmt) fmt 2222 2223 /* Update the static key controlling the MDS CPU buffer clear in idle */ 2224 static void update_mds_branch_idle(void) 2225 { 2226 /* 2227 * Enable the idle clearing if SMT is active on CPUs which are 2228 * affected only by MSBDS and not any other MDS variant. 2229 * 2230 * The other variants cannot be mitigated when SMT is enabled, so 2231 * clearing the buffers on idle just to prevent the Store Buffer 2232 * repartitioning leak would be a window dressing exercise. 2233 */ 2234 if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) 2235 return; 2236 2237 if (sched_smt_active()) { 2238 static_branch_enable(&mds_idle_clear); 2239 } else if (mmio_mitigation == MMIO_MITIGATION_OFF || 2240 (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { 2241 static_branch_disable(&mds_idle_clear); 2242 } 2243 } 2244 2245 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" 2246 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" 2247 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" 2248 2249 void cpu_bugs_smt_update(void) 2250 { 2251 mutex_lock(&spec_ctrl_mutex); 2252 2253 if (sched_smt_active() && unprivileged_ebpf_enabled() && 2254 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 2255 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); 2256 2257 switch (spectre_v2_user_stibp) { 2258 case SPECTRE_V2_USER_NONE: 2259 break; 2260 case SPECTRE_V2_USER_STRICT: 2261 case SPECTRE_V2_USER_STRICT_PREFERRED: 2262 update_stibp_strict(); 2263 break; 2264 case SPECTRE_V2_USER_PRCTL: 2265 case SPECTRE_V2_USER_SECCOMP: 2266 update_indir_branch_cond(); 2267 break; 2268 } 2269 2270 switch (mds_mitigation) { 2271 case MDS_MITIGATION_FULL: 2272 case MDS_MITIGATION_AUTO: 2273 case MDS_MITIGATION_VMWERV: 2274 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) 2275 pr_warn_once(MDS_MSG_SMT); 2276 update_mds_branch_idle(); 2277 break; 2278 case MDS_MITIGATION_OFF: 2279 break; 2280 } 2281 2282 switch (taa_mitigation) { 2283 case TAA_MITIGATION_VERW: 2284 case TAA_MITIGATION_AUTO: 2285 case TAA_MITIGATION_UCODE_NEEDED: 2286 if (sched_smt_active()) 2287 pr_warn_once(TAA_MSG_SMT); 2288 break; 2289 case TAA_MITIGATION_TSX_DISABLED: 2290 case TAA_MITIGATION_OFF: 2291 break; 2292 } 2293 2294 switch (mmio_mitigation) { 2295 case MMIO_MITIGATION_VERW: 2296 case MMIO_MITIGATION_AUTO: 2297 case MMIO_MITIGATION_UCODE_NEEDED: 2298 if (sched_smt_active()) 2299 pr_warn_once(MMIO_MSG_SMT); 2300 break; 2301 case MMIO_MITIGATION_OFF: 2302 break; 2303 } 2304 2305 mutex_unlock(&spec_ctrl_mutex); 2306 } 2307 2308 #undef pr_fmt 2309 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt 2310 2311 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; 2312 2313 /* The kernel command line selection */ 2314 enum ssb_mitigation_cmd { 2315 SPEC_STORE_BYPASS_CMD_NONE, 2316 SPEC_STORE_BYPASS_CMD_AUTO, 2317 SPEC_STORE_BYPASS_CMD_ON, 2318 SPEC_STORE_BYPASS_CMD_PRCTL, 2319 SPEC_STORE_BYPASS_CMD_SECCOMP, 2320 }; 2321 2322 static const char * const ssb_strings[] = { 2323 [SPEC_STORE_BYPASS_NONE] = "Vulnerable", 2324 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", 2325 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", 2326 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", 2327 }; 2328 2329 static const struct { 2330 const char *option; 2331 enum ssb_mitigation_cmd cmd; 2332 } ssb_mitigation_options[] __initconst = { 2333 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ 2334 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ 2335 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ 2336 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ 2337 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ 2338 }; 2339 2340 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) 2341 { 2342 enum ssb_mitigation_cmd cmd; 2343 char arg[20]; 2344 int ret, i; 2345 2346 cmd = IS_ENABLED(CONFIG_MITIGATION_SSB) ? 
2347 SPEC_STORE_BYPASS_CMD_AUTO : SPEC_STORE_BYPASS_CMD_NONE; 2348 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || 2349 cpu_mitigations_off()) { 2350 return SPEC_STORE_BYPASS_CMD_NONE; 2351 } else { 2352 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", 2353 arg, sizeof(arg)); 2354 if (ret < 0) 2355 return cmd; 2356 2357 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { 2358 if (!match_option(arg, ret, ssb_mitigation_options[i].option)) 2359 continue; 2360 2361 cmd = ssb_mitigation_options[i].cmd; 2362 break; 2363 } 2364 2365 if (i >= ARRAY_SIZE(ssb_mitigation_options)) { 2366 pr_err("unknown option (%s). Switching to default mode\n", arg); 2367 return cmd; 2368 } 2369 } 2370 2371 return cmd; 2372 } 2373 2374 static void __init ssb_select_mitigation(void) 2375 { 2376 enum ssb_mitigation_cmd cmd; 2377 2378 if (!boot_cpu_has(X86_FEATURE_SSBD)) 2379 goto out; 2380 2381 cmd = ssb_parse_cmdline(); 2382 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && 2383 (cmd == SPEC_STORE_BYPASS_CMD_NONE || 2384 cmd == SPEC_STORE_BYPASS_CMD_AUTO)) 2385 return; 2386 2387 switch (cmd) { 2388 case SPEC_STORE_BYPASS_CMD_SECCOMP: 2389 /* 2390 * Choose prctl+seccomp as the default mode if seccomp is 2391 * enabled. 2392 */ 2393 if (IS_ENABLED(CONFIG_SECCOMP)) 2394 ssb_mode = SPEC_STORE_BYPASS_SECCOMP; 2395 else 2396 ssb_mode = SPEC_STORE_BYPASS_PRCTL; 2397 break; 2398 case SPEC_STORE_BYPASS_CMD_ON: 2399 ssb_mode = SPEC_STORE_BYPASS_DISABLE; 2400 break; 2401 case SPEC_STORE_BYPASS_CMD_AUTO: 2402 case SPEC_STORE_BYPASS_CMD_PRCTL: 2403 ssb_mode = SPEC_STORE_BYPASS_PRCTL; 2404 break; 2405 case SPEC_STORE_BYPASS_CMD_NONE: 2406 break; 2407 } 2408 2409 out: 2410 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 2411 pr_info("%s\n", ssb_strings[ssb_mode]); 2412 } 2413 2414 static void __init ssb_apply_mitigation(void) 2415 { 2416 /* 2417 * We have three CPU feature flags that are in play here: 2418 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. 2419 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass 2420 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation 2421 */ 2422 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) { 2423 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); 2424 /* 2425 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may 2426 * use a completely different MSR and bit dependent on family. 2427 */ 2428 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && 2429 !static_cpu_has(X86_FEATURE_AMD_SSBD)) { 2430 x86_amd_ssb_disable(); 2431 } else { 2432 x86_spec_ctrl_base |= SPEC_CTRL_SSBD; 2433 update_spec_ctrl(x86_spec_ctrl_base); 2434 } 2435 } 2436 } 2437 2438 #undef pr_fmt 2439 #define pr_fmt(fmt) "Speculation prctl: " fmt 2440 2441 static void task_update_spec_tif(struct task_struct *tsk) 2442 { 2443 /* Force the update of the real TIF bits */ 2444 set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); 2445 2446 /* 2447 * Immediately update the speculation control MSRs for the current 2448 * task, but for a non-current task delay setting the CPU 2449 * mitigation until it is scheduled next. 2450 * 2451 * This can only happen for SECCOMP mitigation. For PRCTL it's 2452 * always the current task. 
2453 */ 2454 if (tsk == current) 2455 speculation_ctrl_update_current(); 2456 } 2457 2458 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl) 2459 { 2460 2461 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) 2462 return -EPERM; 2463 2464 switch (ctrl) { 2465 case PR_SPEC_ENABLE: 2466 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); 2467 return 0; 2468 case PR_SPEC_DISABLE: 2469 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); 2470 return 0; 2471 default: 2472 return -ERANGE; 2473 } 2474 } 2475 2476 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) 2477 { 2478 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && 2479 ssb_mode != SPEC_STORE_BYPASS_SECCOMP) 2480 return -ENXIO; 2481 2482 switch (ctrl) { 2483 case PR_SPEC_ENABLE: 2484 /* If speculation is force disabled, enable is not allowed */ 2485 if (task_spec_ssb_force_disable(task)) 2486 return -EPERM; 2487 task_clear_spec_ssb_disable(task); 2488 task_clear_spec_ssb_noexec(task); 2489 task_update_spec_tif(task); 2490 break; 2491 case PR_SPEC_DISABLE: 2492 task_set_spec_ssb_disable(task); 2493 task_clear_spec_ssb_noexec(task); 2494 task_update_spec_tif(task); 2495 break; 2496 case PR_SPEC_FORCE_DISABLE: 2497 task_set_spec_ssb_disable(task); 2498 task_set_spec_ssb_force_disable(task); 2499 task_clear_spec_ssb_noexec(task); 2500 task_update_spec_tif(task); 2501 break; 2502 case PR_SPEC_DISABLE_NOEXEC: 2503 if (task_spec_ssb_force_disable(task)) 2504 return -EPERM; 2505 task_set_spec_ssb_disable(task); 2506 task_set_spec_ssb_noexec(task); 2507 task_update_spec_tif(task); 2508 break; 2509 default: 2510 return -ERANGE; 2511 } 2512 return 0; 2513 } 2514 2515 static bool is_spec_ib_user_controlled(void) 2516 { 2517 return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || 2518 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || 2519 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || 2520 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; 2521 } 2522 2523 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) 2524 { 2525 switch (ctrl) { 2526 case PR_SPEC_ENABLE: 2527 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2528 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2529 return 0; 2530 2531 /* 2532 * With strict mode for both IBPB and STIBP, the instruction 2533 * code paths avoid checking this task flag and instead, 2534 * unconditionally run the instruction. However, STIBP and IBPB 2535 * are independent and either can be set to conditionally 2536 * enabled regardless of the mode of the other. 2537 * 2538 * If either is set to conditional, allow the task flag to be 2539 * updated, unless it was force-disabled by a previous prctl 2540 * call. Currently, this is possible on an AMD CPU which has the 2541 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the 2542 * kernel is booted with 'spectre_v2_user=seccomp', then 2543 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and 2544 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. 2545 */ 2546 if (!is_spec_ib_user_controlled() || 2547 task_spec_ib_force_disable(task)) 2548 return -EPERM; 2549 2550 task_clear_spec_ib_disable(task); 2551 task_update_spec_tif(task); 2552 break; 2553 case PR_SPEC_DISABLE: 2554 case PR_SPEC_FORCE_DISABLE: 2555 /* 2556 * Indirect branch speculation is always allowed when 2557 * mitigation is force disabled. 
2558 */ 2559 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2560 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2561 return -EPERM; 2562 2563 if (!is_spec_ib_user_controlled()) 2564 return 0; 2565 2566 task_set_spec_ib_disable(task); 2567 if (ctrl == PR_SPEC_FORCE_DISABLE) 2568 task_set_spec_ib_force_disable(task); 2569 task_update_spec_tif(task); 2570 if (task == current) 2571 indirect_branch_prediction_barrier(); 2572 break; 2573 default: 2574 return -ERANGE; 2575 } 2576 return 0; 2577 } 2578 2579 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, 2580 unsigned long ctrl) 2581 { 2582 switch (which) { 2583 case PR_SPEC_STORE_BYPASS: 2584 return ssb_prctl_set(task, ctrl); 2585 case PR_SPEC_INDIRECT_BRANCH: 2586 return ib_prctl_set(task, ctrl); 2587 case PR_SPEC_L1D_FLUSH: 2588 return l1d_flush_prctl_set(task, ctrl); 2589 default: 2590 return -ENODEV; 2591 } 2592 } 2593 2594 #ifdef CONFIG_SECCOMP 2595 void arch_seccomp_spec_mitigate(struct task_struct *task) 2596 { 2597 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) 2598 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); 2599 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || 2600 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) 2601 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); 2602 } 2603 #endif 2604 2605 static int l1d_flush_prctl_get(struct task_struct *task) 2606 { 2607 if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) 2608 return PR_SPEC_FORCE_DISABLE; 2609 2610 if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH)) 2611 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2612 else 2613 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2614 } 2615 2616 static int ssb_prctl_get(struct task_struct *task) 2617 { 2618 switch (ssb_mode) { 2619 case SPEC_STORE_BYPASS_NONE: 2620 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) 2621 return PR_SPEC_ENABLE; 2622 return PR_SPEC_NOT_AFFECTED; 2623 case SPEC_STORE_BYPASS_DISABLE: 2624 return PR_SPEC_DISABLE; 2625 case SPEC_STORE_BYPASS_SECCOMP: 2626 case SPEC_STORE_BYPASS_PRCTL: 2627 if (task_spec_ssb_force_disable(task)) 2628 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 2629 if (task_spec_ssb_noexec(task)) 2630 return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; 2631 if (task_spec_ssb_disable(task)) 2632 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2633 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2634 } 2635 BUG(); 2636 } 2637 2638 static int ib_prctl_get(struct task_struct *task) 2639 { 2640 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 2641 return PR_SPEC_NOT_AFFECTED; 2642 2643 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && 2644 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) 2645 return PR_SPEC_ENABLE; 2646 else if (is_spec_ib_user_controlled()) { 2647 if (task_spec_ib_force_disable(task)) 2648 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; 2649 if (task_spec_ib_disable(task)) 2650 return PR_SPEC_PRCTL | PR_SPEC_DISABLE; 2651 return PR_SPEC_PRCTL | PR_SPEC_ENABLE; 2652 } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || 2653 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 2654 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) 2655 return PR_SPEC_DISABLE; 2656 else 2657 return PR_SPEC_NOT_AFFECTED; 2658 } 2659 2660 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 2661 { 2662 switch (which) { 2663 case PR_SPEC_STORE_BYPASS: 2664 return ssb_prctl_get(task); 2665 case PR_SPEC_INDIRECT_BRANCH: 2666 return ib_prctl_get(task); 2667 case PR_SPEC_L1D_FLUSH: 2668 return l1d_flush_prctl_get(task); 2669 default: 2670 return -ENODEV; 2671 } 2672 } 2673 2674 
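/*
 * Illustrative userspace sketch (not kernel code) of how the prctl
 * interface implemented above is typically exercised; the constants come
 * from <linux/prctl.h> and error handling is omitted:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	// Opt the calling task out of Speculative Store Bypass:
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 *	// Query the indirect branch speculation state.  On success the
 *	// return value is a PR_SPEC_* bitmask, e.g. PR_SPEC_PRCTL | PR_SPEC_ENABLE:
 *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
 */
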
void x86_spec_ctrl_setup_ap(void) 2675 { 2676 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) 2677 update_spec_ctrl(x86_spec_ctrl_base); 2678 2679 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) 2680 x86_amd_ssb_disable(); 2681 } 2682 2683 bool itlb_multihit_kvm_mitigation; 2684 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); 2685 2686 #undef pr_fmt 2687 #define pr_fmt(fmt) "L1TF: " fmt 2688 2689 /* Default mitigation for L1TF-affected CPUs */ 2690 enum l1tf_mitigations l1tf_mitigation __ro_after_init = 2691 IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF; 2692 #if IS_ENABLED(CONFIG_KVM_INTEL) 2693 EXPORT_SYMBOL_GPL(l1tf_mitigation); 2694 #endif 2695 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 2696 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); 2697 2698 /* 2699 * These CPUs all support 44bits physical address space internally in the 2700 * cache but CPUID can report a smaller number of physical address bits. 2701 * 2702 * The L1TF mitigation uses the top most address bit for the inversion of 2703 * non present PTEs. When the installed memory reaches into the top most 2704 * address bit due to memory holes, which has been observed on machines 2705 * which report 36bits physical address bits and have 32G RAM installed, 2706 * then the mitigation range check in l1tf_select_mitigation() triggers. 2707 * This is a false positive because the mitigation is still possible due to 2708 * the fact that the cache uses 44bit internally. Use the cache bits 2709 * instead of the reported physical bits and adjust them on the affected 2710 * machines to 44bit if the reported bits are less than 44. 2711 */ 2712 static void override_cache_bits(struct cpuinfo_x86 *c) 2713 { 2714 if (c->x86 != 6) 2715 return; 2716 2717 switch (c->x86_vfm) { 2718 case INTEL_NEHALEM: 2719 case INTEL_WESTMERE: 2720 case INTEL_SANDYBRIDGE: 2721 case INTEL_IVYBRIDGE: 2722 case INTEL_HASWELL: 2723 case INTEL_HASWELL_L: 2724 case INTEL_HASWELL_G: 2725 case INTEL_BROADWELL: 2726 case INTEL_BROADWELL_G: 2727 case INTEL_SKYLAKE_L: 2728 case INTEL_SKYLAKE: 2729 case INTEL_KABYLAKE_L: 2730 case INTEL_KABYLAKE: 2731 if (c->x86_cache_bits < 44) 2732 c->x86_cache_bits = 44; 2733 break; 2734 } 2735 } 2736 2737 static void __init l1tf_select_mitigation(void) 2738 { 2739 if (!boot_cpu_has_bug(X86_BUG_L1TF) || cpu_mitigations_off()) { 2740 l1tf_mitigation = L1TF_MITIGATION_OFF; 2741 return; 2742 } 2743 2744 if (l1tf_mitigation == L1TF_MITIGATION_AUTO) { 2745 if (cpu_mitigations_auto_nosmt()) 2746 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 2747 else 2748 l1tf_mitigation = L1TF_MITIGATION_FLUSH; 2749 } 2750 } 2751 2752 static void __init l1tf_apply_mitigation(void) 2753 { 2754 u64 half_pa; 2755 2756 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 2757 return; 2758 2759 override_cache_bits(&boot_cpu_data); 2760 2761 switch (l1tf_mitigation) { 2762 case L1TF_MITIGATION_OFF: 2763 case L1TF_MITIGATION_FLUSH_NOWARN: 2764 case L1TF_MITIGATION_FLUSH: 2765 case L1TF_MITIGATION_AUTO: 2766 break; 2767 case L1TF_MITIGATION_FLUSH_NOSMT: 2768 case L1TF_MITIGATION_FULL: 2769 cpu_smt_disable(false); 2770 break; 2771 case L1TF_MITIGATION_FULL_FORCE: 2772 cpu_smt_disable(true); 2773 break; 2774 } 2775 2776 #if CONFIG_PGTABLE_LEVELS == 2 2777 pr_warn("Kernel not compiled for PAE. 
No mitigation for L1TF\n"); 2778 return; 2779 #endif 2780 2781 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; 2782 if (l1tf_mitigation != L1TF_MITIGATION_OFF && 2783 e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { 2784 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); 2785 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", 2786 half_pa); 2787 pr_info("However, doing so will make a part of your RAM unusable.\n"); 2788 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); 2789 return; 2790 } 2791 2792 setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); 2793 } 2794 2795 static int __init l1tf_cmdline(char *str) 2796 { 2797 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 2798 return 0; 2799 2800 if (!str) 2801 return -EINVAL; 2802 2803 if (!strcmp(str, "off")) 2804 l1tf_mitigation = L1TF_MITIGATION_OFF; 2805 else if (!strcmp(str, "flush,nowarn")) 2806 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; 2807 else if (!strcmp(str, "flush")) 2808 l1tf_mitigation = L1TF_MITIGATION_FLUSH; 2809 else if (!strcmp(str, "flush,nosmt")) 2810 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; 2811 else if (!strcmp(str, "full")) 2812 l1tf_mitigation = L1TF_MITIGATION_FULL; 2813 else if (!strcmp(str, "full,force")) 2814 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; 2815 2816 return 0; 2817 } 2818 early_param("l1tf", l1tf_cmdline); 2819 2820 #undef pr_fmt 2821 #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt 2822 2823 enum srso_mitigation { 2824 SRSO_MITIGATION_NONE, 2825 SRSO_MITIGATION_AUTO, 2826 SRSO_MITIGATION_UCODE_NEEDED, 2827 SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED, 2828 SRSO_MITIGATION_MICROCODE, 2829 SRSO_MITIGATION_SAFE_RET, 2830 SRSO_MITIGATION_IBPB, 2831 SRSO_MITIGATION_IBPB_ON_VMEXIT, 2832 SRSO_MITIGATION_BP_SPEC_REDUCE, 2833 }; 2834 2835 static const char * const srso_strings[] = { 2836 [SRSO_MITIGATION_NONE] = "Vulnerable", 2837 [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", 2838 [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode", 2839 [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET", 2840 [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET", 2841 [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", 2842 [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only", 2843 [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation" 2844 }; 2845 2846 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO; 2847 2848 static int __init srso_parse_cmdline(char *str) 2849 { 2850 if (!str) 2851 return -EINVAL; 2852 2853 if (!strcmp(str, "off")) 2854 srso_mitigation = SRSO_MITIGATION_NONE; 2855 else if (!strcmp(str, "microcode")) 2856 srso_mitigation = SRSO_MITIGATION_MICROCODE; 2857 else if (!strcmp(str, "safe-ret")) 2858 srso_mitigation = SRSO_MITIGATION_SAFE_RET; 2859 else if (!strcmp(str, "ibpb")) 2860 srso_mitigation = SRSO_MITIGATION_IBPB; 2861 else if (!strcmp(str, "ibpb-vmexit")) 2862 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; 2863 else 2864 pr_err("Ignoring unknown SRSO option (%s).", str); 2865 2866 return 0; 2867 } 2868 early_param("spec_rstack_overflow", srso_parse_cmdline); 2869 2870 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." 
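
/*
 * Illustrative kernel command line usage (documentation sketch only),
 * matching the options parsed by srso_parse_cmdline() above:
 *
 *	spec_rstack_overflow=off
 *	spec_rstack_overflow=safe-ret
 *	spec_rstack_overflow=ibpb-vmexit
 */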
2871 2872 static void __init srso_select_mitigation(void) 2873 { 2874 bool has_microcode; 2875 2876 if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) 2877 srso_mitigation = SRSO_MITIGATION_NONE; 2878 2879 if (srso_mitigation == SRSO_MITIGATION_NONE) 2880 return; 2881 2882 if (srso_mitigation == SRSO_MITIGATION_AUTO) 2883 srso_mitigation = SRSO_MITIGATION_SAFE_RET; 2884 2885 has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE); 2886 if (has_microcode) { 2887 /* 2888 * Zen1/2 with SMT off aren't vulnerable after the right 2889 * IBPB microcode has been applied. 2890 */ 2891 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { 2892 setup_force_cpu_cap(X86_FEATURE_SRSO_NO); 2893 srso_mitigation = SRSO_MITIGATION_NONE; 2894 return; 2895 } 2896 } else { 2897 pr_warn("IBPB-extending microcode not applied!\n"); 2898 pr_warn(SRSO_NOTICE); 2899 } 2900 2901 switch (srso_mitigation) { 2902 case SRSO_MITIGATION_SAFE_RET: 2903 if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) { 2904 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; 2905 goto ibpb_on_vmexit; 2906 } 2907 2908 if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) { 2909 pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n"); 2910 srso_mitigation = SRSO_MITIGATION_NONE; 2911 } 2912 2913 if (!has_microcode) 2914 srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED; 2915 break; 2916 ibpb_on_vmexit: 2917 case SRSO_MITIGATION_IBPB_ON_VMEXIT: 2918 if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) { 2919 pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n"); 2920 srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE; 2921 break; 2922 } 2923 fallthrough; 2924 case SRSO_MITIGATION_IBPB: 2925 if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { 2926 pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); 2927 srso_mitigation = SRSO_MITIGATION_NONE; 2928 } 2929 2930 if (!has_microcode) 2931 srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED; 2932 break; 2933 default: 2934 break; 2935 } 2936 } 2937 2938 static void __init srso_update_mitigation(void) 2939 { 2940 /* If retbleed is using IBPB, that works for SRSO as well */ 2941 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB && 2942 boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) 2943 srso_mitigation = SRSO_MITIGATION_IBPB; 2944 2945 if (boot_cpu_has_bug(X86_BUG_SRSO) && 2946 !cpu_mitigations_off() && 2947 !boot_cpu_has(X86_FEATURE_SRSO_NO)) 2948 pr_info("%s\n", srso_strings[srso_mitigation]); 2949 } 2950 2951 static void __init srso_apply_mitigation(void) 2952 { 2953 /* 2954 * Clear the feature flag if this mitigation is not selected as that 2955 * feature flag controls the BpSpecReduce MSR bit toggling in KVM. 2956 */ 2957 if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE) 2958 setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE); 2959 2960 if (srso_mitigation == SRSO_MITIGATION_NONE) { 2961 if (boot_cpu_has(X86_FEATURE_SBPB)) 2962 x86_pred_cmd = PRED_CMD_SBPB; 2963 return; 2964 } 2965 2966 switch (srso_mitigation) { 2967 case SRSO_MITIGATION_SAFE_RET: 2968 case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED: 2969 /* 2970 * Enable the return thunk for generated code 2971 * like ftrace, static_call, etc. 
2972 */ 2973 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 2974 setup_force_cpu_cap(X86_FEATURE_UNRET); 2975 2976 if (boot_cpu_data.x86 == 0x19) { 2977 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); 2978 set_return_thunk(srso_alias_return_thunk); 2979 } else { 2980 setup_force_cpu_cap(X86_FEATURE_SRSO); 2981 set_return_thunk(srso_return_thunk); 2982 } 2983 break; 2984 case SRSO_MITIGATION_IBPB: 2985 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); 2986 /* 2987 * IBPB on entry already obviates the need for 2988 * software-based untraining so clear those in case some 2989 * other mitigation like Retbleed has selected them. 2990 */ 2991 setup_clear_cpu_cap(X86_FEATURE_UNRET); 2992 setup_clear_cpu_cap(X86_FEATURE_RETHUNK); 2993 fallthrough; 2994 case SRSO_MITIGATION_IBPB_ON_VMEXIT: 2995 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); 2996 /* 2997 * There is no need for RSB filling: entry_ibpb() ensures 2998 * all predictions, including the RSB, are invalidated, 2999 * regardless of IBPB implementation. 3000 */ 3001 setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); 3002 break; 3003 default: 3004 break; 3005 } 3006 } 3007 3008 #undef pr_fmt 3009 #define pr_fmt(fmt) fmt 3010 3011 #ifdef CONFIG_SYSFS 3012 3013 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" 3014 3015 #if IS_ENABLED(CONFIG_KVM_INTEL) 3016 static const char * const l1tf_vmx_states[] = { 3017 [VMENTER_L1D_FLUSH_AUTO] = "auto", 3018 [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", 3019 [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", 3020 [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", 3021 [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", 3022 [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" 3023 }; 3024 3025 static ssize_t l1tf_show_state(char *buf) 3026 { 3027 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) 3028 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); 3029 3030 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || 3031 (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && 3032 sched_smt_active())) { 3033 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, 3034 l1tf_vmx_states[l1tf_vmx_mitigation]); 3035 } 3036 3037 return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, 3038 l1tf_vmx_states[l1tf_vmx_mitigation], 3039 sched_smt_active() ? "vulnerable" : "disabled"); 3040 } 3041 3042 static ssize_t itlb_multihit_show_state(char *buf) 3043 { 3044 if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 3045 !boot_cpu_has(X86_FEATURE_VMX)) 3046 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n"); 3047 else if (!(cr4_read_shadow() & X86_CR4_VMXE)) 3048 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n"); 3049 else if (itlb_multihit_kvm_mitigation) 3050 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n"); 3051 else 3052 return sysfs_emit(buf, "KVM: Vulnerable\n"); 3053 } 3054 #else 3055 static ssize_t l1tf_show_state(char *buf) 3056 { 3057 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); 3058 } 3059 3060 static ssize_t itlb_multihit_show_state(char *buf) 3061 { 3062 return sysfs_emit(buf, "Processor vulnerable\n"); 3063 } 3064 #endif 3065 3066 static ssize_t mds_show_state(char *buf) 3067 { 3068 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 3069 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 3070 mds_strings[mds_mitigation]); 3071 } 3072 3073 if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { 3074 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 3075 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : 3076 sched_smt_active() ? 
"mitigated" : "disabled")); 3077 } 3078 3079 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], 3080 sched_smt_active() ? "vulnerable" : "disabled"); 3081 } 3082 3083 static ssize_t tsx_async_abort_show_state(char *buf) 3084 { 3085 if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || 3086 (taa_mitigation == TAA_MITIGATION_OFF)) 3087 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]); 3088 3089 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 3090 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 3091 taa_strings[taa_mitigation]); 3092 } 3093 3094 return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], 3095 sched_smt_active() ? "vulnerable" : "disabled"); 3096 } 3097 3098 static ssize_t mmio_stale_data_show_state(char *buf) 3099 { 3100 if (mmio_mitigation == MMIO_MITIGATION_OFF) 3101 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); 3102 3103 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { 3104 return sysfs_emit(buf, "%s; SMT Host state unknown\n", 3105 mmio_strings[mmio_mitigation]); 3106 } 3107 3108 return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], 3109 sched_smt_active() ? "vulnerable" : "disabled"); 3110 } 3111 3112 static ssize_t rfds_show_state(char *buf) 3113 { 3114 return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]); 3115 } 3116 3117 static ssize_t old_microcode_show_state(char *buf) 3118 { 3119 if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) 3120 return sysfs_emit(buf, "Unknown: running under hypervisor"); 3121 3122 return sysfs_emit(buf, "Vulnerable\n"); 3123 } 3124 3125 static ssize_t its_show_state(char *buf) 3126 { 3127 return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]); 3128 } 3129 3130 static char *stibp_state(void) 3131 { 3132 if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && 3133 !boot_cpu_has(X86_FEATURE_AUTOIBRS)) 3134 return ""; 3135 3136 switch (spectre_v2_user_stibp) { 3137 case SPECTRE_V2_USER_NONE: 3138 return "; STIBP: disabled"; 3139 case SPECTRE_V2_USER_STRICT: 3140 return "; STIBP: forced"; 3141 case SPECTRE_V2_USER_STRICT_PREFERRED: 3142 return "; STIBP: always-on"; 3143 case SPECTRE_V2_USER_PRCTL: 3144 case SPECTRE_V2_USER_SECCOMP: 3145 if (static_key_enabled(&switch_to_cond_stibp)) 3146 return "; STIBP: conditional"; 3147 } 3148 return ""; 3149 } 3150 3151 static char *ibpb_state(void) 3152 { 3153 if (boot_cpu_has(X86_FEATURE_IBPB)) { 3154 if (static_key_enabled(&switch_mm_always_ibpb)) 3155 return "; IBPB: always-on"; 3156 if (static_key_enabled(&switch_mm_cond_ibpb)) 3157 return "; IBPB: conditional"; 3158 return "; IBPB: disabled"; 3159 } 3160 return ""; 3161 } 3162 3163 static char *pbrsb_eibrs_state(void) 3164 { 3165 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { 3166 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || 3167 boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) 3168 return "; PBRSB-eIBRS: SW sequence"; 3169 else 3170 return "; PBRSB-eIBRS: Vulnerable"; 3171 } else { 3172 return "; PBRSB-eIBRS: Not affected"; 3173 } 3174 } 3175 3176 static const char *spectre_bhi_state(void) 3177 { 3178 if (!boot_cpu_has_bug(X86_BUG_BHI)) 3179 return "; BHI: Not affected"; 3180 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW)) 3181 return "; BHI: BHI_DIS_S"; 3182 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP)) 3183 return "; BHI: SW loop, KVM: SW loop"; 3184 else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && 3185 !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) && 3186 rrsba_disabled) 3187 return "; BHI: Retpoline"; 3188 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT)) 3189 return "; BHI: 
Vulnerable, KVM: SW loop"; 3190 3191 return "; BHI: Vulnerable"; 3192 } 3193 3194 static ssize_t spectre_v2_show_state(char *buf) 3195 { 3196 if (spectre_v2_enabled == SPECTRE_V2_LFENCE) 3197 return sysfs_emit(buf, "Vulnerable: LFENCE\n"); 3198 3199 if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) 3200 return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); 3201 3202 if (sched_smt_active() && unprivileged_ebpf_enabled() && 3203 spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) 3204 return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); 3205 3206 return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n", 3207 spectre_v2_strings[spectre_v2_enabled], 3208 ibpb_state(), 3209 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "", 3210 stibp_state(), 3211 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "", 3212 pbrsb_eibrs_state(), 3213 spectre_bhi_state(), 3214 /* this should always be at the end */ 3215 spectre_v2_module_string()); 3216 } 3217 3218 static ssize_t srbds_show_state(char *buf) 3219 { 3220 return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]); 3221 } 3222 3223 static ssize_t retbleed_show_state(char *buf) 3224 { 3225 if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || 3226 retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { 3227 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 3228 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 3229 return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); 3230 3231 return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation], 3232 !sched_smt_active() ? "disabled" : 3233 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || 3234 spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? 
3235 "enabled with STIBP protection" : "vulnerable"); 3236 } 3237 3238 return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); 3239 } 3240 3241 static ssize_t srso_show_state(char *buf) 3242 { 3243 if (boot_cpu_has(X86_FEATURE_SRSO_NO)) 3244 return sysfs_emit(buf, "Mitigation: SMT disabled\n"); 3245 3246 return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]); 3247 } 3248 3249 static ssize_t gds_show_state(char *buf) 3250 { 3251 return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); 3252 } 3253 3254 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, 3255 char *buf, unsigned int bug) 3256 { 3257 if (!boot_cpu_has_bug(bug)) 3258 return sysfs_emit(buf, "Not affected\n"); 3259 3260 switch (bug) { 3261 case X86_BUG_CPU_MELTDOWN: 3262 if (boot_cpu_has(X86_FEATURE_PTI)) 3263 return sysfs_emit(buf, "Mitigation: PTI\n"); 3264 3265 if (hypervisor_is_type(X86_HYPER_XEN_PV)) 3266 return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); 3267 3268 break; 3269 3270 case X86_BUG_SPECTRE_V1: 3271 return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); 3272 3273 case X86_BUG_SPECTRE_V2: 3274 return spectre_v2_show_state(buf); 3275 3276 case X86_BUG_SPEC_STORE_BYPASS: 3277 return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]); 3278 3279 case X86_BUG_L1TF: 3280 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) 3281 return l1tf_show_state(buf); 3282 break; 3283 3284 case X86_BUG_MDS: 3285 return mds_show_state(buf); 3286 3287 case X86_BUG_TAA: 3288 return tsx_async_abort_show_state(buf); 3289 3290 case X86_BUG_ITLB_MULTIHIT: 3291 return itlb_multihit_show_state(buf); 3292 3293 case X86_BUG_SRBDS: 3294 return srbds_show_state(buf); 3295 3296 case X86_BUG_MMIO_STALE_DATA: 3297 return mmio_stale_data_show_state(buf); 3298 3299 case X86_BUG_RETBLEED: 3300 return retbleed_show_state(buf); 3301 3302 case X86_BUG_SRSO: 3303 return srso_show_state(buf); 3304 3305 case X86_BUG_GDS: 3306 return gds_show_state(buf); 3307 3308 case X86_BUG_RFDS: 3309 return rfds_show_state(buf); 3310 3311 case X86_BUG_OLD_MICROCODE: 3312 return old_microcode_show_state(buf); 3313 3314 case X86_BUG_ITS: 3315 return its_show_state(buf); 3316 3317 default: 3318 break; 3319 } 3320 3321 return sysfs_emit(buf, "Vulnerable\n"); 3322 } 3323 3324 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 3325 { 3326 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); 3327 } 3328 3329 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 3330 { 3331 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); 3332 } 3333 3334 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 3335 { 3336 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); 3337 } 3338 3339 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 3340 { 3341 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); 3342 } 3343 3344 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 3345 { 3346 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); 3347 } 3348 3349 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) 3350 { 3351 return cpu_show_common(dev, attr, buf, X86_BUG_MDS); 3352 } 3353 3354 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) 3355 { 3356 return cpu_show_common(dev, attr, buf, X86_BUG_TAA); 3357 
} 3358 3359 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) 3360 { 3361 return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); 3362 } 3363 3364 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) 3365 { 3366 return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); 3367 } 3368 3369 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) 3370 { 3371 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); 3372 } 3373 3374 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) 3375 { 3376 return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); 3377 } 3378 3379 ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) 3380 { 3381 return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); 3382 } 3383 3384 ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) 3385 { 3386 return cpu_show_common(dev, attr, buf, X86_BUG_GDS); 3387 } 3388 3389 ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf) 3390 { 3391 return cpu_show_common(dev, attr, buf, X86_BUG_RFDS); 3392 } 3393 3394 ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf) 3395 { 3396 return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE); 3397 } 3398 3399 ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf) 3400 { 3401 return cpu_show_common(dev, attr, buf, X86_BUG_ITS); 3402 } 3403 #endif 3404 3405 void __warn_thunk(void) 3406 { 3407 WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n"); 3408 } 3409